Diffstat (limited to 'src/backend/storage/buffer')
 src/backend/storage/buffer/README     |  4 ++--
 src/backend/storage/buffer/bufmgr.c   |  8 ++++----
 src/backend/storage/buffer/freelist.c | 10 +++++-----
 src/backend/storage/buffer/localbuf.c |  2 +-
 4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README
index a182fcd660c..4b13da5d7ad 100644
--- a/src/backend/storage/buffer/README
+++ b/src/backend/storage/buffer/README
@@ -211,9 +211,9 @@ Buffer Ring Replacement Strategy
When running a query that needs to access a large number of pages just once,
such as VACUUM or a large sequential scan, a different strategy is used.
A page that has been touched only by such a scan is unlikely to be needed
-again soon, so instead of running the normal clock sweep algorithm and
+again soon, so instead of running the normal clock-sweep algorithm and
blowing out the entire buffer cache, a small ring of buffers is allocated
-using the normal clock sweep algorithm and those buffers are reused for the
+using the normal clock-sweep algorithm and those buffers are reused for the
whole scan. This also implies that much of the write traffic caused by such
a statement will be done by the backend itself and not pushed off onto other
processes.
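
The ring behavior described above is easy to picture in isolation: once the
scan has filled its small ring, every further page reuses one of the same few
slots. A minimal standalone sketch, not PostgreSQL code (RING_SIZE, the
preloaded buffer ids, and the toy main() are all invented for illustration):

    #include <stdio.h>

    #define RING_SIZE 16            /* e.g. 256 kB worth of 8 kB pages */

    int
    main(void)
    {
        int     ring[RING_SIZE];    /* buffer ids owned by this scan */
        int     next = 0;           /* next ring slot to reuse */
        int     i;

        /* Pretend the first lap claimed these ids via the clock-sweep. */
        for (i = 0; i < RING_SIZE; i++)
            ring[i] = i;

        /* Scanning 100 pages touches only RING_SIZE distinct buffers. */
        for (i = 0; i < 100; i++)
        {
            int     buf = ring[next];

            next = (next + 1) % RING_SIZE;
            printf("page %3d -> buffer %d\n", i, buf);
        }
        return 0;
    }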
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 350cc0402aa..9fc906a4a40 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -3608,7 +3608,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
- * low-power hibernation mode. (This happens if the strategy clock sweep
+ * low-power hibernation mode. (This happens if the strategy clock-sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
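
The hibernation rule restated in this comment condenses to a small predicate.
A hypothetical sketch only; bgwriter_should_hibernate and its parameters are
invented here, not bufmgr.c's real interface:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    bgwriter_should_hibernate(int bgwriter_lru_maxpages,
                              uint32_t bufs_to_lap,  /* room left before the
                                                      * sweep laps us */
                              uint32_t recent_alloc) /* allocs since last call */
    {
        /* Effectively disabled: nothing for the bgwriter to do. */
        if (bgwriter_lru_maxpages <= 0)
            return true;

        /* Lapped by the clock-sweep and no recent demand for buffers. */
        return (bufs_to_lap == 0 && recent_alloc == 0);
    }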
@@ -3658,7 +3658,7 @@ BgBufferSync(WritebackContext *wb_context)
uint32 new_recent_alloc;

/*
- * Find out where the freelist clock sweep currently is, and how many
+ * Find out where the freelist clock-sweep currently is, and how many
* buffer allocations have happened since our last call.
*/
strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
@@ -3679,8 +3679,8 @@ BgBufferSync(WritebackContext *wb_context)

/*
* Compute strategy_delta = how many buffers have been scanned by the
- * clock sweep since last time. If first time through, assume none. Then
- * see if we are still ahead of the clock sweep, and if so, how many
+ * clock-sweep since last time. If first time through, assume none. Then
+ * see if we are still ahead of the clock-sweep, and if so, how many
* buffers we could scan before we'd catch up with it and "lap" it. Note:
* weird-looking coding of xxx_passes comparisons are to avoid bogus
* behavior when the passes counts wrap around.
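
The "weird-looking coding of xxx_passes comparisons" works by subtracting two
unsigned counters and reinterpreting the difference as signed, which gives the
right answer even after the counters wrap. A standalone demonstration with
invented values:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t    prev_passes = UINT32_MAX;   /* about to wrap */
        uint32_t    cur_passes = 2;             /* wrapped; really 3 later */
        int32_t     delta = (int32_t) (cur_passes - prev_passes);

        /* A naive comparison gets it backwards after the wrap... */
        printf("naive: cur > prev? %d\n", cur_passes > prev_passes);    /* 0 */

        /* ...but the difference, viewed as signed, is still correct. */
        printf("signed delta = %d\n", (int) delta);                     /* 3 */

        return 0;
    }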
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 01909be0272..cd94a7d8a7b 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -33,7 +33,7 @@ typedef struct
slock_t buffer_strategy_lock;

/*
- * Clock sweep hand: index of next buffer to consider grabbing. Note that
+ * Clock-sweep hand: index of next buffer to consider grabbing. Note that
* this isn't a concrete buffer - we only ever increase the value. So, to
* get an actual buffer, it needs to be used modulo NBuffers.
*/
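
The ever-increasing hand can be modeled as a fetch-and-add whose result is
reduced modulo NBuffers. A minimal C11 sketch, not freelist.c itself; the
real ClockSweepTick() additionally folds the counter back down and counts
completed passes when it wraps:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NBUFFERS 1024

    static atomic_uint next_victim;     /* only ever incremented */

    static uint32_t
    clock_sweep_tick(void)
    {
        uint32_t    raw = atomic_fetch_add(&next_victim, 1);

        return raw % NBUFFERS;          /* concrete buffer index */
    }

    int
    main(void)
    {
        int         i;

        for (i = 0; i < 5; i++)
            printf("tick -> buffer %u\n", clock_sweep_tick());
        return 0;
    }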
@@ -51,7 +51,7 @@ typedef struct
* Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
- uint32 completePasses; /* Complete cycles of the clock sweep */
+ uint32 completePasses; /* Complete cycles of the clock-sweep */
pg_atomic_uint32 numBufferAllocs; /* Buffers allocated since last reset */

/*
@@ -311,7 +311,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r
}
}

- /* Nothing on the freelist, so run the "clock sweep" algorithm */
+ /* Nothing on the freelist, so run the "clock-sweep" algorithm */
trycounter = NBuffers;
for (;;)
{
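
Stripped of buffer-header locking, the loop entered above has this shape.
A standalone sketch: the usage_count/pinned arrays stand in for the real
buffer headers, and returning -1 stands in for elog(ERROR, "no unpinned
buffers available"):

    #include <stdint.h>

    #define NBUFFERS 1024

    static uint8_t  usage_count[NBUFFERS];
    static uint8_t  pinned[NBUFFERS];
    static uint32_t hand;               /* sweep hand, used modulo NBUFFERS */

    static int
    strategy_get_buffer(void)
    {
        int         trycounter = NBUFFERS;

        for (;;)
        {
            uint32_t    buf = hand++ % NBUFFERS;

            if (!pinned[buf])
            {
                if (usage_count[buf] > 0)
                {
                    usage_count[buf]--;         /* second chance */
                    trycounter = NBUFFERS;      /* progress: reset the fuse */
                }
                else
                    return (int) buf;           /* usage hit zero: victim */
            }
            else if (--trycounter == 0)
                return -1;                      /* every buffer stayed pinned */
        }
    }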
@@ -511,7 +511,7 @@ StrategyInitialize(bool init)
StrategyControl->firstFreeBuffer = 0;
StrategyControl->lastFreeBuffer = NBuffers - 1;

- /* Initialize the clock sweep pointer */
+ /* Initialize the clock-sweep pointer */
pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0);

/* Clear statistics */
@@ -759,7 +759,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state)
*
* If usage_count is 0 or 1 then the buffer is fair game (we expect 1,
* since our own previous usage of the ring element would have left it
- * there, but it might've been decremented by clock sweep since then). A
+ * there, but it might've been decremented by clock-sweep since then). A
* higher usage_count indicates someone else has touched the buffer, so we
* shouldn't re-use it.
*/
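
The reuse rule in this comment reduces to a two-condition test. A hypothetical
distillation (the function and its parameters are invented; the real code reads
both fields out of the buffer's locked state word):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    ring_buffer_reusable(uint32_t refcount, uint32_t usage_count)
    {
        /*
         * Expect usage_count == 1 from our own last pass around the ring;
         * 0 means the clock-sweep decremented it since.  Anything higher
         * means another backend touched the buffer, so leave it alone.
         */
        return (refcount == 0 && usage_count <= 1);
    }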
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 3c0d20f4659..04fef13409b 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -229,7 +229,7 @@ GetLocalVictimBuffer(void)
ResourceOwnerEnlarge(CurrentResourceOwner);

/*
- * Need to get a new buffer. We use a clock sweep algorithm (essentially
+ * Need to get a new buffer. We use a clock-sweep algorithm (essentially
* the same as what freelist.c does now...)
*/
trycounter = NLocBuffer;
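
The loop that follows this line in localbuf.c is the same clock-sweep shape
again, just without atomics or locks, since local buffers are backend-private.
A standalone sketch; LocalRefCount and nextFreeLocalBufId echo the real
variable names, but the arrays and the -1 error return are stand-ins:

    #include <stdint.h>

    #define NLOCBUFFER 256

    static uint8_t  local_usage[NLOCBUFFER];
    static uint32_t LocalRefCount[NLOCBUFFER];
    static int      nextFreeLocalBufId;

    static int
    get_local_victim(void)
    {
        int         trycounter = NLOCBUFFER;

        for (;;)
        {
            int         b = nextFreeLocalBufId;

            if (++nextFreeLocalBufId >= NLOCBUFFER)
                nextFreeLocalBufId = 0;

            if (LocalRefCount[b] == 0)
            {
                if (local_usage[b] > 0)
                {
                    local_usage[b]--;           /* second chance */
                    trycounter = NLOCBUFFER;
                }
                else
                    return b;                   /* victim found */
            }
            else if (--trycounter == 0)
                return -1;                      /* all local buffers pinned */
        }
    }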