author     Alvaro Herrera <alvherre@alvh.no-ip.org>   2007-09-24 03:53:12 +0000
committer  Alvaro Herrera <alvherre@alvh.no-ip.org>   2007-09-24 03:53:12 +0000
commit     46dcd202ef39374a57d9f8278399bcb315428ecb (patch)
tree       f9b21bf04b94675ccab495ee93dabb5faf38122a /src
parent     f0a06adfe74792a30428320b925faa532ec39c00 (diff)
Reduce the size of memory allocations by lazy vacuum when processing a small
table, by allocating just enough for a hardcoded number of dead tuples per
page.  The current estimate is 200 dead tuples per page.

Per reports from Jeff Amiel, Erik Jones and Marko Kreen, and subsequent
discussion.
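For context, the new sizing rule: the dead-tuple TID array is still sized from
maintenance_work_mem, but is now also capped at an estimated 200 dead tuples
per heap page of the table, so vacuuming a tiny table no longer allocates the
full budget.  A minimal standalone sketch, assuming PostgreSQL's default 8 kB
blocks and 6-byte item pointers; dead_tuple_slots() and its constants are
illustrative stand-ins rather than code from the tree:

#include <stdio.h>

#define BLOCK_SIZE            8192   /* BLCKSZ */
#define TUPLE_HEADER_SIZE     23     /* approx. sizeof(HeapTupleHeaderData) */
#define TID_SIZE              6      /* approx. sizeof(ItemPointerData) */
#define DEAD_TUPLES_PER_PAGE  200    /* LAZY_ALLOC_TUPLES in the patch */

static long
dead_tuple_slots(long maintenance_work_mem_kb, unsigned long relblocks)
{
    /* Start from the maintenance_work_mem budget, expressed as TID slots. */
    long        maxtuples = (maintenance_work_mem_kb * 1024L) / TID_SIZE;

    /* Never less than one page's worth of tuples. */
    if (maxtuples < BLOCK_SIZE / TUPLE_HEADER_SIZE)
        maxtuples = BLOCK_SIZE / TUPLE_HEADER_SIZE;

    /* New in this commit: never more than ~200 dead tuples per heap page. */
    if ((unsigned long) (maxtuples / DEAD_TUPLES_PER_PAGE) > relblocks)
        maxtuples = (long) relblocks * DEAD_TUPLES_PER_PAGE;

    return maxtuples;
}

int
main(void)
{
    /* A 10-page table with 128 MB maintenance_work_mem: 2000 slots, not ~22 million. */
    printf("%ld\n", dead_tuple_slots(128 * 1024L, 10));
    return 0;
}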
Diffstat (limited to 'src')
-rw-r--r--   src/backend/commands/vacuumlazy.c   22
1 file changed, 17 insertions, 5 deletions
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 3385b39c105..e809b0e56fb 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -11,10 +11,12 @@
  * on the number of tuples and pages we will keep track of at once.
  *
  * We are willing to use at most maintenance_work_mem memory space to keep
- * track of dead tuples.  We initially allocate an array of TIDs of that size.
- * If the array threatens to overflow, we suspend the heap scan phase and
- * perform a pass of index cleanup and page compaction, then resume the heap
- * scan with an empty TID array.
+ * track of dead tuples.  We initially allocate an array of TIDs of that size,
+ * with an upper limit that depends on table size (this limit ensures we don't
+ * allocate a huge area uselessly for vacuuming small tables).  If the array
+ * threatens to overflow, we suspend the heap scan phase and perform a pass of
+ * index cleanup and page compaction, then resume the heap scan with an empty
+ * TID array.
  *
  * We can limit the storage for page free space to MaxFSMPages entries,
  * since that's the most the free space map will be willing to remember
@@ -31,7 +33,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.50.4.5 2007/09/16 02:38:14 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.50.4.6 2007/09/24 03:53:12 alvherre Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -63,6 +65,12 @@
 /* MAX_TUPLES_PER_PAGE can be a conservative upper limit */
 #define MAX_TUPLES_PER_PAGE	((int) (BLCKSZ / sizeof(HeapTupleHeaderData)))
 
+/*
+ * Guesstimation of number of dead tuples per page.  This is used to
+ * provide an upper limit to memory allocated when vacuuming small
+ * tables.
+ */
+#define LAZY_ALLOC_TUPLES	200
 
 typedef struct LVRelStats
 {
@@ -942,6 +950,10 @@ lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
 	if (maxtuples < MAX_TUPLES_PER_PAGE)
 		maxtuples = MAX_TUPLES_PER_PAGE;
 
+	/* curious coding here to ensure the multiplication can't overflow */
+	if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
+		maxtuples = relblocks * LAZY_ALLOC_TUPLES;
+
 	vacrelstats->num_dead_tuples = 0;
 	vacrelstats->max_dead_tuples = maxtuples;
 	vacrelstats->dead_tuples = (ItemPointer)
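About the "curious coding" comment in lazy_space_alloc(): multiplying
relblocks by LAZY_ALLOC_TUPLES directly could overflow for a very large
relation, so the patch compares using a division instead and multiplies only
when the cap actually applies, i.e. when the product is already known to fit
within the current maxtuples.  A small sketch of the idiom in isolation, with
hypothetical types and example values (178956970 is roughly the TID count for
1 GB of maintenance_work_mem):

#include <stdio.h>

#define LAZY_ALLOC_TUPLES 200

/*
 * Clamp maxtuples to relblocks * LAZY_ALLOC_TUPLES without ever forming a
 * product that could overflow: compare via division first, and multiply only
 * when the result is guaranteed not to exceed the current maxtuples.
 */
static long
clamp_by_pages(long maxtuples, unsigned int relblocks)
{
    if ((unsigned int) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
        maxtuples = (long) relblocks * LAZY_ALLOC_TUPLES;
    return maxtuples;
}

int
main(void)
{
    /* Small table: the cap applies, 10 pages -> 2000 TID slots. */
    printf("%ld\n", clamp_by_pages(178956970L, 10));

    /* Huge table: the cap does not apply, and no multiplication happens. */
    printf("%ld\n", clamp_by_pages(178956970L, 4000000u));
    return 0;
}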