path: root/src/backend/storage/lmgr/lock.c
author    Tom Lane <tgl@sss.pgh.pa.us>  2013-06-04 14:58:46 -0400
committer Tom Lane <tgl@sss.pgh.pa.us>  2013-06-04 14:58:46 -0400
commit    dbc6eb1f4b840d252031419d4bf694316812124f (patch)
tree      b72f25c03ff11e29dd7e76fbd43781311d77c32d /src/backend/storage/lmgr/lock.c
parent    79e15c7d86d3f781cc390a5a04db18254ce97b79 (diff)
Fix memory leak in LogStandbySnapshot().
The array allocated by GetRunningTransactionLocks() needs to be pfree'd when we're done with it. Otherwise we leak some memory during each checkpoint, if wal_level = hot_standby. This manifests as memory bloat in the checkpointer process, or in bgwriter in versions before we made the checkpointer separate.

Reported and fixed by Naoya Anzai. Back-patch to 9.0 where the issue was introduced. In passing, improve comments for GetRunningTransactionLocks(), and add an Assert that we didn't overrun the palloc'd array.
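The leak fix itself lands in the caller, LogStandbySnapshot() in standby.c, which is outside this lock.c-only view. A minimal sketch of the caller-side change (function body abbreviated to the relevant calls; not the verbatim patch):

    /* LogStandbySnapshot(), abridged: WAL-log currently held AccessExclusiveLocks */
    xl_standby_lock *locks;
    int         nlocks;

    /* GetRunningTransactionLocks() hands back a palloc'd array */
    locks = GetRunningTransactionLocks(&nlocks);
    if (nlocks > 0)
        LogAccessExclusiveLocks(nlocks, locks);

    /*
     * The array must be pfree'd here; without this, every checkpoint taken
     * with wal_level = hot_standby leaks it in the checkpointer (or in
     * bgwriter on branches where the checkpointer was not yet separate).
     */
    pfree(locks);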
Diffstat (limited to 'src/backend/storage/lmgr/lock.c')
-rw-r--r--  src/backend/storage/lmgr/lock.c | 16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 8cd871f4b40..273c7223027 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -3398,18 +3398,26 @@ GetLockStatusData(void)
}
/*
- * Returns a list of currently held AccessExclusiveLocks, for use
- * by GetRunningTransactionData().
+ * Returns a list of currently held AccessExclusiveLocks, for use by
+ * LogStandbySnapshot(). The result is a palloc'd array,
+ * with the number of elements returned into *nlocks.
+ *
+ * XXX This currently takes a lock on all partitions of the lock table,
+ * but it's possible to do better. By reference counting locks and storing
+ * the value in the ProcArray entry for each backend we could tell if any
+ * locks need recording without having to acquire the partition locks and
+ * scan the lock table. Whether that's worth the additional overhead
+ * is pretty dubious though.
*/
xl_standby_lock *
GetRunningTransactionLocks(int *nlocks)
{
+ xl_standby_lock *accessExclusiveLocks;
PROCLOCK *proclock;
HASH_SEQ_STATUS seqstat;
int i;
int index;
int els;
- xl_standby_lock *accessExclusiveLocks;
/*
* Acquire lock on the entire shared lock data structure.
@@ -3467,6 +3475,8 @@ GetRunningTransactionLocks(int *nlocks)
}
}
+ Assert(index <= els);
+
/*
* And release locks. We do this in reverse order for two reasons: (1)
* Anyone else who needs more than one of the locks will be trying to lock
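For readers outside the PostgreSQL tree, here is a small self-contained illustration (plain C with malloc/assert rather than PostgreSQL's palloc machinery; names are invented for the example) of the contract the new Assert and the caller-side pfree enforce: size the result array from an upper-bound estimate, fill only the qualifying entries, assert the fill count never exceeded the allocation, and leave freeing to the caller.

    #include <assert.h>
    #include <stdlib.h>

    typedef struct
    {
        int     xid;
        int     dbOid;
        int     relOid;
    } demo_standby_lock;

    /* Collect even-numbered candidates; a stand-in for "held AccessExclusiveLock". */
    static demo_standby_lock *
    collect_demo_locks(const int *candidates, int ncandidates, int *nlocks)
    {
        int     els = ncandidates;      /* upper bound on the result size */
        int     index = 0;
        demo_standby_lock *result = malloc(els * sizeof(demo_standby_lock));

        for (int i = 0; i < ncandidates; i++)
        {
            if (candidates[i] % 2 == 0)
            {
                result[index].xid = candidates[i];
                result[index].dbOid = 0;
                result[index].relOid = 0;
                index++;
            }
        }

        assert(index <= els);           /* we never wrote past the allocation */
        *nlocks = index;
        return result;                  /* caller must free() it, cf. the leak fixed above */
    }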