From 375d8526f2900d0c377f44532f6d09ee06531f67 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Wed, 4 Sep 2013 23:14:33 +0300
Subject: Keep heavily-contended fields in XLogCtlInsert on different cache lines.

Performance testing shows that if the insertpos_lck spinlock and the fields
that it protects are on the same cache line with other variables that are
frequently accessed, the false sharing can hurt performance a lot. Keep them
apart by adding some padding.
---
 src/backend/access/transam/xlog.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 39c58d00fe6..386811389d7 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -408,7 +408,7 @@ typedef struct
 typedef union XLogInsertSlotPadded
 {
 	XLogInsertSlot slot;
-	char		pad[64];
+	char		pad[CACHE_LINE_SIZE];
 } XLogInsertSlotPadded;
 
 /*
@@ -428,8 +428,14 @@ typedef struct XLogCtlInsert
 	uint64		CurrBytePos;
 	uint64		PrevBytePos;
 
-	/* insertion slots, see above for details */
-	XLogInsertSlotPadded *insertSlots;
+	/*
+	 * Make sure the above heavily-contended spinlock and byte positions are
+	 * on their own cache line. In particular, the RedoRecPtr and full page
+	 * write variables below should be on a different cache line. They are
+	 * read on every WAL insertion, but updated rarely, and we don't want
+	 * those reads to steal the cache line containing Curr/PrevBytePos.
+	 */
+	char		pad[CACHE_LINE_SIZE];
 
 	/*
 	 * fullPageWrites is the master copy used by all backends to determine
@@ -455,6 +461,9 @@ typedef struct XLogCtlInsert
 	bool		exclusiveBackup;
 	int			nonExclusiveBackups;
 	XLogRecPtr	lastBackupStart;
+
+	/* insertion slots, see XLogInsertSlot struct above for details */
+	XLogInsertSlotPadded *insertSlots;
 } XLogCtlInsert;
 
 /*
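
For readers unfamiliar with the trick: false sharing occurs when two logically
unrelated variables land on the same cache line, so a write to one forces every
other CPU to reload the line even though the variable it actually cares about
never changed. The sketch below shows the same padding technique in isolation;
the struct name, the field names, and the 128-byte CACHE_LINE_SIZE value are
illustrative stand-ins, not the definitions used by xlog.c.

#include <stdint.h>

#define CACHE_LINE_SIZE 128	/* assumed value for this sketch */

typedef struct
{
	/*
	 * "Hot" fields: protected by a spinlock and updated by every
	 * backend on every WAL insertion.
	 */
	volatile int	insertpos_lck;	/* stand-in for slock_t */
	uint64_t	CurrBytePos;
	uint64_t	PrevBytePos;

	/*
	 * Padding, as in the patch above: it pushes the rarely-written
	 * fields below onto a different cache line, so reads of them do
	 * not keep pulling the hot line above into shared state.
	 */
	char		pad[CACHE_LINE_SIZE];

	/*
	 * "Cold" fields: read on every insertion but written only rarely
	 * (e.g. at checkpoints).
	 */
	uint64_t	RedoRecPtr;
	int		fullPageWrites;
} PaddedInsertExample;

Padding with a plain char array, rather than a compiler-specific alignment
attribute, keeps the layout portable; because the pad is at least one full
cache line long, the cold fields cannot share a line with the hot ones no
matter where the struct itself happens to start in memory.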