Diffstat (limited to 'src/backend/storage/lmgr/spin.c')
-rw-r--r--  src/backend/storage/lmgr/spin.c | 99
1 file changed, 76 insertions(+), 23 deletions(-)
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index dfa47a723fa..7c381fe2ae1 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -31,8 +31,24 @@
#ifndef HAVE_SPINLOCKS
+
+/*
+ * No TAS, so spinlocks are implemented as PGSemaphores.
+ */
+
+#ifndef HAVE_ATOMICS
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
+#else
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
+#endif /* HAVE_ATOMICS */
+
PGSemaphore SpinlockSemaArray;
-#endif
+
+#else /* !HAVE_SPINLOCKS */
+
+#define NUM_EMULATION_SEMAPHORES 0
+
+#endif /* HAVE_SPINLOCKS */
/*
* Report the amount of shared memory needed to store semaphores for spinlock
@@ -41,34 +57,19 @@ PGSemaphore SpinlockSemaArray;
Size
SpinlockSemaSize(void)
{
- return SpinlockSemas() * sizeof(PGSemaphoreData);
+ return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphoreData);
}
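+
+/*
+ * This size feeds into the server's shared-memory request at startup:
+ * CreateSharedMemoryAndSemaphores() in src/backend/storage/ipc/ipci.c adds
+ * SpinlockSemaSize() to its total, so the emulation semaphores get space
+ * reserved whether or not any are actually needed on this build.
+ */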
-#ifdef HAVE_SPINLOCKS
-
/*
* Report number of semaphores needed to support spinlocks.
*/
int
SpinlockSemas(void)
{
- return 0;
+ return NUM_EMULATION_SEMAPHORES;
}
-#else /* !HAVE_SPINLOCKS */
-/*
- * No TAS, so spinlocks are implemented as PGSemaphores.
- */
-
-
-/*
- * Report number of semaphores needed to support spinlocks.
- */
-int
-SpinlockSemas(void)
-{
- return NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES;
-}
+#ifndef HAVE_SPINLOCKS
/*
* Initialize semaphores.
@@ -85,20 +86,68 @@ SpinlockSemaInit(PGSemaphore spinsemas)
}
/*
- * s_lock.h hardware-spinlock emulation
+ * s_lock.h hardware-spinlock emulation using semaphores
+ *
+ * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
+ * map multiple spinlocks onto one semaphore because no process should ever
+ * hold more than one at a time. We just need enough semaphores so that we
+ * aren't adding too much extra contention from that.
+ *
+ * There is one exception to the restriction of only holding one spinlock at a
+ * time, which is that it's ok if emulated atomic operations are nested inside
+ * spinlocks. To avoid the danger of spinlocks and atomics using the same sema,
+ * we make sure "normal" spinlocks and atomics backed by spinlocks use
+ * distinct semaphores (see the nested argument to s_init_lock_sema).
+ *
+ * slock_t is just an int for this implementation; it holds the spinlock
+ * number from 0..(NUM_EMULATION_SEMAPHORES - 1).
*/
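+
+/*
+ * For illustration, with the default pg_config_manual.h values
+ * (NUM_SPINLOCK_SEMAPHORES = 128, NUM_ATOMICS_SEMAPHORES = 64): ordinary
+ * spinlocks are assigned semaphores 0..127 round-robin, while emulated
+ * atomics are assigned 128..191, so a nested atomic operation can never be
+ * handed the semaphore its enclosing spinlock already holds.
+ */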
+static inline void
+s_check_valid(int lockndx)
+{
+ if (lockndx < 0 || lockndx >= NUM_EMULATION_SEMAPHORES)
+ elog(ERROR, "invalid spinlock number: %d", lockndx);
+}
+
void
s_init_lock_sema(volatile slock_t *lock, bool nested)
{
static uint32 counter = 0;
-
- *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
+ uint32 offset;
+ uint32 sema_total;
+ uint32 idx;
+
+ if (nested)
+ {
+ /*
+ * To allow nesting atomics inside spinlocked sections, use a
+ * different spinlock. See comment above.
+ */
+ offset = NUM_SPINLOCK_SEMAPHORES;
+ sema_total = NUM_ATOMICS_SEMAPHORES;
+ }
+ else
+ {
+ offset = 0;
+ sema_total = NUM_SPINLOCK_SEMAPHORES;
+ }
+
+ idx = (counter++ % sema_total) + offset;
+
+ /* double check we did things correctly */
+ s_check_valid(idx);
+
+ *lock = idx;
}
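+
+/*
+ * Only semaphores backing emulated atomics should pass nested = true: e.g.
+ * pg_atomic_init_flag_impl() in src/backend/port/atomics.c calls
+ * s_init_lock_sema((slock_t *) &ptr->sema, true), while ordinary spinlocks
+ * go through S_INIT_LOCK() in s_lock.h, which passes false.
+ */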
void
s_unlock_sema(volatile slock_t *lock)
{
+ int lockndx = *lock;
+
+ s_check_valid(lockndx);
+
- PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
+ PGSemaphoreUnlock(&SpinlockSemaArray[lockndx]);
}
@@ -113,8 +162,12 @@ s_lock_free_sema(volatile slock_t *lock)
int
tas_sema(volatile slock_t *lock)
{
+ int lockndx = *lock;
+
+ s_check_valid(lockndx);
+
/* Note that TAS macros return 0 if *success* */
- return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
+ return !PGSemaphoreTryLock(&SpinlockSemaArray[lockndx]);
}
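+
+/*
+ * Under this emulation, S_LOCK() ends up spinning on tas_sema(): roughly,
+ * s_lock() loops "while (TAS_SPIN(lock)) perform_spin_delay(...)", and
+ * TAS_SPIN() resolves to tas_sema() here, so each retry is a
+ * PGSemaphoreTryLock() attempt rather than a hardware test-and-set.
+ */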
#endif /* !HAVE_SPINLOCKS */