author     Thomas Munro <tmunro@postgresql.org>  2024-07-30 21:45:01 +1200
committer  Thomas Munro <tmunro@postgresql.org>  2024-07-30 22:58:37 +1200
commit     e25626677f8076eb3ce94586136c5464ee154381 (patch)
tree       f03f87af97b998e2dd5aadcfe8a93a83a1ae716b /src/test
parent     1330843bb78e9d2422af2f2b9909b80732bd6fc0 (diff)
Remove --disable-spinlocks.
A later change will require atomic support, so it wouldn't make sense for a
hypothetical new system not to be able to implement spinlocks.

Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> (concept, not the patch)
Reviewed-by: Andres Freund <andres@anarazel.de> (concept, not the patch)
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
Diffstat (limited to 'src/test')
-rw-r--r--  src/test/regress/regress.c  86
1 file changed, 0 insertions, 86 deletions
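To illustrate the rationale in the commit message (a minimal sketch, not PostgreSQL's actual s_lock.h/spin.c code): once every platform is required to provide atomics, a spinlock can always be layered on top of an atomic test-and-set flag, so a separate "no spinlocks" emulation path has nothing left to cover. The sketch below uses C11 <stdatomic.h>, and the demo_* names are illustrative assumptions, not PostgreSQL identifiers.

#include <sched.h>
#include <stdatomic.h>

typedef atomic_flag demo_slock_t;

static inline void
demo_spin_init(demo_slock_t *lock)
{
	atomic_flag_clear(lock);		/* start in the unlocked state */
}

static inline void
demo_spin_acquire(demo_slock_t *lock)
{
	/* atomic test-and-set; spin (yielding the CPU) until we see "was clear" */
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		sched_yield();
}

static inline void
demo_spin_release(demo_slock_t *lock)
{
	atomic_flag_clear_explicit(lock, memory_order_release);
}

Real implementations add back-off under contention and platform-specific barrier details; the sketch only shows why requiring atomics makes a spinlock-free configuration pointless.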
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 45a6ad3c49e..14aad5a0c6e 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -887,91 +887,7 @@ test_spinlock(void)
if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0)
elog(ERROR, "padding after spinlock modified");
}
-
- /*
- * Ensure that allocating more than INT32_MAX emulated spinlocks works.
- * That's interesting because the spinlock emulation uses a 32bit integer
- * to map spinlocks onto semaphores. There've been bugs...
- */
-#ifndef HAVE_SPINLOCKS
- {
- /*
- * Initialize enough spinlocks to advance counter close to wraparound.
- * It's too expensive to perform acquire/release for each, as those
- * may be syscalls when the spinlock emulation is used (and even just
- * atomic TAS would be expensive).
- */
- for (uint32 i = 0; i < INT32_MAX - 100000; i++)
- {
- slock_t lock;
-
- SpinLockInit(&lock);
- }
-
- for (uint32 i = 0; i < 200000; i++)
- {
- slock_t lock;
-
- SpinLockInit(&lock);
-
- SpinLockAcquire(&lock);
- SpinLockRelease(&lock);
- SpinLockAcquire(&lock);
- SpinLockRelease(&lock);
- }
- }
-#endif
-}
-
-/*
- * Verify that performing atomic ops inside a spinlock isn't a
- * problem. Realistically that's only going to be a problem when both
- * --disable-spinlocks and --disable-atomics are used, but it's cheap enough
- * to just always test.
- *
- * The test works by initializing enough atomics that we'd conflict if there
- * were an overlap between a spinlock and an atomic by holding a spinlock
- * while manipulating more than NUM_SPINLOCK_SEMAPHORES atomics.
- *
- * NUM_TEST_ATOMICS doesn't really need to be more than
- * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
- * extensively.
- */
-static void
-test_atomic_spin_nest(void)
-{
- slock_t lock;
-#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
- pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
- pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
-
- SpinLockInit(&lock);
-
- for (int i = 0; i < NUM_TEST_ATOMICS; i++)
- {
- pg_atomic_init_u32(&atomics32[i], 0);
- pg_atomic_init_u64(&atomics64[i], 0);
- }
-
- /* just so it's not all zeroes */
- for (int i = 0; i < NUM_TEST_ATOMICS; i++)
- {
- EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
- EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
- }
-
- /* test whether we can do atomic op with lock held */
- SpinLockAcquire(&lock);
- for (int i = 0; i < NUM_TEST_ATOMICS; i++)
- {
- EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
- EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
- EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i);
- EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0);
- }
- SpinLockRelease(&lock);
}
-#undef NUM_TEST_ATOMICS
PG_FUNCTION_INFO_V1(test_atomic_ops);
Datum
@@ -989,8 +905,6 @@ test_atomic_ops(PG_FUNCTION_ARGS)
*/
test_spinlock();
- test_atomic_spin_nest();
-
PG_RETURN_BOOL(true);
}
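For context on what the removed test_spinlock() hunk was exercising (a hedged sketch of the pre-removal semaphore emulation; the demo_* names and pool size are illustrative, not PostgreSQL's): under --disable-spinlocks each SpinLockInit() drew a value from a shared 32-bit counter, and acquire/release mapped that value onto one of a small fixed pool of semaphores, which is why counter wraparound near INT32_MAX was worth a regression test.

#define DEMO_NUM_SPINLOCK_SEMAPHORES 128	/* illustrative; PostgreSQL used NUM_SPINLOCK_SEMAPHORES */

typedef unsigned int demo_emulated_slock_t;	/* holds a counter value, not a flag */

static unsigned int demo_spinlock_counter = 0;	/* shared; expected to wrap eventually */

/* Hand out the next counter value; the mapping below must survive wraparound. */
static void
demo_emulated_spin_init(demo_emulated_slock_t *lock)
{
	*lock = demo_spinlock_counter++;
}

/* Map a lock onto its backing semaphore slot. */
static unsigned int
demo_semaphore_slot(const demo_emulated_slock_t *lock)
{
	return *lock % DEMO_NUM_SPINLOCK_SEMAPHORES;
}

With spinlocks always backed by real atomic operations rather than a shared semaphore pool, there is no slot-sharing left to collide with emulated atomics, which is also why test_atomic_spin_nest() could be dropped.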