| author | Andres Freund <andres@anarazel.de> | 2017-04-07 14:44:47 -0700 |
|---|---|---|
| committer | Andres Freund <andres@anarazel.de> | 2017-04-07 14:48:11 -0700 |
| commit | e8fdbd58fe564a29977f4331cd26f9697d76fc40 (patch) | |
| tree | 7ac78010b4ad51730fe948a79e7c5d7e5f461981 /src/backend/port/atomics.c | |
| parent | 28afad5c85b436f19d9f2c0e3197c7db960fef6f (diff) | |
Improve 64bit atomics support.
When adding atomics back in b64d92f1a, I added 64bit support as
optional; there wasn't yet a direct user in sight. That turned out to
be a bit short-sighted: it would already have been useful a number of times.
Add a fallback implementation of 64bit atomics, just like the one we
have for 32bit atomics.
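
For context, a rough sketch of what the simulated representation looks like. The actual struct definition lives in src/include/port/atomics/fallback.h and is not part of the diff below, so the exact field types here are an assumption; the sema/value pair is what the code in atomics.c operates on:

```c
/*
 * Sketch (assumed layout) of the fallback 64bit atomic: a spinlock or
 * semaphore slot protecting a plain 64bit value.  The real definition
 * is in src/include/port/atomics/fallback.h.
 */
typedef struct pg_atomic_uint64
{
	int			sema;		/* must hold an slock_t; see the StaticAssertStmt below */
	volatile uint64 value;	/* the actual value, only modified under the lock */
} pg_atomic_uint64;
```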
Additionally, optimize 64bit reads and writes on a number of platforms
where aligned writes of that size are atomic. Whether a platform has this
property can now be tested with PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY.
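
As a hedged sketch of what that optimization amounts to (the real code lives in src/include/port/atomics/generic.h, not in this diff): when an aligned 8-byte load is single-copy atomic, a plain read of the value suffices; otherwise the read is emulated with a strong compare-exchange that never changes the stored value but always returns the current one.

```c
/* Sketch only; the actual implementation is in port/atomics/generic.h. */
static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
#ifdef PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
	/* aligned 8-byte loads are atomic on this platform: plain read */
	return ptr->value;
#else
	/* emulate the read with a CAS that leaves the value unchanged */
	uint64		old = 0;

	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
	return old;
#endif
}
```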
Author: Andres Freund
Reviewed-By: Amit Kapila
Discussion: https://postgr.es/m/20160330230914.GH13305@awork2.anarazel.de
Diffstat (limited to 'src/backend/port/atomics.c')
-rw-r--r-- | src/backend/port/atomics.c | 65 |
1 file changed, 64 insertions, 1 deletion
diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c
index 756a2ef997a..231d847de79 100644
--- a/src/backend/port/atomics.c
+++ b/src/backend/port/atomics.c
@@ -89,7 +89,7 @@ void
 pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 {
 	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
-					 "size mismatch of atomic_flag vs slock_t");
+					 "size mismatch of atomic_uint32 vs slock_t");
 
 	/*
 	 * If we're using semaphore based atomic flags, be careful about nested
@@ -157,3 +157,66 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 }
 
 #endif   /* PG_HAVE_ATOMIC_U32_SIMULATION */
+
+
+#ifdef PG_HAVE_ATOMIC_U64_SIMULATION
+
+void
+pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
+{
+	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
+					 "size mismatch of atomic_uint64 vs slock_t");
+
+	/*
+	 * If we're using semaphore based atomic flags, be careful about nested
+	 * usage of atomics while a spinlock is held.
+	 */
+#ifndef HAVE_SPINLOCKS
+	s_init_lock_sema((slock_t *) &ptr->sema, true);
+#else
+	SpinLockInit((slock_t *) &ptr->sema);
+#endif
+	ptr->value = val_;
+}
+
+bool
+pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
+									uint64 *expected, uint64 newval)
+{
+	bool		ret;
+
+	/*
+	 * Do atomic op under a spinlock. It might look like we could just skip
+	 * the cmpxchg if the lock isn't available, but that'd just emulate a
+	 * 'weak' compare and swap. I.e. one that allows spurious failures. Since
+	 * several algorithms rely on a strong variant and that is efficiently
+	 * implementable on most major architectures let's emulate it here as
+	 * well.
+	 */
+	SpinLockAcquire((slock_t *) &ptr->sema);
+
+	/* perform compare/exchange logic */
+	ret = ptr->value == *expected;
+	*expected = ptr->value;
+	if (ret)
+		ptr->value = newval;
+
+	/* and release lock */
+	SpinLockRelease((slock_t *) &ptr->sema);
+
+	return ret;
+}
+
+uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+	uint64		oldval;
+
+	SpinLockAcquire((slock_t *) &ptr->sema);
+	oldval = ptr->value;
+	ptr->value += add_;
+	SpinLockRelease((slock_t *) &ptr->sema);
+	return oldval;
+}
+
+#endif   /* PG_HAVE_ATOMIC_U64_SIMULATION */
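
For illustration, a hypothetical caller of the public u64 API that these _impl functions back. The wrapper names pg_atomic_init_u64, pg_atomic_fetch_add_u64 and pg_atomic_compare_exchange_u64 come from port/atomics.h and are not part of this diff; the variable and function names below are made up for the example.

```c
#include "postgres.h"
#include "port/atomics.h"

/* hypothetical counter; in practice such a variable lives in shared memory */
static pg_atomic_uint64 example_counter;

static void
example_u64_usage(void)
{
	uint64		expected;

	pg_atomic_init_u64(&example_counter, 0);

	/* fetch-and-add returns the value *before* the addition */
	(void) pg_atomic_fetch_add_u64(&example_counter, 10);

	/*
	 * Strong compare-and-swap: succeeds only if the current value equals
	 * *expected; on failure, *expected is updated to the current value.
	 */
	expected = 10;
	if (pg_atomic_compare_exchange_u64(&example_counter, &expected, 42))
	{
		/* success: counter is now 42 */
	}
}
```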