Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/catalog/catalog.c         31
-rw-r--r--  src/backend/storage/buffer/bufmgr.c   70
-rw-r--r--  src/backend/storage/lmgr/lwlock.c     15
-rw-r--r--  src/backend/utils/adt/pg_locale.c      2
-rw-r--r--  src/backend/utils/cache/catcache.c    51
-rw-r--r--  src/backend/utils/cache/inval.c       14
-rw-r--r--  src/backend/utils/cache/relcache.c    26
-rw-r--r--  src/backend/utils/mb/mbutils.c         2
8 files changed, 187 insertions, 24 deletions
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index a6edf614606..35ebb0ccda4 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -34,6 +34,7 @@
#include "catalog/pg_namespace.h"
#include "catalog/pg_parameter_acl.h"
#include "catalog/pg_replication_origin.h"
+#include "catalog/pg_seclabel.h"
#include "catalog/pg_shdepend.h"
#include "catalog/pg_shdescription.h"
#include "catalog/pg_shseclabel.h"
@@ -136,6 +137,36 @@ IsCatalogRelationOid(Oid relid)
}
/*
+ * IsCatalogTextUniqueIndexOid
+ * True iff the relation identified by this OID is a catalog UNIQUE index
+ * having a column of type "text".
+ *
+ * The relcache must not use these indexes. Inserting into any UNIQUE
+ * index compares index keys while holding BUFFER_LOCK_EXCLUSIVE.
+ * bttextcmp() can search the COLLOID catcache. Depending on concurrent
+ * invalidation traffic, catcache can reach relcache builds. A backend
+ * would self-deadlock on LWLocks if the relcache build read the
+ * exclusive-locked buffer.
+ *
+ * To avoid itself becoming a cause of self-deadlock, this function doesn't
+ * read catalogs. Instead, it relies on a hard-coded list, backed by a
+ * regression test.
+ */
+bool
+IsCatalogTextUniqueIndexOid(Oid relid)
+{
+ switch (relid)
+ {
+ case ParameterAclParnameIndexId:
+ case ReplicationOriginNameIndex:
+ case SecLabelObjectIndexId:
+ case SharedSecLabelObjectIndexId:
+ return true;
+ }
+ return false;
+}
+
+/*
* IsInplaceUpdateRelation
* True iff core code performs inplace updates on the relation.
*
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 0b0e056eea2..1f2a9fe9976 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -40,6 +40,9 @@
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
+#ifdef USE_ASSERT_CHECKING
+#include "catalog/pg_tablespace_d.h"
+#endif
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "executor/instrument.h"
@@ -541,6 +544,10 @@ static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
ForkNumber forkNum, bool permanent);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
+#ifdef USE_ASSERT_CHECKING
+static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context);
+#endif
static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
@@ -4097,6 +4104,69 @@ CheckForBufferLeaks(void)
#endif
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Check for exclusive-locked catalog buffers. This is the core of
+ * AssertCouldGetRelation().
+ *
+ * A backend would self-deadlock on LWLocks if the catalog scan read the
+ * exclusive-locked buffer. The main threat is exclusive-locked buffers of
+ * catalogs used in relcache builds, because a catcache search on any
+ * catalog may build that catalog's relcache entry. We don't have an
+ * inventory of the catalogs relcache builds read, so just check buffers of
+ * most catalogs.
+ *
+ * It's better to minimize waits while holding an exclusive buffer lock, so it
+ * would be nice to broaden this check not to be catalog-specific. However,
+ * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
+ * read tables. That is deadlock-free as long as there's no loop in the
+ * dependency graph: modifying table A may cause an opclass to read table B,
+ * but it must not cause a read of table A.
+ */
+void
+AssertBufferLocksPermitCatalogRead(void)
+{
+ ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
+}
+
+static void
+AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context)
+{
+ BufferDesc *bufHdr;
+ BufferTag tag;
+ Oid relid;
+
+ if (mode != LW_EXCLUSIVE)
+ return;
+
+ if (!((BufferDescPadded *) lock > BufferDescriptors &&
+ (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
+ return; /* not a buffer lock */
+
+ bufHdr = (BufferDesc *)
+ ((char *) lock - offsetof(BufferDesc, content_lock));
+ tag = bufHdr->tag;
+
+ /*
+ * This relNumber==relid assumption holds until a catalog experiences
+ * VACUUM FULL or similar. After a command like that, relNumber will be
+ * in the normal (non-catalog) range, and we lose the ability to detect
+ * hazardous access to that catalog. Calling RelidByRelfilenumber() would
+ * close that gap, but RelidByRelfilenumber() could itself deadlock, given
+ * the locks we already hold.
+ */
+ relid = tag.relNumber;
+
+ if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
+ return;
+
+ Assert(!IsCatalogRelationOid(relid));
+ /* Shared rels are always catalogs: detect even after VACUUM FULL. */
+ Assert(tag.spcOid != GLOBALTABLESPACE_OID);
+}
+#endif
+
+
/*
* Helper routine to issue warnings when a buffer is unexpectedly pinned
*/
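For illustration, a minimal sketch of the intended calling discipline: AssertCouldGetRelation() goes before any step that may fall through to a catalog read, even when the common case is a cache hit. The helper below (inspect_relation) is hypothetical, not part of this patch, and it assumes the header change outside this diff (which is limited to src/backend) that declares AssertCouldGetRelation() and makes it a no-op in non-assert builds.

#include "postgres.h"

#include "access/relation.h"
#include "storage/lockdefs.h"
#include "utils/rel.h"
#include "utils/relcache.h"

/* Hypothetical caller; not part of this patch. */
static void
inspect_relation(Oid relid)
{
	Relation	rel;

	/*
	 * Verify a relcache miss could be serviced here: we are inside a
	 * transaction and hold no exclusive lock on a catalog buffer.
	 */
	AssertCouldGetRelation();

	rel = relation_open(relid, AccessShareLock);
	elog(DEBUG1, "relation \"%s\" has %d attributes",
		 RelationGetRelationName(rel),
		 RelationGetNumberOfAttributes(rel));
	relation_close(rel, AccessShareLock);
}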
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 13772c04568..5148ef982e3 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1962,6 +1962,21 @@ LWLockReleaseAll(void)
/*
+ * ForEachLWLockHeldByMe - run a callback for each held lock
+ *
+ * This is meant as debug support only.
+ */
+void
+ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context)
+{
+ int i;
+
+ for (i = 0; i < num_held_lwlocks; i++)
+ callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
+}
+
+/*
* LWLockHeldByMe - test whether my process holds a lock in any mode
*
* This is meant as debug support only.
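As an illustration of the callback contract of ForEachLWLockHeldByMe(), here is a hypothetical debugging helper (not part of this patch) that counts the LWLocks the current backend holds in exclusive mode; the callback signature matches the one declared above, and the declaration itself lives in a header outside this diff.

#include "postgres.h"

#include "storage/lwlock.h"

/* Callback: bump the counter for each exclusively held lock. */
static void
count_exclusive_cb(LWLock *lock, LWLockMode mode, void *context)
{
	int		   *n = (int *) context;

	if (mode == LW_EXCLUSIVE)
		(*n)++;
}

/* Hypothetical helper: number of LWLocks this backend holds exclusively. */
static int
CountExclusiveLWLocksHeldByMe(void)
{
	int			n = 0;

	ForEachLWLockHeldByMe(count_exclusive_cb, &n);
	return n;
}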
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 6e43b708c0f..a73aac4f98c 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1196,6 +1196,8 @@ pg_newlocale_from_collation(Oid collid)
if (!OidIsValid(collid))
elog(ERROR, "cache lookup failed for collation %u", collid);
+ AssertCouldGetRelation();
+
if (last_collation_cache_oid == collid)
return last_collation_cache_locale;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 9ad7681f155..6e3cad454c0 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1055,11 +1055,40 @@ RehashCatCacheLists(CatCache *cp)
}
/*
+ * ConditionalCatalogCacheInitializeCache
+ *
+ * Call CatalogCacheInitializeCache() if not yet done.
+ */
+pg_attribute_always_inline
+static void
+ConditionalCatalogCacheInitializeCache(CatCache *cache)
+{
+#ifdef USE_ASSERT_CHECKING
+ /*
+ * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
+ * for hashing. This isn't ideal. Since lookup_type_cache() both
+ * registers the callback and searches TYPEOID, reaching trouble likely
+ * requires OOM at an unlucky moment.
+ *
+ * InvalidateAttoptCacheCallback() runs outside transactions and likewise
+ * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
+ */
+ if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
+ IsTransactionState())
+ AssertCouldGetRelation();
+ else
+ Assert(cache->cc_tupdesc != NULL);
+#endif
+
+ if (unlikely(cache->cc_tupdesc == NULL))
+ CatalogCacheInitializeCache(cache);
+}
+
+/*
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
- * that the relcache entry can be opened at this point!
+ * descriptor and set up the hash and equality function links.
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
@@ -1194,8 +1223,7 @@ CatalogCacheInitializeCache(CatCache *cache)
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
if (touch_index &&
cache->id != AMOID &&
@@ -1374,16 +1402,12 @@ SearchCatCacheInternal(CatCache *cache,
dlist_head *bucket;
CatCTup *ct;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
-
Assert(cache->cc_nkeys == nkeys);
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
@@ -1668,8 +1692,7 @@ GetCatCacheHashValue(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
/*
* calculate the hash value
@@ -1720,8 +1743,7 @@ SearchCatCacheList(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
@@ -2390,8 +2412,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
continue;
/* Just in case cache hasn't finished initialization yet... */
- if (ccp->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(ccp);
+ ConditionalCatalogCacheInitializeCache(ccp);
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
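The catcache hunks all apply one pattern: assert that a catalog read would be safe before probing the cache, so the check fires on hits as well as on the miss path that actually reads catalogs. A minimal sketch of the same pattern for a hypothetical backend-private cache follows; ThingCache, ThingCacheEntry, and lookup_thing are illustrative names, while hash_create()/hash_search() are the regular dynahash calls.

#include "postgres.h"

#include "utils/hsearch.h"
#include "utils/relcache.h"

/* Hypothetical private cache keyed by OID. */
typedef struct ThingCacheEntry
{
	Oid			thingid;		/* hash key */
	int			value;
} ThingCacheEntry;

static HTAB *ThingCache = NULL;

static ThingCacheEntry *
lookup_thing(Oid thingid)
{
	ThingCacheEntry *entry;
	bool		found;

	/*
	 * Check before the cache probe, not after: a hit and a miss then
	 * exercise the same assertion, so unsafe callers are caught even when
	 * the cache happens to be warm.
	 */
	AssertCouldGetRelation();

	if (ThingCache == NULL)
	{
		HASHCTL		ctl;

		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(ThingCacheEntry);
		ThingCache = hash_create("Thing cache", 64, &ctl,
								 HASH_ELEM | HASH_BLOBS);
	}

	entry = (ThingCacheEntry *) hash_search(ThingCache, &thingid,
											HASH_ENTER, &found);
	if (!found)
		entry->value = 0;		/* stand-in for reading catalogs on a miss */
	return entry;
}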
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 4893dbdd7c8..c93679dfdcc 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -683,7 +683,8 @@ PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
@@ -752,7 +753,7 @@ PrepareInplaceInvalidationState(void)
{
InvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
@@ -928,6 +929,12 @@ InvalidateSystemCaches(void)
void
AcceptInvalidationMessages(void)
{
+#ifdef USE_ASSERT_CHECKING
+ /* message handlers shall access catalogs only during transactions */
+ if (IsTransactionState())
+ AssertCouldGetRelation();
+#endif
+
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
@@ -1436,6 +1443,9 @@ CacheInvalidateHeapTupleCommon(Relation relation,
Oid databaseId;
Oid relationId;
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
+
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
return;
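The AcceptInvalidationMessages() hunk encodes the rule stated in its comment: message handlers may access catalogs only during transactions. A hypothetical relcache-invalidation callback honoring that rule could look like the sketch below; my_relcache_callback and my_state_valid are illustrative, and the signature is the one expected by CacheRegisterRelcacheCallback(). Outside a transaction it only drops cached state and defers any catalog-driven rebuild to the next lookup.

#include "postgres.h"

#include "access/xact.h"
#include "utils/inval.h"
#include "utils/relcache.h"

static bool my_state_valid = false;

/*
 * Hypothetical callback registered with CacheRegisterRelcacheCallback().
 * Dropping the flag never touches catalogs; catalog access happens only
 * when we are inside a transaction.
 */
static void
my_relcache_callback(Datum arg, Oid relid)
{
	my_state_valid = false;		/* always safe: no catalog access */

	if (!IsTransactionState())
		return;

	/* From here on, catalog reads (and relcache builds) would be legal. */
	AssertCouldGetRelation();
}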
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 2905ae86a20..68ff67de549 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2056,6 +2056,23 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_isvalid = true;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * AssertCouldGetRelation
+ *
+ * Check safety of calling RelationIdGetRelation().
+ *
+ * In code that reads catalogs in the event of a cache miss, call this
+ * before checking the cache.
+ */
+void
+AssertCouldGetRelation(void)
+{
+ Assert(IsTransactionState());
+ AssertBufferLocksPermitCatalogRead();
+}
+#endif
+
/* ----------------------------------------------------------------
* Relation Descriptor Lookup Interface
@@ -2083,8 +2100,7 @@ RelationIdGetRelation(Oid relationId)
{
Relation rd;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* first try to find reldesc in the cache
@@ -2373,8 +2389,7 @@ RelationReloadNailed(Relation relation)
Assert(relation->rd_isnailed);
/* nailed indexes are handled by RelationReloadIndexInfo() */
Assert(relation->rd_rel->relkind == RELKIND_RELATION);
- /* can only reread catalog contents in a transaction */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* Redo RelationInitPhysicalAddr in case it is a mapped relation whose
@@ -2570,8 +2585,7 @@ static void
RelationRebuildRelation(Relation relation)
{
Assert(!RelationHasReferenceCountZero(relation));
- /* rebuilding requires access to the catalogs */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* there is no reason to ever rebuild a dropped relation */
Assert(relation->rd_droppedSubid == InvalidSubTransactionId);
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 885dc11d51d..5ddba5bccb4 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -310,7 +310,7 @@ InitializeClientEncoding(void)
{
Oid utf8_to_server_proc;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
utf8_to_server_proc =
FindDefaultConversionProc(PG_UTF8,
current_server_encoding);