summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTom Lane <tgl@sss.pgh.pa.us>2012-10-19 15:24:15 -0400
committerTom Lane <tgl@sss.pgh.pa.us>2012-10-19 15:24:15 -0400
commitd01a7442190686a981c0a5ce330e962d8083ac4f (patch)
treeb7a2d4f1c338ce56bc152bc5e5e6005b92927e7e
parent823f83d3d53ad2c0e799b1953ed9a1955840f11c (diff)
Fix hash_search to avoid corruption of the hash table on out-of-memory.
An out-of-memory error during expand_table() on a palloc-based hash table would leave a partially-initialized entry in the table. This would not be harmful for transient hash tables, since they'd get thrown away anyway at transaction abort. But for long-lived hash tables, such as the relcache hash, this would effectively corrupt the table, leading to crash or other misbehavior later. To fix, rearrange the order of operations so that table enlargement is attempted before we insert a new entry, rather than after adding it to the hash table. Problem discovered by Hitoshi Harada, though this is a bit different from his proposed patch.
-rw-r--r--  src/backend/utils/hash/dynahash.c | 41
1 file changed, 25 insertions, 16 deletions
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index d9027291ee3..6f9f21fa308 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -821,6 +821,27 @@ hash_search_with_hash_value(HTAB *hashp,
#endif
/*
+ * If inserting, check if it is time to split a bucket.
+ *
+ * NOTE: failure to expand table is not a fatal error, it just means we
+ * have to run at higher fill factor than we wanted. However, if we're
+ * using the palloc allocator then it will throw error anyway on
+ * out-of-memory, so we must do this before modifying the table.
+ */
+ if (action == HASH_ENTER || action == HASH_ENTER_NULL)
+ {
+ /*
+ * Can't split if running in partitioned mode, nor if frozen, nor if
+ * table is the subject of any active hash_seq_search scans. Strange
+ * order of these tests is to try to check cheaper conditions first.
+ */
+ if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
+ hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+ !has_seq_scans(hashp))
+ (void) expand_table(hashp);
+ }
+
+ /*
* Do the initial lookup
*/
bucket = calc_bucket(hctl, hashvalue);
@@ -940,24 +961,12 @@ hash_search_with_hash_value(HTAB *hashp,
currBucket->hashvalue = hashvalue;
hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
- /* caller is expected to fill the data field on return */
-
/*
- * Check if it is time to split a bucket. Can't split if running
- * in partitioned mode, nor if table is the subject of any active
- * hash_seq_search scans. Strange order of these tests is to try
- * to check cheaper conditions first.
+ * Caller is expected to fill the data field on return. DO NOT
+ * insert any code that could possibly throw error here, as doing
+ * so would leave the table entry incomplete and hence corrupt the
+ * caller's data structure.
*/
- if (!IS_PARTITIONED(hctl) &&
- hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
- !has_seq_scans(hashp))
- {
- /*
- * NOTE: failure to expand table is not a fatal error, it just
- * means we have to run at higher fill factor than we wanted.
- */
- expand_table(hashp);
- }
return (void *) ELEMENTKEY(currBucket);
}