author    Tom Lane <tgl@sss.pgh.pa.us>    2006-02-11 23:31:34 +0000
committer Tom Lane <tgl@sss.pgh.pa.us>    2006-02-11 23:31:34 +0000
commit    fd267c1ebc363ae6b1bf586794fa5cc9e8cca43c (patch)
tree      13c387759e392e99689d2a37a55db6a84f139bf5 /src/backend/access/nbtree/nbtree.c
parent    a02f6ce33b593039213a8987ba651f121b64464c (diff)
Skip ambulkdelete scan if there's nothing to delete and the index is not
partial.

None of the existing AMs do anything useful except counting tuples when
there's nothing to delete, and we can get a tuple count from the heap as
long as it's not a partial index.  (hash actually can skip anyway because
it maintains a tuple count in the index metapage.)  GIST is not currently
able to exploit this optimization because, due to failure to index NULLs,
GIST is always effectively partial.  Possibly we should fix that sometime.

Simon Riggs w/ some review by Tom Lane.
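The "effectively partial" remark comes from vac_is_partial_index() in
vacuum.c, which the patched btbulkdelete consults below: an index counts
as partial not only when it carries a predicate but also when its AM
cannot index NULLs, and GIST falls into the second bucket.  A rough
sketch of that helper as it stood in this era (paraphrased from memory
of vacuum.c, not guaranteed verbatim):

bool
vac_is_partial_index(Relation indrel)
{
	/* An AM that cannot index NULLs misses some heap tuples by design */
	if (!indrel->rd_am->amindexnulls)
		return true;

	/* Otherwise, "partial" means the index has a predicate */
	if (!heap_attisnull(indrel->rd_indextuple, Anum_pg_index_indpred))
		return true;

	return false;
}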
Diffstat (limited to 'src/backend/access/nbtree/nbtree.c')
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 4fb70302d7a..e28faef141d 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.138 2006/02/11 17:14:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.139 2006/02/11 23:31:33 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -564,8 +564,22 @@ btbulkdelete(PG_FUNCTION_ARGS)
* further to its right, which the indexscan will have no pin on.) We can
* skip obtaining exclusive lock on empty pages though, since no indexscan
* could be stopped on those.
+ *
+ * We can skip the scan entirely if there's nothing to delete (indicated
+ * by callback_state == NULL) and the index isn't partial. For a partial
+ * index we must scan in order to derive a trustworthy tuple count.
*/
- buf = _bt_get_endpoint(rel, 0, false);
+ if (callback_state || vac_is_partial_index(rel))
+ {
+ buf = _bt_get_endpoint(rel, 0, false);
+ }
+ else
+ {
+ /* skip scan and set flag for btvacuumcleanup */
+ buf = InvalidBuffer;
+ num_index_tuples = -1;
+ }
+
if (BufferIsValid(buf)) /* check for empty index */
{
for (;;)
@@ -836,6 +850,13 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
stats->pages_deleted = pages_deleted;
stats->pages_free = nFreePages;
+ /* if btbulkdelete skipped the scan, use heap's tuple count */
+ if (stats->num_index_tuples < 0)
+ {
+ Assert(info->num_heap_tuples >= 0);
+ stats->num_index_tuples = info->num_heap_tuples;
+ }
+
PG_RETURN_POINTER(stats);
}
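Taken together, the two hunks are one handshake: when btbulkdelete skips
the scan it plants -1 in num_index_tuples as a "not counted" sentinel,
and btvacuumcleanup later resolves that sentinel from the heap's tuple
count, which is a safe substitute precisely because a non-partial btree
holds exactly one entry per live heap tuple.  A condensed view of the
handshake (abbreviated from the patch above, not complete source):

/* btbulkdelete: nothing to delete and the index is not partial */
buf = InvalidBuffer;		/* skip walking the leaf level entirely */
num_index_tuples = -1;		/* sentinel: tuple count not determined */

/* btvacuumcleanup: resolve the sentinel from the heap's count */
if (stats->num_index_tuples < 0)
{
	Assert(info->num_heap_tuples >= 0);
	/* one index entry per heap tuple in a non-partial index */
	stats->num_index_tuples = info->num_heap_tuples;
}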