summary refs log tree commit diff
path: root/src/backend/optimizer
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/costsize.c  11
-rw-r--r--  src/backend/optimizer/util/plancat.c   11
2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index c67398d5b0d..677c86e7516 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.135 2004/10/23 00:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.136 2004/12/02 01:34:17 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -616,6 +616,15 @@ cost_material(Path *path,
}
/*
+ * Charge a very small amount per inserted tuple, to reflect bookkeeping
+ * costs. We use cpu_tuple_cost/10 for this. This is needed to break
+ * the tie that would otherwise exist between nestloop with A outer,
+ * materialized B inner and nestloop with B outer, materialized A inner.
+ * The extra cost ensures we'll prefer materializing the smaller rel.
+ */
+ startup_cost += cpu_tuple_cost * 0.1 * tuples;
+
+ /*
* Also charge a small amount per extracted tuple. We use
* cpu_tuple_cost so that it doesn't appear worthwhile to materialize
* a bare seqscan.
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index b4f42ac1235..9c9f586727a 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.98 2004/12/01 19:00:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.99 2004/12/02 01:34:17 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -268,6 +268,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* but is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
+ *
+ * Note: this code intentionally disregards alignment
+ * considerations, because (a) that would be gilding the
+ * lily considering how crude the estimate is, and (b)
+ * it creates platform dependencies in the default plans
+ * which are kind of a headache for regression testing.
*/
int32 tuple_width = 0;
int i;
@@ -291,8 +297,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
attr_widths[i] = item_width;
tuple_width += item_width;
}
- tuple_width = MAXALIGN(tuple_width);
- tuple_width += MAXALIGN(sizeof(HeapTupleHeaderData));
+ tuple_width += sizeof(HeapTupleHeaderData);
tuple_width += sizeof(ItemPointerData);
/* note: integer division is intentional here */
density = (BLCKSZ - sizeof(PageHeaderData)) / tuple_width;