Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/trigger.c |  30
-rw-r--r--  src/backend/commands/vacuum.c  | 138
2 files changed, 96 insertions, 72 deletions
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 09c593db37a..125a9d2063e 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.136.2.2 2003/05/19 17:23:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.136.2.3 2005/08/26 20:07:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1351,14 +1351,18 @@ GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo,
if (newSlot != NULL)
{
int test;
+ ItemPointerData update_ctid;
+ TransactionId update_xmax;
+
+ *newSlot = NULL;
/*
* mark tuple for update
*/
- *newSlot = NULL;
- tuple.t_self = *tid;
ltrmark:;
- test = heap_mark4update(relation, &tuple, &buffer, cid);
+ tuple.t_self = *tid;
+ test = heap_mark4update(relation, &tuple, &buffer,
+ &update_ctid, &update_xmax, cid);
switch (test)
{
case HeapTupleSelfUpdated:
@@ -1373,15 +1377,18 @@ ltrmark:;
ReleaseBuffer(buffer);
if (XactIsoLevel == XACT_SERIALIZABLE)
elog(ERROR, "Can't serialize access due to concurrent update");
- else if (!(ItemPointerEquals(&(tuple.t_self), tid)))
+ else if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
{
- TupleTableSlot *epqslot = EvalPlanQual(estate,
- relinfo->ri_RangeTableIndex,
- &(tuple.t_self));
-
- if (!(TupIsNull(epqslot)))
+ /* it was updated, so look at the updated version */
+ TupleTableSlot *epqslot;
+
+ epqslot = EvalPlanQual(estate,
+ relinfo->ri_RangeTableIndex,
+ &update_ctid,
+ update_xmax);
+ if (!TupIsNull(epqslot))
{
- *tid = tuple.t_self;
+ *tid = update_ctid;
*newSlot = epqslot;
goto ltrmark;
}
@@ -1418,6 +1425,7 @@ ltrmark:;
tuple.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
tuple.t_len = ItemIdGetLength(lp);
tuple.t_self = *tid;
+ tuple.t_tableOid = RelationGetRelid(relation);
}
result = heap_copytuple(&tuple);
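
For orientation, the trigger.c hunks above implement the usual mark-then-retry pattern: lock the target row, and if a concurrent transaction already updated it (in read committed mode), follow the ctid and xmax that heap_mark4update now reports back, re-evaluate the quals on that version via EvalPlanQual, and retry the lock there. The compile-only C sketch below is editorial and not part of the patch; every name in it (TidStub, try_lock_tuple, recheck_at_new_version) is a made-up stand-in rather than the PostgreSQL API.

    /* Editorial sketch, not part of the patch.  Every name below (TidStub,
     * try_lock_tuple, recheck_at_new_version) is a made-up stand-in; the real
     * code is the ltrmark loop above, built on heap_mark4update and
     * EvalPlanQual. */
    typedef struct { unsigned blkno, offnum; } TidStub;
    typedef unsigned int XidStub;

    enum LockResult
    {
        LOCK_OK,                    /* row locked, version unchanged */
        LOCK_SELF_UPDATED,          /* already updated by this command */
        LOCK_UPDATED_BY_OTHER       /* a concurrent updater won; chase its ctid */
    };

    /* Imaginary environment assumed by this sketch. */
    extern enum LockResult try_lock_tuple(TidStub tid, TidStub *update_ctid,
                                          XidStub *update_xmax);
    extern int recheck_at_new_version(TidStub new_tid, XidStub new_xmax,
                                      TidStub *passing_tid);

    /* Lock the row version at *tid for a BEFORE trigger.  In read committed
     * mode a concurrent update means: follow the updater's reported ctid/xmax
     * (not our stale t_self), re-apply the quals there, and retry on whatever
     * version passed.  (Serializable mode would raise an error instead.) */
    static int
    lock_row_for_trigger(TidStub *tid)
    {
        for (;;)
        {
            TidStub update_ctid;
            XidStub update_xmax;

            switch (try_lock_tuple(*tid, &update_ctid, &update_xmax))
            {
                case LOCK_OK:
                    return 1;       /* fire the trigger on this version */
                case LOCK_SELF_UPDATED:
                    return 0;       /* our own command already updated it */
                case LOCK_UPDATED_BY_OTHER:
                    if (!recheck_at_new_version(update_ctid, update_xmax, tid))
                        return 0;   /* the newer version fails the quals */
                    break;          /* *tid now names it; lock it next time */
            }
        }
    }

The point of the fix is visible in the LOCK_UPDATED_BY_OTHER arm: the retry chases the updater's reported ctid instead of the stale tuple.t_self that the old code compared against.
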
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 51082689250..0b40fbde618 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.244 2002/10/31 19:25:29 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.244.2.1 2005/08/26 20:07:16 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1646,8 +1646,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
Buffer Cbuf = buf;
bool freeCbuf = false;
bool chain_move_failed = false;
- Page Cpage;
- ItemId Citemid;
ItemPointerData Ctid;
HeapTupleData tp = tuple;
Size tlen = tuple_len;
@@ -1671,68 +1669,85 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of walk-along-page loop */
}
- vtmove = (VTupleMove) palloc(100 * sizeof(VTupleMoveData));
- num_vtmove = 0;
- free_vtmove = 100;
-
/*
* If this tuple is in the begin/middle of the chain then
- * we have to move to the end of chain.
+ * we have to move to the end of chain. As with any
+ * t_ctid chase, we have to verify that each new tuple
+ * is really the descendant of the tuple we came from.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tp.t_self),
&(tp.t_data->t_ctid))))
{
- Ctid = tp.t_data->t_ctid;
- if (freeCbuf)
- ReleaseBuffer(Cbuf);
- freeCbuf = true;
- Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&Ctid));
- Cpage = BufferGetPage(Cbuf);
- Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&Ctid));
- if (!ItemIdIsUsed(Citemid))
+ ItemPointerData nextTid;
+ TransactionId priorXmax;
+ Buffer nextBuf;
+ Page nextPage;
+ OffsetNumber nextOffnum;
+ ItemId nextItemid;
+ HeapTupleHeader nextTdata;
+
+ nextTid = tp.t_data->t_ctid;
+ priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
+ /* assume block# is OK (see heap_fetch comments) */
+ nextBuf = ReadBuffer(onerel,
+ ItemPointerGetBlockNumber(&nextTid));
+ nextPage = BufferGetPage(nextBuf);
+ /* If bogus or unused slot, assume tp is end of chain */
+ nextOffnum = ItemPointerGetOffsetNumber(&nextTid);
+ if (nextOffnum < FirstOffsetNumber ||
+ nextOffnum > PageGetMaxOffsetNumber(nextPage))
{
- /*
- * This means that in the middle of chain there
- * was tuple updated by older (than OldestXmin)
- * xaction and this tuple is already deleted by
- * me. Actually, upper part of chain should be
- * removed and seems that this should be handled
- * in scan_heap(), but it's not implemented at the
- * moment and so we just stop shrinking here.
- */
- elog(DEBUG1, "Child itemid in update-chain marked as unused - can't continue repair_frag");
- chain_move_failed = true;
- break; /* out of loop to move to chain end */
+ ReleaseBuffer(nextBuf);
+ break;
}
+ nextItemid = PageGetItemId(nextPage, nextOffnum);
+ if (!ItemIdIsUsed(nextItemid))
+ {
+ ReleaseBuffer(nextBuf);
+ break;
+ }
+ /* if not matching XMIN, assume tp is end of chain */
+ nextTdata = (HeapTupleHeader) PageGetItem(nextPage,
+ nextItemid);
+ if (!TransactionIdEquals(HeapTupleHeaderGetXmin(nextTdata),
+ priorXmax))
+ {
+ ReleaseBuffer(nextBuf);
+ break;
+ }
+ /* OK, switch our attention to the next tuple in chain */
tp.t_datamcxt = NULL;
- tp.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
- tp.t_self = Ctid;
- tlen = tp.t_len = ItemIdGetLength(Citemid);
- }
- if (chain_move_failed)
- {
+ tp.t_data = nextTdata;
+ tp.t_self = nextTid;
+ tlen = tp.t_len = ItemIdGetLength(nextItemid);
if (freeCbuf)
ReleaseBuffer(Cbuf);
- pfree(vtmove);
- break; /* out of walk-along-page loop */
+ Cbuf = nextBuf;
+ freeCbuf = true;
}
+ /* Set up workspace for planning the chain move */
+ vtmove = (VTupleMove) palloc(100 * sizeof(VTupleMoveData));
+ num_vtmove = 0;
+ free_vtmove = 100;
+
/*
- * Check if all items in chain can be moved
+ * Now, walk backwards up the chain (towards older tuples)
+ * and check if all items in chain can be moved. We record
+ * all the moves that need to be made in the vtmove array.
*/
for (;;)
{
Buffer Pbuf;
Page Ppage;
ItemId Pitemid;
- HeapTupleData Ptp;
+ HeapTupleHeader PTdata;
VTupleLinkData vtld,
*vtlp;
+ /* Identify a target page to move this tuple to */
if (to_vacpage == NULL ||
!enough_space(to_vacpage, tlen))
{
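
The rewritten chain walk in the hunk above no longer trusts a raw t_ctid link: it remembers the xmax that created the link (priorXmax) and requires the tuple it lands on to carry that same value as its xmin; otherwise the slot has been recycled and the current tuple is treated as the end of the chain. A minimal, compile-only sketch of that rule follows; it is editorial, not part of the patch, and uses stand-in types (a pointer in place of an ItemPointer, plain unsigned ints in place of TransactionIds) where the real code goes through ReadBuffer, PageGetItemId and HeapTupleHeaderGetXmin/Xmax.

    /* Editorial sketch, not part of the patch.  A pointer stands in for an
     * ItemPointer and plain unsigned ints for TransactionIds; the real code
     * re-reads the page through ReadBuffer/PageGetItemId and compares
     * HeapTupleHeaderGetXmin(next) against the remembered priorXmax. */
    typedef unsigned int XidStub;

    typedef struct VersionStub
    {
        XidStub             xmin;   /* transaction that created this version */
        XidStub             xmax;   /* transaction that replaced it, 0 if none */
        struct VersionStub *ctid;   /* newer version; points to itself at end */
    } VersionStub;

    /* Follow one t_ctid link.  Returns NULL when 'cur' must be treated as the
     * end of its chain: either there is no successor, or the slot the link
     * points at has been recycled (its xmin no longer matches the xmax that
     * created the link). */
    static VersionStub *
    chase_one_link(VersionStub *cur)
    {
        XidStub      priorXmax = cur->xmax;
        VersionStub *next;

        if (cur->ctid == cur || priorXmax == 0)
            return NULL;                    /* no newer version exists */
        next = cur->ctid;
        if (next->xmin != priorXmax)
            return NULL;                    /* slot reused; stop the walk */
        return next;
    }

Only after reaching a trustworthy end of chain does the patch allocate the vtmove workspace and plan the moves; executing them is deferred to a second pass, so a failed validation can abandon the chain without having modified any page.
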
@@ -1802,18 +1817,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "Parent itemid marked as unused");
- Ptp.t_datamcxt = NULL;
- Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
+ PTdata = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
/* ctid should not have changed since we saved it */
Assert(ItemPointerEquals(&(vtld.new_tid),
- &(Ptp.t_data->t_ctid)));
+ &(PTdata->t_ctid)));
/*
- * Read above about cases when !ItemIdIsUsed(Citemid)
+ * Read above about cases when !ItemIdIsUsed(nextItemid)
* (child item is removed)... Due to the fact that at
* the moment we don't remove unuseful part of
- * update-chain, it's possible to get too old parent
+ * update-chain, it's possible to get non-matching parent
* row here. Like as in the case which caused this
* problem, we stop shrinking here. I could try to
* find real parent row but want not to do it because
@@ -1821,7 +1835,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* and we are too close to 6.5 release. - vadim
* 06/11/99
*/
- if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
+ if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(PTdata),
HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
@@ -1829,8 +1843,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
chain_move_failed = true;
break; /* out of check-all-items loop */
}
- tp.t_datamcxt = Ptp.t_datamcxt;
- tp.t_data = Ptp.t_data;
+ tp.t_datamcxt = NULL;
+ tp.t_data = PTdata;
tlen = tp.t_len = ItemIdGetLength(Pitemid);
if (freeCbuf)
ReleaseBuffer(Cbuf);
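
The two hunks above apply the same descendant test in the opposite direction: a candidate parent (the older row whose t_ctid points at the current tuple) is trusted only if its xmax equals the current tuple's xmin; otherwise the chain move is abandoned rather than risk stitching unrelated rows together. A tiny editorial sketch of that predicate, again with stand-in types rather than HeapTupleHeader:

    /* Editorial sketch, not part of the patch: the backward-direction check.
     * XidStub stands in for TransactionId; the real test compares
     * HeapTupleHeaderGetXmax(parent) with HeapTupleHeaderGetXmin(child). */
    typedef unsigned int XidStub;

    typedef struct
    {
        XidStub xmin;               /* transaction that created this version */
        XidStub xmax;               /* transaction that replaced it, 0 if live */
    } RowVersionStub;

    /* True only if 'parent' is the immediate predecessor of 'child', i.e. the
     * update that deleted 'parent' is the one that inserted 'child'. */
    static int
    is_immediate_parent(const RowVersionStub *parent, const RowVersionStub *child)
    {
        return parent->xmax != 0 && parent->xmax == child->xmin;
    }
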
@@ -1865,6 +1879,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
for (ti = 0; ti < num_vtmove; ti++)
{
VacPage destvacpage = vtmove[ti].vacpage;
+ Page Cpage;
+ ItemId Citemid;
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
@@ -1953,16 +1969,27 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
InvalidOffsetNumber,
LP_USED);
if (newoff == InvalidOffsetNumber)
- {
elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
(unsigned long) tuple_len, destvacpage->blkno);
- }
newitemid = PageGetItemId(ToPage, newoff);
+ /* drop temporary copy, and point to the version on the dest page */
pfree(newtup.t_data);
newtup.t_datamcxt = NULL;
newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
+
ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);
+ /*
+ * Set new tuple's t_ctid pointing to itself for last
+ * tuple in chain, and to next tuple in chain
+ * otherwise.
+ */
+ if (!ItemPointerIsValid(&Ctid))
+ newtup.t_data->t_ctid = newtup.t_self;
+ else
+ newtup.t_data->t_ctid = Ctid;
+ Ctid = newtup.t_self;
+
/* XLOG stuff */
if (!onerel->rd_istemp)
{
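
The block relocated in the hunk above now sets the moved tuple's t_ctid before the WAL record is assembled, presumably so that the logged image already carries the correct link instead of being patched up afterwards, as the removed block in the next hunk used to do. The linking rule itself is simple: the chain is moved starting from its newest member, which links to itself, and each older member links to the copy moved just before it. An editorial sketch with a stand-in TID type:

    /* Editorial sketch, not part of the patch: relinking t_ctid while moving
     * an update chain.  CtidStub stands in for ItemPointerData; offnum == 0
     * plays the role of an invalid pointer (InvalidOffsetNumber). */
    typedef struct
    {
        unsigned blkno;
        unsigned offnum;            /* 0 means "not valid yet" */
    } CtidStub;

    /* Call once per moved tuple, newest chain member first.  'newself' is
     * where the copy has just been placed; '*link' carries the previously
     * moved (newer) tuple's new location between calls and starts out
     * invalid.  Returns the value to store into the moved copy's t_ctid. */
    static CtidStub
    relink_moved_tuple(CtidStub newself, CtidStub *link)
    {
        CtidStub ctid = (link->offnum != 0) ? *link : newself;

        *link = newself;            /* the next (older) tuple will point here */
        return ctid;
    }

The real code keeps the running location in Ctid and uses ItemPointerIsValid/ItemPointerSet on newtup.t_self, as shown in the hunk above.
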
@@ -1992,17 +2019,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (destvacpage->blkno > last_move_dest_block)
last_move_dest_block = destvacpage->blkno;
- /*
- * Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain
- * otherwise.
- */
- if (!ItemPointerIsValid(&Ctid))
- newtup.t_data->t_ctid = newtup.t_self;
- else
- newtup.t_data->t_ctid = Ctid;
- Ctid = newtup.t_self;
-
num_moved++;
/*