summaryrefslogtreecommitdiff
path: root/src/backend/commands
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/commands')
-rw-r--r--src/backend/commands/Makefile34
-rw-r--r--src/backend/commands/_deadcode/recipe.c1318
-rw-r--r--src/backend/commands/_deadcode/recipe.h20
-rw-r--r--src/backend/commands/_deadcode/version.c346
-rw-r--r--src/backend/commands/aggregatecmds.c208
-rw-r--r--src/backend/commands/analyze.c1794
-rw-r--r--src/backend/commands/async.c896
-rw-r--r--src/backend/commands/cluster.c255
-rw-r--r--src/backend/commands/comment.c809
-rw-r--r--src/backend/commands/copy.c1363
-rw-r--r--src/backend/commands/dbcommands.c761
-rw-r--r--src/backend/commands/define.c233
-rw-r--r--src/backend/commands/explain.c914
-rw-r--r--src/backend/commands/functioncmds.c587
-rw-r--r--src/backend/commands/indexcmds.c781
-rw-r--r--src/backend/commands/lockcmds.c70
-rw-r--r--src/backend/commands/operatorcmds.c253
-rw-r--r--src/backend/commands/portalcmds.c220
-rw-r--r--src/backend/commands/proclang.c174
-rw-r--r--src/backend/commands/schemacmds.c141
-rw-r--r--src/backend/commands/sequence.c889
-rw-r--r--src/backend/commands/tablecmds.c3094
-rw-r--r--src/backend/commands/trigger.c2163
-rw-r--r--src/backend/commands/typecmds.c679
-rw-r--r--src/backend/commands/user.c1525
-rw-r--r--src/backend/commands/vacuum.c2944
-rw-r--r--src/backend/commands/vacuumlazy.c1120
-rw-r--r--src/backend/commands/variable.c587
-rw-r--r--src/backend/commands/view.c268
29 files changed, 0 insertions, 24446 deletions
diff --git a/src/backend/commands/Makefile b/src/backend/commands/Makefile
deleted file mode 100644
index 190b0fd64f6..00000000000
--- a/src/backend/commands/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-#-------------------------------------------------------------------------
-#
-# Makefile--
-# Makefile for backend/commands
-#
-# IDENTIFICATION
-# $Header: /cvsroot/pgsql/src/backend/commands/Makefile,v 1.28 2002/04/15 05:22:03 tgl Exp $
-#
-#-------------------------------------------------------------------------
-
-subdir = src/backend/commands
-top_builddir = ../../..
-include $(top_builddir)/src/Makefile.global
-
-OBJS = aggregatecmds.o analyze.o async.o cluster.o comment.o copy.o \
- dbcommands.o define.o explain.o functioncmds.o \
- indexcmds.o lockcmds.o operatorcmds.o portalcmds.o proclang.o \
- schemacmds.o sequence.o tablecmds.o trigger.o typecmds.o user.o \
- vacuum.o vacuumlazy.o variable.o view.o
-
-all: SUBSYS.o
-
-SUBSYS.o: $(OBJS)
- $(LD) $(LDREL) $(LDOUT) SUBSYS.o $(OBJS)
-
-depend dep:
- $(CC) -MM $(CFLAGS) *.c >depend
-
-clean:
- rm -f SUBSYS.o $(OBJS)
-
-ifeq (depend,$(wildcard depend))
-include depend
-endif
diff --git a/src/backend/commands/_deadcode/recipe.c b/src/backend/commands/_deadcode/recipe.c
deleted file mode 100644
index f1b3d84ab48..00000000000
--- a/src/backend/commands/_deadcode/recipe.c
+++ /dev/null
@@ -1,1318 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * recipe.c
- * routines for handling execution of Tioga recipes
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.17 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "catalog/pg_type.h"
-#include "commands/recipe.h"
-#include "executor/executor.h"
-#include "libpq/libpq-be.h"
-#include "nodes/execnodes.h"
-#include "nodes/makefuncs.h"
-#include "nodes/parsenodes.h"
-#include "nodes/plannodes.h"
-#include "optimizer/planner.h"
-#include "parser/parse_node.h"
-#include "rewrite/rewriteHandler.h"
-#include "rewrite/rewriteManip.h"
-#include "tcop/dest.h"
-#include "tcop/pquery.h"
-#include "utils/builtins.h"
-#include "utils/relcache.h"
-
-/* from tcop/postgres.c */
-extern CommandDest whereToSendOutput;
-
-#ifndef TIOGA
-
-void
-beginRecipe(RecipeStmt *stmt)
-{
- elog(WARNING, "You must compile with TIOGA defined in order to use recipes\n");
-}
-
-#else
-
-#include "tioga/tgRecipe.h"
-
-#define DEBUG_RECIPE 1
-
-/* structure to keep track of the tee node plans */
-typedef struct _teePlanInfo
-{
- char *tpi_relName;
- Query *tpi_parsetree;
- Plan *tpi_plan;
-} TeePlanInfo;
-
-typedef struct _teeInfo
-{
- int num;
- TeePlanInfo *val;
-} TeeInfo;
-
-QueryTreeList *appendQlist(QueryTreeList * q1, QueryTreeList * q2);
-void OffsetVarAttno(Node *node, int varno, int offset);
-
-static void appendTeeQuery(TeeInfo * teeInfo,
- QueryTreeList * q,
- char *teeNodeName);
-
-static Plan *replaceTeeScans(Plan *plan,
- Query *parsetree,
- TeeInfo * teeInfo);
-static void replaceSeqScan(Plan *plan,
- Plan *parent,
- int rt_ind,
- Plan *tplan);
-
-static void tg_rewriteQuery(TgRecipe * r, TgNode * n,
- QueryTreeList * q,
- QueryTreeList * inputQlist);
-static Node *tg_replaceNumberedParam(Node *expression,
- int pnum,
- int rt_ind,
- char *teeRelName);
-static Node *tg_rewriteParamsInExpr(Node *expression,
- QueryTreeList * inputQlist);
-static QueryTreeList *tg_parseSubQuery(TgRecipe * r,
- TgNode * n,
- TeeInfo * teeInfo);
-static QueryTreeList *tg_parseTeeNode(TgRecipe * r,
- TgNode * n,
- int i,
- QueryTreeList * qList,
- TeeInfo * teeInfo);
-
-
-/*
- The Tioga recipe rewrite algorithm:
-
- To parse a Tioga recipe, we start from an eye node and go backwards through
- its input nodes. To rewrite a Tioga node, we do the following:
-
- 1) parse the node we're at in the standard way (calling parser() )
- 2) rewrite its input nodes recursively using Tioga rewrite
- 3) now, with the rewritten input parse trees and the original parse tree
- of the node, we rewrite the the node.
- To do the rewrite, we use the target lists, range tables, and
- qualifications of the input parse trees
-*/
-
-/*
- * beginRecipe:
- * this is the main function to recipe execution
- * this function is invoked for EXECUTE RECIPE ... statements
- *
- * takes in a RecipeStmt structure from the parser
- * and returns a list of cursor names
- */
-
-void
-beginRecipe(RecipeStmt *stmt)
-{
- TgRecipe *r;
- int i,
- numTees;
- QueryTreeList *qList;
- char portalName[1024];
-
- Plan *plan;
- TupleDesc attinfo;
- QueryDesc *queryDesc;
- Query *parsetree;
-
- TeeInfo *teeInfo;
-
- /*
- * retrieveRecipe() reads the recipe from the database and returns a
- * TgRecipe* structure we can work with
- */
-
- r = retrieveRecipe(stmt->recipeName);
-
- if (r == NULL)
- return;
-
- /* find the number of tees in the recipe */
- numTees = r->tees->num;
-
- if (numTees > 0)
- {
- /* allocate a teePlan structure */
- teeInfo = (TeeInfo *) malloc(sizeof(TeeInfo));
- teeInfo->num = numTees;
- teeInfo->val = (TeePlanInfo *) malloc(numTees * sizeof(TeePlanInfo));
- for (i = 0; i < numTees; i++)
- {
- teeInfo->val[i].tpi_relName = r->tees->val[i]->nodeName;
- teeInfo->val[i].tpi_parsetree = NULL;
- teeInfo->val[i].tpi_plan = NULL;
- }
- }
- else
- teeInfo = NULL;
-
- /*
- * for each viewer in the recipe, go backwards from each viewer input
- * and generate a plan. Attach the plan to cursors.
- */
- for (i = 0; i < r->eyes->num; i++)
- {
- TgNodePtr e;
-
- e = r->eyes->val[i];
- if (e->inNodes->num > 1)
- {
- elog(WARNING,
- "beginRecipe: Currently eyes cannot have more than one input");
- }
- if (e->inNodes->num == 0)
- {
- /* no input to this eye, skip it */
- continue;
- }
-
-#ifdef DEBUG_RECIPE
- elog(WARNING, "beginRecipe: eyes[%d] = %s\n", i, e->nodeName);
-#endif /* DEBUG_RECIPE */
-
- qList = tg_parseSubQuery(r, e->inNodes->val[0], teeInfo);
-
- if (qList == NULL)
- {
- /* eye is directly connected to a tee node */
- /* XXX TODO: handle this case */
- }
-
- /* now, plan the queries */
-
- /*
- * should really do everything pg_plan() does, but for now, we
- * skip the rule rewrite and time qual stuff
- */
-
- /*
- * 1) plan the main query, everything from an eye node back to a
- * Tee
- */
- parsetree = qList->qtrees[0];
-
- /*
- * before we plan, we want to see all the changes we did, during
- * the rewrite phase, such as creating the tee tables,
- * CommandCounterIncrement() allows us to see the changes
- */
- CommandCounterIncrement();
-
- plan = planner(parsetree);
-
- /*
- * 2) plan the tee queries, (subgraphs rooted from a Tee) by the
- * time the eye is processed, all tees that contribute to that eye
- * will have been included in the teeInfo list
- */
- if (teeInfo)
- {
- int t;
- Plan *tplan;
- Tee *newplan;
-
- for (t = 0; t < teeInfo->num; t++)
- {
- if (teeInfo->val[t].tpi_plan == NULL)
- {
- /* plan it in the usual fashion */
- tplan = planner(teeInfo->val[t].tpi_parsetree);
-
- /* now add a tee node to the root of the plan */
- elog(WARNING, "adding tee plan node to the root of the %s\n",
- teeInfo->val[t].tpi_relName);
- newplan = (Tee *) makeNode(Tee);
- newplan->plan.targetlist = tplan->targetlist;
- newplan->plan.qual = NULL; /* tplan->qual; */
- newplan->plan.lefttree = tplan;
- newplan->plan.righttree = NULL;
- newplan->leftParent = NULL;
- newplan->rightParent = NULL;
-
- /*
- * the range table of the tee is the range table of
- * the tplan
- */
- newplan->rtentries = teeInfo->val[t].tpi_parsetree->rtable;
- strcpy(newplan->teeTableName,
- teeInfo->val[t].tpi_relName);
- teeInfo->val[t].tpi_plan = (Plan *) newplan;
- }
- }
-
- /*
- * 3) replace the tee table scans in the main plan with actual
- * tee plannodes
- */
-
- plan = replaceTeeScans(plan, parsetree, teeInfo);
-
- } /* if (teeInfo) */
-
- /* define a portal for this viewer input */
- /* for now, eyes can only have one input */
- snprintf(portalName, 1024, "%s%d", e->nodeName, 0);
-
- queryDesc = CreateQueryDesc(parsetree,
- plan,
- whereToSendOutput);
-
- /*
- * call ExecStart to prepare the plan for execution
- */
- attinfo = ExecutorStart(queryDesc, NULL);
-
- ProcessPortal(portalName,
- parsetree,
- plan,
- attinfo,
- whereToSendOutput);
- elog(WARNING, "beginRecipe: cursor named %s is now available", portalName);
- }
-
-}
-
-
-
-/*
- * tg_rewriteQuery -
- * r - the recipe being rewritten
- * n - the node that we're current at
- * q - a QueryTree List containing the parse tree of the node
- * inputQlist - the parsetrees of its input nodes,
- * the size of inputQlist must be the same as the
- * number of input nodes. Some elements in the inpuQlist
- * may be null if the inputs to those nodes are unconnected
- *
- * this is the main routine for rewriting the recipe queries
- * the original query tree 'q' is modified
- */
-
-static void
-tg_rewriteQuery(TgRecipe * r,
- TgNode * n,
- QueryTreeList * q,
- QueryTreeList * inputQlist)
-{
- Query *orig;
- Query *inputQ;
- int i;
- List *rtable;
- List *input_rtable;
- int rt_length;
-
- /* orig is the original parse tree of the node */
- orig = q->qtrees[0];
-
-
- /*
- * step 1:
- *
- * form a combined range table from all the range tables in the original
- * query as well as the input nodes
- *
- * form a combined qualification from the qual in the original plus the
- * quals of the input nodes
- */
-
- /* start with the original range table */
- rtable = orig->rtable;
- rt_length = length(rtable);
-
- for (i = 0; i < n->inNodes->num; i++)
- {
- if (n->inNodes->val[i] != NULL &&
- n->inNodes->val[i]->nodeType != TG_TEE_NODE)
- {
- inputQ = inputQlist->qtrees[i];
- input_rtable = inputQ->rtable;
-
- /*
- * need to offset the var nodes in the qual and targetlist
- * because they are indexed off the original rtable
- */
- OffsetVarNodes((Node *) inputQ->qual, rt_length, 0);
- OffsetVarNodes((Node *) inputQ->targetList, rt_length, 0);
-
- /* append the range tables from the children nodes */
- rtable = nconc(rtable, input_rtable);
-
- /*
- * append the qualifications of the child node into the
- * original qual list
- */
- AddQual(orig, inputQ->qual);
- }
- }
- orig->rtable = rtable;
-
- /*
- * step 2: rewrite the target list of the original parse tree if there
- * are any references to params, replace them with the appropriate
- * target list entry of the children node
- */
- if (orig->targetList != NIL)
- {
- List *tl;
- TargetEntry *tle;
-
- foreach(tl, orig->targetList)
- {
- tle = lfirst(tl);
- if (tle->resdom != NULL)
- tle->expr = tg_rewriteParamsInExpr(tle->expr, inputQlist);
- }
- }
-
- /*
- * step 3: rewrite the qual of the original parse tree if there are
- * any references to params, replace them with the appropriate target
- * list entry of the children node
- */
- if (orig->qual)
- {
- if (nodeTag(orig->qual) == T_List)
- elog(ERROR, "tg_rewriteQuery: Whoa! why is my qual a List???");
- orig->qual = tg_rewriteParamsInExpr(orig->qual, inputQlist);
- }
-
- /*
- * at this point, we're done with the rewrite, the querytreelist q has
- * been modified
- */
-
-}
-
-
-/* tg_replaceNumberedParam:
-
- this procedure replaces the specified numbered param with a
- reference to a range table
-
- this procedure recursively calls itself
-
- it returns a (possibly modified) Node*.
-
-*/
-static Node *
-tg_replaceNumberedParam(Node *expression,
- int pnum, /* the number of the parameter */
- int rt_ind, /* the range table index */
- char *teeRelName) /* the relname of the tee
- * table */
-{
- TargetEntry *param_tle;
- Param *p;
- Var *newVar,
- *oldVar;
-
- if (expression == NULL)
- return NULL;
-
- switch (nodeTag(expression))
- {
- case T_Param:
- {
- /*
- * the node is a parameter, substitute the entry from the
- * target list of the child that corresponds to the
- * parameter number
- */
- p = (Param *) expression;
-
- /* we only deal with the case of numbered parameters */
- if (p->paramkind == PARAM_NUM && p->paramid == pnum)
- {
-
- if (p->param_tlist)
- {
- /*
- * we have a parameter with an attribute like
- * $N.foo so replace it with a new var node
- */
-
- /* param tlist can only have one entry in them! */
- param_tle = (TargetEntry *) (lfirst(p->param_tlist));
- oldVar = (Var *) param_tle->expr;
- oldVar->varno = rt_ind;
- oldVar->varnoold = rt_ind;
- return (Node *) oldVar;
- }
- else
- {
- /* we have $N without the .foo */
- bool defined;
- bool isRel;
-
- /*
- * TODO here, we need to check to see whether the
- * type of the tee is a complex type (relation) or
- * a simple type
- */
-
- /*
- * if it is a simple type, then we need to get the
- * "result" attribute from the tee relation
- */
-
- isRel = (typeidTypeRelid(p->paramtype) != 0);
- if (isRel)
- {
- newVar = makeVar(rt_ind,
- 0, /* the whole tuple */
- TypeGet(teeRelName, &defined),
- -1,
- 0,
- rt_ind,
- 0);
- return (Node *) newVar;
- }
- else
- newVar = makeVar(rt_ind,
- 1, /* just the first field,
- * which is 'result' */
- TypeGet(teeRelName, &defined),
- -1,
- 0,
- rt_ind,
- 0);
- return (Node *) newVar;
-
- }
- }
- else
- elog(WARNING, "tg_replaceNumberedParam: unexpected paramkind value of %d", p->paramkind);
- }
- break;
- case T_Expr:
- {
- /*
- * the node is an expression, we need to recursively call
- * ourselves until we find parameter nodes
- */
- List *l;
- Expr *expr = (Expr *) expression;
- List *newArgs;
-
- /*
- * we have to make a new args lists because Params can be
- * replaced by Var nodes in tg_replaceNumberedParam()
- */
- newArgs = NIL;
-
- /*
- * we only care about argument to expressions, it doesn't
- * matter when the opType is
- */
- /* recursively rewrite the arguments of this expression */
- foreach(l, expr->args)
- {
- newArgs = lappend(newArgs,
- tg_replaceNumberedParam(lfirst(l),
- pnum,
- rt_ind,
- teeRelName));
- }
- /* change the arguments of the expression */
- expr->args = newArgs;
- }
- break;
- default:
- {
- /* ignore other expr types */
- }
- }
-
- return expression;
-}
-
-
-
-
-
-/* tg_rewriteParamsInExpr:
-
- rewrite the params in expressions by using the targetlist entries
- from the input parsetrees
-
- this procedure recursively calls itself
-
- it returns a (possibly modified) Node*.
-
-*/
-static Node *
-tg_rewriteParamsInExpr(Node *expression, QueryTreeList * inputQlist)
-{
- List *tl;
- TargetEntry *param_tle,
- *tle;
- Param *p;
- int childno;
- char *resname;
-
- if (expression == NULL)
- return NULL;
-
- switch (nodeTag(expression))
- {
- case T_Param:
- {
- /*
- * the node is a parameter, substitute the entry from the
- * target list of the child that corresponds to the
- * parameter number
- */
- p = (Param *) expression;
-
- /* we only deal with the case of numbered parameters */
- if (p->paramkind == PARAM_NUM)
- {
- /* paramid's start from 1 */
- childno = p->paramid - 1;
-
- if (p->param_tlist)
- {
- /*
- * we have a parameter with an attribute like
- * $N.foo so match the resname "foo" against the
- * target list of the (N-1)th inputQlist
- */
-
- /* param tlist can only have one entry in them! */
- param_tle = (TargetEntry *) (lfirst(p->param_tlist));
- resname = param_tle->resdom->resname;
-
- if (inputQlist->qtrees[childno])
- {
- foreach(tl, inputQlist->qtrees[childno]->targetList)
- {
- tle = lfirst(tl);
- if (strcmp(resname, tle->resdom->resname) == 0)
- return tle->expr;
- }
- }
- else
- elog(ERROR, "tg_rewriteParamsInExpr:can't substitute for parameter %d when that input is unconnected", p->paramid);
-
- }
- else
- {
- /* we have $N without the .foo */
- /* use the first resdom in the targetlist of the */
- /* appropriate child query */
- tl = inputQlist->qtrees[childno]->targetList;
- tle = lfirst(tl);
- return tle->expr;
- }
- }
- else
- elog(WARNING, "tg_rewriteParamsInExpr: unexpected paramkind value of %d", p->paramkind);
- }
- break;
- case T_Expr:
- {
- /*
- * the node is an expression, we need to recursively call
- * ourselves until we find parameter nodes
- */
- List *l;
- Expr *expr = (Expr *) expression;
- List *newArgs;
-
- /*
- * we have to make a new args lists because Params can be
- * replaced by Var nodes in tg_rewriteParamsInExpr()
- */
- newArgs = NIL;
-
- /*
- * we only care about argument to expressions, it doesn't
- * matter when the opType is
- */
- /* recursively rewrite the arguments of this expression */
- foreach(l, expr->args)
- {
- newArgs = lappend(newArgs,
- tg_rewriteParamsInExpr(lfirst(l), inputQlist));
- }
- /* change the arguments of the expression */
- expr->args = newArgs;
- }
- break;
- default:
- {
- /* ignore other expr types */
- }
- }
-
- return expression;
-}
-
-
-
-/*
- getParamTypes:
- given an element, finds its parameter types.
- the typev array argument is set to the parameter types.
- the parameterCount is returned
-
- this code is very similar to ProcedureDefine() in pg_proc.c
-*/
-static int
-getParamTypes(TgElement * elem, Oid *typev)
-{
- /* this code is similar to ProcedureDefine() */
- int16 parameterCount;
- bool defined;
- Oid toid;
- char *t;
- int i,
- j;
-
- parameterCount = 0;
- for (i = 0; i < FUNC_MAX_ARGS; i++)
- typev[i] = 0;
- for (j = 0; j < elem->inTypes->num; j++)
- {
- if (parameterCount == FUNC_MAX_ARGS)
- {
- elog(ERROR,
- "getParamTypes: Ingredients cannot take > %d arguments", FUNC_MAX_ARGS);
- }
- t = elem->inTypes->val[j];
- if (strcmp(t, "opaque") == 0)
- {
- elog(ERROR,
- "getParamTypes: Ingredient functions cannot take type 'opaque'");
- }
- else
- {
- toid = TypeGet(elem->inTypes->val[j], &defined);
- if (!OidIsValid(toid))
- elog(ERROR, "getParamTypes: arg type '%s' is not defined", t);
- if (!defined)
- elog(WARNING, "getParamTypes: arg type '%s' is only a shell", t);
- }
- typev[parameterCount++] = toid;
- }
-
- return parameterCount;
-}
-
-
-/*
- * tg_parseTeeNode
- *
- * handles the parsing of the tee node
- *
- *
- */
-
-static QueryTreeList *
-tg_parseTeeNode(TgRecipe * r,
- TgNode * n, /* the tee node */
- int i, /* which input this node is to its parent */
- QueryTreeList * qList,
- TeeInfo * teeInfo)
-
-{
- QueryTreeList *q;
- char *tt;
- int rt_ind;
- Query *orig;
-
- /*
- * the input Node is a tee node, so we need to do the following: we
- * need to parse the child of the tee node, we add that to our query
- * tree list we need the name of the tee node table the tee node table
- * is the table into which the tee node may materialize results. Call
- * it TT we add a range table to our existing query with TT in it we
- * need to replace the parameter $i with TT (otherwise the optimizer
- * won't know to use the table on expression containining $i) After
- * that rewrite, the optimizer will generate sequential scans of TT
- *
- * Later, in the glue phase, we replace all instances of TT sequential
- * scans with the actual Tee node
- */
- q = tg_parseSubQuery(r, n, teeInfo);
-
- /* tt is the name of the tee node table */
- tt = n->nodeName;
-
- if (q)
- appendTeeQuery(teeInfo, q, tt);
-
- orig = qList->qtrees[0];
- rt_ind = RangeTablePosn(orig->rtable, tt);
-
- /*
- * check to see that this table is not part of the range table
- * already. This usually only happens if multiple inputs are
- * connected to the same Tee.
- */
- if (rt_ind == 0)
- {
- orig->rtable = lappend(orig->rtable,
- addRangeTableEntry(NULL,
- tt,
- tt,
- FALSE,
- FALSE));
- rt_ind = length(orig->rtable);
- }
-
- orig->qual = tg_replaceNumberedParam(orig->qual,
- i + 1, /* params start at 1 */
- rt_ind,
- tt);
- return qList;
-}
-
-
-/*
- * tg_parseSubQuery:
- * go backwards from a node and parse the query
- *
- * the result parse tree is passed back
- *
- * could return NULL if trying to parse a teeNode
- * that's already been processed by another parent
- *
- */
-
-static QueryTreeList *
-tg_parseSubQuery(TgRecipe * r, TgNode * n, TeeInfo * teeInfo)
-{
- TgElement *elem;
- char *funcName;
- Oid typev[FUNC_MAX_ARGS], /* eight arguments maximum */
- relid;
- int i,
- parameterCount;
-
- QueryTreeList *qList; /* the parse tree of the nodeElement */
- QueryTreeList *inputQlist; /* the list of parse trees for the inputs
- * to this node */
- QueryTreeList *q;
- TgNode *child;
- Relation rel;
- unsigned int len;
- TupleDesc tupdesc;
-
- qList = NULL;
-
- if (n->nodeType == TG_INGRED_NODE)
- {
- /* parse each ingredient node in turn */
-
- elem = n->nodeElem;
- switch (elem->srcLang)
- {
- case TG_SQL:
- {
- /*
- * for SQL ingredients, the SQL query is contained in
- * the 'src' field
- */
-
-#ifdef DEBUG_RECIPE
- elog(WARNING, "calling parser with %s", elem->src);
-#endif /* DEBUG_RECIPE */
-
- parameterCount = getParamTypes(elem, typev);
-
- qList = parser(elem->src, typev, parameterCount);
-
- if (qList->len > 1)
- {
- elog(WARNING,
- "tg_parseSubQuery: parser produced > 1 query tree");
- }
- }
- break;
- case TG_C:
- {
- /* C ingredients are registered functions in postgres */
-
- /*
- * we create a new query string by using the function
- * name (found in the 'src' field) and adding
- * parameters to it so if the function was FOOBAR and
- * took in two arguments, we would create a string
- * select FOOBAR($1,$2)
- */
- char newquery[1000];
-
- funcName = elem->src;
- parameterCount = getParamTypes(elem, typev);
-
- if (parameterCount > 0)
- {
- int i;
-
- snprintf(newquery, 1000, "select %s($1", funcName);
- for (i = 1; i < parameterCount; i++)
- snprintf(newquery, 1000, "%s,$%d", pstrdup(newquery), i);
- snprintf(newquery, 1000, "%s)", pstrdup(newquery));
- }
- else
- snprintf(newquery, 1000, "select %s()", funcName);
-
-#ifdef DEBUG_RECIPE
- elog(WARNING, "calling parser with %s", newquery);
-#endif /* DEBUG_RECIPE */
-
- qList = parser(newquery, typev, parameterCount);
- if (qList->len > 1)
- {
- elog(WARNING,
- "tg_parseSubQuery: parser produced > 1 query tree");
- }
- }
- break;
- case TG_RECIPE_GRAPH:
- elog(WARNING, "tg_parseSubQuery: can't parse recipe graph ingredients yet!");
- break;
- case TG_COMPILED:
- elog(WARNING, "tg_parseSubQuery: can't parse compiled ingredients yet!");
- break;
- default:
- elog(WARNING, "tg_parseSubQuery: unknown srcLang: %d", elem->srcLang);
- }
-
- /* parse each of the subrecipes that are input to this node */
-
- if (n->inNodes->num > 0)
- {
- inputQlist = malloc(sizeof(QueryTreeList));
- inputQlist->len = n->inNodes->num + 1;
- inputQlist->qtrees = (Query **) malloc(inputQlist->len * sizeof(Query *));
- for (i = 0; i < n->inNodes->num; i++)
- {
-
- inputQlist->qtrees[i] = NULL;
- if (n->inNodes->val[i])
- {
- if (n->inNodes->val[i]->nodeType == TG_TEE_NODE)
- {
- qList = tg_parseTeeNode(r, n->inNodes->val[i],
- i, qList, teeInfo);
- }
- else
- { /* input node is not a Tee */
- q = tg_parseSubQuery(r, n->inNodes->val[i],
- teeInfo);
- Assert(q->len == 1);
- inputQlist->qtrees[i] = q->qtrees[0];
- }
- }
- }
-
- /* now, we have all the query trees from our input nodes */
- /* transform the original parse tree appropriately */
- tg_rewriteQuery(r, n, qList, inputQlist);
- }
- }
- else if (n->nodeType == TG_EYE_NODE)
- {
- /*
- * if we hit an eye, we need to stop and make what we have into a
- * subrecipe query block
- */
- elog(WARNING, "tg_parseSubQuery: can't handle eye nodes yet");
- }
- else if (n->nodeType == TG_TEE_NODE)
- {
- /*
- * if we hit a tee, check to see if the parsing has been done for
- * this tee already by the other parent
- */
-
- rel = RelationNameGetRelation(n->nodeName);
- if (RelationIsValid(rel))
- {
- /*
- * this tee has already been visited, no need to do any
- * further processing
- */
- return NULL;
- }
- else
- {
- /* we need to process the child of the tee first, */
- child = n->inNodes->val[0];
-
- if (child->nodeType == TG_TEE_NODE)
- {
- /* nested Tee nodes */
- qList = tg_parseTeeNode(r, child, 0, qList, teeInfo);
- return qList;
- }
-
- Assert(child != NULL);
-
- /* parse the input node */
- q = tg_parseSubQuery(r, child, teeInfo);
- Assert(q->len == 1);
-
- /* add the parsed query to the main list of queries */
- qList = appendQlist(qList, q);
-
- /* need to create the tee table here */
-
- /*
- * the tee table created is used both for materializing the
- * values at the tee node, and for parsing and optimization.
- * The optimization needs to have a real table before it will
- * consider scans on it
- */
-
- /*
- * first, find the type of the tuples being produced by the
- * tee. The type is the same as the output type of the child
- * node.
- *
- * NOTE: we are assuming that the child node only has a single
- * output here!
- */
- getParamTypes(child->nodeElem, typev);
-
- /*
- * the output type is either a complex type, (and is thus a
- * relation) or is a simple type
- */
-
- rel = RelationNameGetRelation(child->nodeElem->outTypes->val[0]);
-
- if (RelationIsValid(rel))
- {
- /*
- * for complex types, create new relation with the same
- * tuple descriptor as the output table type
- */
- len = length(q->qtrees[0]->targetList);
- tupdesc = rel->rd_att;
-
- relid = heap_create_with_catalog(
- child->nodeElem->outTypes->val[0],
- tupdesc, RELKIND_RELATION, false);
- }
- else
- {
- /*
- * we have to create a relation with one attribute of the
- * simple base type. That attribute will have an attr
- * name of "result"
- */
- /* NOTE: ignore array types for the time being */
-
- len = 1;
- tupdesc = CreateTemplateTupleDesc(len);
-
- if (!TupleDescInitEntry(tupdesc, 1,
- "result",
- InvalidOid,
- -1, 0, false))
- elog(WARNING, "tg_parseSubQuery: unexpected result from TupleDescInitEntry");
- else
- {
- relid = heap_create_with_catalog(
- child->nodeElem->outTypes->val[0],
- tupdesc, RELKIND_RELATION, false);
- }
- }
- }
- }
- else if (n->nodeType == TG_RECIPE_NODE)
- elog(WARNING, "tg_parseSubQuery: can't handle embedded recipes yet!");
- else
- elog(WARNING, "unknown nodeType: %d", n->nodeType);
-
- return qList;
-}
-
-/*
- * OffsetVarAttno -
- * recursively find all the var nodes with the specified varno
- * and offset their varattno with the offset
- *
- * code is similar to OffsetVarNodes in rewriteManip.c
- */
-
-void
-OffsetVarAttno(Node *node, int varno, int offset)
-{
- if (node == NULL)
- return;
- switch (nodeTag(node))
- {
- case T_TargetEntry:
- {
- TargetEntry *tle = (TargetEntry *) node;
-
- OffsetVarAttno(tle->expr, varno, offset);
- }
- break;
- case T_Expr:
- {
- Expr *expr = (Expr *) node;
-
- OffsetVarAttno((Node *) expr->args, varno, offset);
- }
- break;
- case T_Var:
- {
- Var *var = (Var *) node;
-
- if (var->varno == varno)
- var->varattno += offset;
- }
- break;
- case T_List:
- {
- List *l;
-
- foreach(l, (List *) node)
- OffsetVarAttno(lfirst(l), varno, offset);
- }
- break;
- default:
- /* ignore the others */
- break;
- }
-}
-
-/*
- * appendQlist
- * add the contents of a QueryTreeList q2 to the end of the QueryTreeList
- * q1
- *
- * returns a new querytree list
- */
-
-QueryTreeList *
-appendQlist(QueryTreeList * q1, QueryTreeList * q2)
-{
- QueryTreeList *newq;
- int i,
- j;
- int newlen;
-
- if (q1 == NULL)
- return q2;
-
- if (q2 == NULL)
- return q1;
-
- newlen = q1->len + q2->len;
- newq = (QueryTreeList *) malloc(sizeof(QueryTreeList));
- newq->len = newlen;
- newq->qtrees = (Query **) malloc(newlen * sizeof(Query *));
- for (i = 0; i < q1->len; i++)
- newq->qtrees[i] = q1->qtrees[i];
- for (j = 0; j < q2->len; j++)
- newq->qtrees[i + j] = q2->qtrees[j];
- return newq;
-}
-
-/*
- * appendTeeQuery
- *
- * modify the query field of the teeInfo list of the particular tee node
- */
-static void
-appendTeeQuery(TeeInfo * teeInfo, QueryTreeList * q, char *teeNodeName)
-{
- int i;
-
- Assert(teeInfo);
-
- for (i = 0; i < teeInfo->num; i++)
- {
- if (strcmp(teeInfo->val[i].tpi_relName, teeNodeName) == 0)
- {
-
- Assert(q->len == 1);
- teeInfo->val[i].tpi_parsetree = q->qtrees[0];
- return;
- }
- }
- elog(WARNING, "appendTeeQuery: teeNodeName '%s' not found in teeInfo");
-}
-
-
-
-/*
- * replaceSeqScan
- * replaces sequential scans of a specified relation with the tee plan
- * the relation is specified by its index in the range table, rt_ind
- *
- * returns the modified plan
- * the offset_attno is the offset that needs to be added to the parent's
- * qual or targetlist because the child plan has been replaced with a tee node
- */
-static void
-replaceSeqScan(Plan *plan, Plan *parent,
- int rt_ind, Plan *tplan)
-{
- Scan *snode;
- Tee *teePlan;
- Result *newPlan;
-
- if (plan == NULL)
- return;
-
- if (plan->type == T_SeqScan)
- {
- snode = (Scan *) plan;
- if (snode->scanrelid == rt_ind)
- {
- /*
- * found the sequential scan that should be replaced with the
- * tplan.
- */
- /* we replace the plan, but we also need to modify its parent */
-
- /*
- * replace the sequential scan with a Result node the reason
- * we use a result node is so that we get the proper
- * projection behavior. The Result node is simply (ab)used as
- * a projection node
- */
-
- newPlan = makeNode(Result);
- newPlan->plan.cost = 0.0;
- newPlan->plan.state = (EState *) NULL;
- newPlan->plan.targetlist = plan->targetlist;
- newPlan->plan.lefttree = tplan;
- newPlan->plan.righttree = NULL;
- newPlan->resconstantqual = NULL;
- newPlan->resstate = NULL;
-
- /* change all the varno's to 1 */
- ChangeVarNodes((Node *) newPlan->plan.targetlist,
- snode->scanrelid, 1);
-
- if (parent)
- {
- teePlan = (Tee *) tplan;
-
- if (parent->lefttree == plan)
- parent->lefttree = (Plan *) newPlan;
- else
- parent->righttree = (Plan *) newPlan;
-
-
- if (teePlan->leftParent == NULL)
- teePlan->leftParent = (Plan *) newPlan;
- else
- teePlan->rightParent = (Plan *) newPlan;
-
-/* comment for now to test out executor-stuff
- if (parent->state) {
- ExecInitNode((Plan*)newPlan, parent->state, (Plan*)newPlan);
- }
-*/
- }
- }
-
- }
- else
- {
- if (plan->lefttree)
- replaceSeqScan(plan->lefttree, plan, rt_ind, tplan);
- if (plan->righttree)
- replaceSeqScan(plan->righttree, plan, rt_ind, tplan);
- }
-}
-
-/*
- * replaceTeeScans
- * places the sequential scans of the Tee table with
- * a connection to the actual tee plan node
- */
-static Plan *
-replaceTeeScans(Plan *plan, Query *parsetree, TeeInfo * teeInfo)
-{
-
- int i;
- List *rtable;
- RangeTblEntry *rte;
- char prefix[5];
- int rt_ind;
- Plan *tplan;
-
- rtable = parsetree->rtable;
- if (rtable == NULL)
- return plan;
-
- /*
- * look through the range table for the tee relation entry, that will
- * give use the varno we need to detect which sequential scans need to
- * be replaced with tee nodes
- */
-
- rt_ind = 0;
- while (rtable != NIL)
- {
- rte = lfirst(rtable);
- rtable = lnext(rtable);
- rt_ind++; /* range table references in varno fields
- * start w/ 1 */
-
- /*
- * look for the "tee_" prefix in the refname, also check to see
- * that the relname and the refname are the same this should
- * eliminate any user-specified table and leave us with the tee
- * table entries only
- */
- if ((strlen(rte->refname) < 4) ||
- (strcmp(rte->relname, rte->refname) != 0))
- continue;
- StrNCpy(prefix, rte->refname, 5);
- if (strcmp(prefix, "tee_") == 0)
- {
- /* okay, we found a tee node entry in the range table */
-
- /* find the appropriate plan in the teeInfo list */
- tplan = NULL;
- for (i = 0; i < teeInfo->num; i++)
- {
- if (strcmp(teeInfo->val[i].tpi_relName,
- rte->refname) == 0)
- tplan = teeInfo->val[i].tpi_plan;
- }
- if (tplan == NULL)
- elog(WARNING, "replaceTeeScans didn't find the corresponding tee plan");
-
- /*
- * replace the sequential scan node with that var number with
- * the tee plan node
- */
- replaceSeqScan(plan, NULL, rt_ind, tplan);
- }
- }
-
- return plan;
-}
-
-
-#endif /* TIOGA */
diff --git a/src/backend/commands/_deadcode/recipe.h b/src/backend/commands/_deadcode/recipe.h
deleted file mode 100644
index 0bda61fc2f4..00000000000
--- a/src/backend/commands/_deadcode/recipe.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * recipe.h
- * recipe handling routines
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * $Id: recipe.h,v 1.7 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#ifndef RECIPE_H
-#define RECIPE_H
-
-#include "nodes/parsenodes.h"
-
-extern void beginRecipe(RecipeStmt *stmt);
-
-#endif /* RECIPE_H */
diff --git a/src/backend/commands/_deadcode/version.c b/src/backend/commands/_deadcode/version.c
deleted file mode 100644
index a88247e684b..00000000000
--- a/src/backend/commands/_deadcode/version.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * version.c
- * This file contains all the rules that govern all version semantics.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * The version stuff has not been tested under postgres95 and probably
- * doesn't work! - jolly 8/19/95
- *
- *
- * $Id: version.c,v 1.30 2002/06/20 20:29:27 momjian Exp $
- *
- * NOTES
- * At the point the version is defined, 2 physical relations are created
- * <vname>_added and <vname>_deleted.
- *
- * In addition, 4 rules are defined which govern the semantics of
- * versions w.r.t retrieves, appends, replaces and deletes.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-
-#define MAX_QUERY_LEN 1024
-
-char rule_buf[MAX_QUERY_LEN];
-
-/*
- * problem: the version system assumes that the rules it declares will
- * be fired in the order of declaration, it also assumes
- * goh's silly instead semantics. Unfortunately, it is a pain
- * to make the version system work with the new semantics.
- * However the whole problem can be solved, and some nice
- * functionality can be achieved if we get multiple action rules
- * to work. So thats what I did -- glass
- *
- * Well, at least they've been working for about 20 minutes.
- *
- * So any comments in this code about 1 rule per transction are false...:)
- *
- */
-
-/*
- * This is needed because the rule system only allows
- * *1* rule to be defined per transaction.
- *
- * NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
- * OOOOOOOOOOOOOOOOOOO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- *
- * DONT DO THAT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- *
- * If you commit the current Xact all the palloced memory GOES AWAY
- * and could be re-palloced in the new Xact and the whole hell breaks
- * loose and poor people like me spend 2 hours of their live chassing
- * a strange memory bug instead of watching the "Get Smart" marathon
- * in NICK !
- * DO NOT COMMIT THE XACT, just increase the Cid counter!
- * _sp.
- */
-#ifdef NOT_USED
-static void
-eval_as_new_xact(char *query)
-{
-
- /*------
- * WARNING! do not uncomment the following lines WARNING!
- *
- * CommitTransactionCommand();
- * StartTransactionCommand();
- *------
- */
- CommandCounterIncrement();
- pg_exec_query(query);
-}
-#endif
-/*
- * Define a version.
- */
-#ifdef NOT_USED
-void
-DefineVersion(char *name, char *fromRelname, char *date)
-{
- char *bname;
- static char saved_basename[512];
- static char saved_snapshot[512];
-
- if (date == NULL)
- {
- /* no time ranges */
- bname = fromRelname;
- strcpy(saved_basename, (char *) bname);
- *saved_snapshot = (char) NULL;
- }
- else
- {
- /* version is a snapshot */
- bname = fromRelname;
- strcpy(saved_basename, (char *) bname);
- sprintf(saved_snapshot, "['%s']", date);
- }
-
-
- /*
- * Calls the routine ``GetAttrList'' get the list of attributes from
- * the base relation. Code is put here so that we only need to look up
- * the attribute once for both appends and replaces.
- */
- setAttrList(bname);
-
- VersionCreate(name, saved_basename);
- VersionAppend(name, saved_basename);
- VersionDelete(name, saved_basename, saved_snapshot);
- VersionReplace(name, saved_basename, saved_snapshot);
- VersionRetrieve(name, saved_basename, saved_snapshot);
-}
-#endif
-
-/*
- * Creates the deltas.
- */
-#ifdef NOT_USED
-void
-VersionCreate(char *vname, char *bname)
-{
- static char query_buf[MAX_QUERY_LEN];
-
- /*
- * Creating the dummy version relation for triggering rules.
- */
- sprintf(query_buf, "SELECT * INTO TABLE %s from %s where 1 =2",
- vname, bname);
-
- pg_exec_query(query_buf);
-
- /*
- * Creating the ``v_added'' relation
- */
- sprintf(query_buf, "SELECT * INTO TABLE %s_added from %s where 1 = 2",
- vname, bname);
- eval_as_new_xact(query_buf);
-
- /*
- * Creating the ``v_deleted'' relation.
- */
- sprintf(query_buf, "CREATE TABLE %s_del (DOID oid)", vname);
- eval_as_new_xact(query_buf);
-}
-#endif
-
-
-/*
- * Given the relation name, does a catalog lookup for that relation and
- * sets the global variable 'attr_list' with the list of attributes (names)
- * for that relation.
- */
-#ifdef NOT_USED
-static void
-setAttrList(char *bname)
-{
- Relation rel;
- int i = 0;
- int maxattrs = 0;
- char *attrname;
- char temp_buf[512];
- int notfirst = 0;
-
- rel = heap_openr(bname);
- if (rel == NULL)
- {
- elog(ERROR, "Unable to expand all -- amopenr failed ");
- return;
- }
- maxattrs = RelationGetNumberOfAttributes(rel);
-
- attr_list[0] = '\0';
-
- for (i = maxattrs - 1; i > -1; --i)
- {
- attrname = NameStr(rel->rd_att->attrs[i]->attname);
-
- if (notfirst == 1)
- sprintf(temp_buf, ", %s = new.%s", attrname, attrname);
- else
- {
- sprintf(temp_buf, "%s = new.%s", attrname, attrname);
- notfirst = 1;
- }
- strcat(attr_list, temp_buf);
- }
-
- heap_close(rel);
-
- return;
-}
-#endif
-
-/*
- * This routine defines the rule governing the append semantics of
- * versions. All tuples appended to a version gets appended to the
- * <vname>_added relation.
- */
-#ifdef NOT_USED
-static void
-VersionAppend(char *vname, char *bname)
-{
- sprintf(rule_buf,
- "define rewrite rule %s_append is on INSERT to %s do instead append %s_added(%s)",
- vname, vname, vname, attr_list);
-
- eval_as_new_xact(rule_buf);
-}
-#endif
-
-/*
- * This routine defines the rule governing the retrieval semantics of
- * versions. To retrieve tuples from a version , we need to:
- *
- * 1. Retrieve all tuples in the <vname>_added relation.
- * 2. Retrieve all tuples in the base relation which are not in
- * the <vname>_del relation.
- */
-#ifdef NOT_USED
-void
-VersionRetrieve(char *vname, char *bname, char *snapshot)
-{
-
- sprintf(rule_buf,
- "define rewrite rule %s_retrieve is on SELECT to %s do instead\n\
-SELECT %s_1.oid, %s_1.* from _%s in %s%s, %s_1 in (%s_added | _%s) \
-where _%s.oid !!= '%s_del.DOID'",
- vname, vname, vname, vname, bname,
- bname, snapshot,
- vname, vname, bname, bname, vname);
-
- eval_as_new_xact(rule_buf);
-
- /* printf("%s\n",rule_buf); */
-
-}
-#endif
-
-/*
- * This routine defines the rules that govern the delete semantics of
- * versions. Two things happens when we delete a tuple from a version:
- *
- * 1. If the tuple to be deleted was added to the version *after*
- * the version was created, then we simply delete the tuple
- * from the <vname>_added relation.
- * 2. If the tuple to be deleted is actually in the base relation,
- * then we have to mark that tuple as being deleted by adding
- * it to the <vname>_del relation.
- */
-#ifdef NOT_USED
-void
-VersionDelete(char *vname, char *bname, char *snapshot)
-{
-
- sprintf(rule_buf,
- "define rewrite rule %s_delete1 is on delete to %s do instead\n \
-[delete %s_added where current.oid = %s_added.oid\n \
- append %s_del(DOID = current.oid) from _%s in %s%s \
- where current.oid = _%s.oid] \n",
- vname, vname, vname, vname, vname,
- bname, bname, snapshot, bname);
-
- eval_as_new_xact(rule_buf);
-#ifdef OLD_REWRITE
- sprintf(rule_buf,
- "define rewrite rule %s_delete2 is on delete to %s do instead \n \
- append %s_del(DOID = current.oid) from _%s in %s%s \
- where current.oid = _%s.oid \n",
- vname, vname, vname, bname, bname, snapshot, bname);
-
- eval_as_new_xact(rule_buf);
-#endif /* OLD_REWRITE */
-}
-#endif
-
-/*
- * This routine defines the rules that govern the update semantics
- * of versions. To update a tuple in a version:
- *
- * 1. If the tuple is in <vname>_added, we simply ``replace''
- * the tuple (as per postgres style).
- * 2. if the tuple is in the base relation, then two things have to
- * happen:
- * 2.1 The tuple is marked ``deleted'' from the base relation by
- * adding the tuple to the <vname>_del relation.
- * 2.2 A copy of the tuple is appended to the <vname>_added relation
- */
-#ifdef NOT_USED
-void
-VersionReplace(char *vname, char *bname, char *snapshot)
-{
- sprintf(rule_buf,
- "define rewrite rule %s_replace1 is on replace to %s do instead \n\
-[replace %s_added(%s) where current.oid = %s_added.oid \n\
- append %s_del(DOID = current.oid) from _%s in %s%s \
- where current.oid = _%s.oid\n\
- append %s_added(%s) from _%s in %s%s \
- where current.oid !!= '%s_added.oid' and current.oid = _%s.oid]\n",
- vname, vname, vname, attr_list, vname,
- vname, bname, bname, snapshot, bname,
- vname, attr_list, bname, bname, snapshot, vname, bname);
-
- eval_as_new_xact(rule_buf);
-
-/* printf("%s\n",rule_buf); */
-#ifdef OLD_REWRITE
- sprintf(rule_buf,
- "define rewrite rule %s_replace2 is on replace to %s do \n\
- append %s_del(DOID = current.oid) from _%s in %s%s \
- where current.oid = _%s.oid\n",
- vname, vname, vname, bname, bname, snapshot, bname);
-
- eval_as_new_xact(rule_buf);
-
- sprintf(rule_buf,
- "define rewrite rule %s_replace3 is on replace to %s do instead\n\
- append %s_added(%s) from _%s in %s%s \
- where current.oid !!= '%s_added.oid' and current.oid = \
- _%s.oid\n",
- vname, vname, vname, attr_list, bname, bname, snapshot, vname, bname);
-
- eval_as_new_xact(rule_buf);
-#endif /* OLD_REWRITE */
-/* printf("%s\n",rule_buf); */
-
-}
-
-#endif
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
deleted file mode 100644
index c3c1ed16dfc..00000000000
--- a/src/backend/commands/aggregatecmds.c
+++ /dev/null
@@ -1,208 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * aggregatecmds.c
- *
- * Routines for aggregate-manipulation commands
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.2 2002/04/27 03:45:00 tgl Exp $
- *
- * DESCRIPTION
- * The "DefineFoo" routines take the parse tree and pick out the
- * appropriate arguments/flags, passing the results to the
- * corresponding "FooDefine" routines (in src/catalog) that do
- * the actual catalog-munging. These routines also verify permission
- * of the user to execute the command.
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_aggregate.h"
-#include "catalog/pg_proc.h"
-#include "commands/comment.h"
-#include "commands/defrem.h"
-#include "miscadmin.h"
-#include "parser/parse_func.h"
-#include "parser/parse_type.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-/*
- * DefineAggregate
- */
-void
-DefineAggregate(List *names, List *parameters)
-{
- char *aggName;
- Oid aggNamespace;
- AclResult aclresult;
- List *transfuncName = NIL;
- List *finalfuncName = NIL;
- TypeName *baseType = NULL;
- TypeName *transType = NULL;
- char *initval = NULL;
- Oid baseTypeId;
- Oid transTypeId;
- List *pl;
-
- /* Convert list of names to a name and namespace */
- aggNamespace = QualifiedNameGetCreationNamespace(names, &aggName);
-
- /* Check we have creation rights in target namespace */
- aclresult = pg_namespace_aclcheck(aggNamespace, GetUserId(), ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(aggNamespace));
-
- foreach(pl, parameters)
- {
- DefElem *defel = (DefElem *) lfirst(pl);
-
- /*
- * sfunc1, stype1, and initcond1 are accepted as obsolete
- * spellings for sfunc, stype, initcond.
- */
- if (strcasecmp(defel->defname, "sfunc") == 0)
- transfuncName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "sfunc1") == 0)
- transfuncName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "finalfunc") == 0)
- finalfuncName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "basetype") == 0)
- baseType = defGetTypeName(defel);
- else if (strcasecmp(defel->defname, "stype") == 0)
- transType = defGetTypeName(defel);
- else if (strcasecmp(defel->defname, "stype1") == 0)
- transType = defGetTypeName(defel);
- else if (strcasecmp(defel->defname, "initcond") == 0)
- initval = defGetString(defel);
- else if (strcasecmp(defel->defname, "initcond1") == 0)
- initval = defGetString(defel);
- else
- elog(WARNING, "DefineAggregate: attribute \"%s\" not recognized",
- defel->defname);
- }
-
- /*
- * make sure we have our required definitions
- */
- if (baseType == NULL)
- elog(ERROR, "Define: \"basetype\" unspecified");
- if (transType == NULL)
- elog(ERROR, "Define: \"stype\" unspecified");
- if (transfuncName == NIL)
- elog(ERROR, "Define: \"sfunc\" unspecified");
-
- /*
- * Handle the aggregate's base type (input data type). This can be
- * specified as 'ANY' for a data-independent transition function, such
- * as COUNT(*).
- */
- baseTypeId = LookupTypeName(baseType);
- if (OidIsValid(baseTypeId))
- {
- /* no need to allow aggregates on as-yet-undefined types */
- if (!get_typisdefined(baseTypeId))
- elog(ERROR, "Type \"%s\" is only a shell",
- TypeNameToString(baseType));
- }
- else
- {
- char *typnam = TypeNameToString(baseType);
-
- if (strcasecmp(typnam, "ANY") != 0)
- elog(ERROR, "Type \"%s\" does not exist", typnam);
- baseTypeId = InvalidOid;
- }
-
- /* handle transtype --- no special cases here */
- transTypeId = typenameTypeId(transType);
-
- /*
- * Most of the argument-checking is done inside of AggregateCreate
- */
- AggregateCreate(aggName, /* aggregate name */
- aggNamespace, /* namespace */
- transfuncName, /* step function name */
- finalfuncName, /* final function name */
- baseTypeId, /* type of data being aggregated */
- transTypeId, /* transition data type */
- initval); /* initial condition */
-}
-
-
-void
-RemoveAggregate(List *aggName, TypeName *aggType)
-{
- Relation relation;
- HeapTuple tup;
- Oid basetypeID;
- Oid procOid;
-
- /*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type.
- *
- * else if the basetype is blank, then attempt to find an aggregate with
- * a basetype of zero. This is valid. It means that the aggregate is
- * to apply to all basetypes (eg, COUNT).
- */
- if (aggType)
- basetypeID = typenameTypeId(aggType);
- else
- basetypeID = InvalidOid;
-
- procOid = find_aggregate_func("RemoveAggregate", aggName, basetypeID);
-
- relation = heap_openr(ProcedureRelationName, RowExclusiveLock);
-
- tup = SearchSysCache(PROCOID,
- ObjectIdGetDatum(procOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
- elog(ERROR, "RemoveAggregate: couldn't find pg_proc tuple for %s",
- NameListToString(aggName));
-
- /* Permission check: must own agg or its namespace */
- if (!pg_proc_ownercheck(procOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(aggName));
-
- /* Delete any comments associated with this function */
- DeleteComments(procOid, RelationGetRelid(relation));
-
- /* Remove the pg_proc tuple */
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- heap_close(relation, RowExclusiveLock);
-
- /* Remove the pg_aggregate tuple */
-
- relation = heap_openr(AggregateRelationName, RowExclusiveLock);
-
- tup = SearchSysCache(AGGFNOID,
- ObjectIdGetDatum(procOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
- elog(ERROR, "RemoveAggregate: couldn't find pg_aggregate tuple for %s",
- NameListToString(aggName));
-
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- heap_close(relation, RowExclusiveLock);
-}
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
deleted file mode 100644
index 6caa968b5d2..00000000000
--- a/src/backend/commands/analyze.c
+++ /dev/null
@@ -1,1794 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * analyze.c
- * the postgres statistics generator
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.38 2002/06/20 20:29:26 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <math.h>
-
-#include "access/heapam.h"
-#include "access/tuptoaster.h"
-#include "catalog/catalog.h"
-#include "catalog/catname.h"
-#include "catalog/indexing.h"
-#include "catalog/pg_operator.h"
-#include "catalog/pg_statistic.h"
-#include "catalog/pg_type.h"
-#include "commands/vacuum.h"
-#include "miscadmin.h"
-#include "parser/parse_oper.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/datum.h"
-#include "utils/fmgroids.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-#include "utils/tuplesort.h"
-
-
-/*
- * Analysis algorithms supported
- */
-typedef enum
-{
- ALG_MINIMAL = 1, /* Compute only most-common-values */
- ALG_SCALAR /* Compute MCV, histogram, sort
- * correlation */
-} AlgCode;
-
-/*
- * To avoid consuming too much memory during analysis and/or too much space
- * in the resulting pg_statistic rows, we ignore varlena datums that are wider
- * than WIDTH_THRESHOLD (after detoasting!). This is legitimate for MCV
- * and distinct-value calculations since a wide value is unlikely to be
- * duplicated at all, much less be a most-common value. For the same reason,
- * ignoring wide values will not affect our estimates of histogram bin
- * boundaries very much.
- */
-#define WIDTH_THRESHOLD 256
-
-/*
- * We build one of these structs for each attribute (column) that is to be
- * analyzed. The struct and subsidiary data are in anl_context,
- * so they live until the end of the ANALYZE operation.
- */
-typedef struct
-{
- /* These fields are set up by examine_attribute */
- int attnum; /* attribute number */
- AlgCode algcode; /* Which algorithm to use for this column */
- int minrows; /* Minimum # of rows wanted for stats */
- Form_pg_attribute attr; /* copy of pg_attribute row for column */
- Form_pg_type attrtype; /* copy of pg_type row for column */
- Oid eqopr; /* '=' operator for datatype, if any */
- Oid eqfunc; /* and associated function */
- Oid ltopr; /* '<' operator for datatype, if any */
-
- /*
- * These fields are filled in by the actual statistics-gathering
- * routine
- */
- bool stats_valid;
- float4 stanullfrac; /* fraction of entries that are NULL */
- int4 stawidth; /* average width */
- float4 stadistinct; /* # distinct values */
- int2 stakind[STATISTIC_NUM_SLOTS];
- Oid staop[STATISTIC_NUM_SLOTS];
- int numnumbers[STATISTIC_NUM_SLOTS];
- float4 *stanumbers[STATISTIC_NUM_SLOTS];
- int numvalues[STATISTIC_NUM_SLOTS];
- Datum *stavalues[STATISTIC_NUM_SLOTS];
-} VacAttrStats;
-
-
-typedef struct
-{
- Datum value; /* a data value */
- int tupno; /* position index for tuple it came from */
-} ScalarItem;
-
-typedef struct
-{
- int count; /* # of duplicates */
- int first; /* values[] index of first occurrence */
-} ScalarMCVItem;
-
-
-#define swapInt(a,b) do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
-#define swapDatum(a,b) do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
-
-static int elevel = -1;
-
-static MemoryContext anl_context = NULL;
-
-/* context information for compare_scalars() */
-static FmgrInfo *datumCmpFn;
-static SortFunctionKind datumCmpFnKind;
-static int *datumCmpTupnoLink;
-
-
-static VacAttrStats *examine_attribute(Relation onerel, int attnum);
-static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
- int targrows, double *totalrows);
-static double random_fract(void);
-static double init_selection_state(int n);
-static double select_next_random_record(double t, int n, double *stateptr);
-static int compare_rows(const void *a, const void *b);
-static int compare_scalars(const void *a, const void *b);
-static int compare_mcvs(const void *a, const void *b);
-static void compute_minimal_stats(VacAttrStats *stats,
- TupleDesc tupDesc, double totalrows,
- HeapTuple *rows, int numrows);
-static void compute_scalar_stats(VacAttrStats *stats,
- TupleDesc tupDesc, double totalrows,
- HeapTuple *rows, int numrows);
-static void update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats);
-
-
-/*
- * analyze_rel() -- analyze one relation
- */
-void
-analyze_rel(Oid relid, VacuumStmt *vacstmt)
-{
- Relation onerel;
- Form_pg_attribute *attr;
- int attr_cnt,
- tcnt,
- i;
- VacAttrStats **vacattrstats;
- int targrows,
- numrows;
- double totalrows;
- HeapTuple *rows;
-
- if (vacstmt->verbose)
- elevel = INFO;
- else
- elevel = DEBUG1;
-
- /*
- * Use the current context for storing analysis info. vacuum.c ensures
- * that this context will be cleared when I return, thus releasing the
- * memory allocated here.
- */
- anl_context = CurrentMemoryContext;
-
- /*
- * Check for user-requested abort. Note we want this to be inside a
- * transaction, so xact.c doesn't issue useless WARNING.
- */
- CHECK_FOR_INTERRUPTS();
-
- /*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to process it.
- */
- if (!SearchSysCacheExists(RELOID,
- ObjectIdGetDatum(relid),
- 0, 0, 0))
- return;
-
- /*
- * Open the class, getting only a read lock on it, and check
- * permissions. Permissions check should match vacuum's check!
- */
- onerel = relation_open(relid, AccessShareLock);
-
- if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
- (is_dbadmin(MyDatabaseId) && !onerel->rd_rel->relisshared)))
- {
- /* No need for a WARNING if we already complained during VACUUM */
- if (!vacstmt->vacuum)
- elog(WARNING, "Skipping \"%s\" --- only table or database owner can ANALYZE it",
- RelationGetRelationName(onerel));
- relation_close(onerel, AccessShareLock);
- return;
- }
-
- /*
- * Check that it's a plain table; we used to do this in getrels() but
- * seems safer to check after we've locked the relation.
- */
- if (onerel->rd_rel->relkind != RELKIND_RELATION)
- {
- /* No need for a WARNING if we already complained during VACUUM */
- if (!vacstmt->vacuum)
- elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
- RelationGetRelationName(onerel));
- relation_close(onerel, AccessShareLock);
- return;
- }
-
- /*
- * We can ANALYZE any table except pg_statistic. See update_attstats
- */
- if (IsSystemNamespace(RelationGetNamespace(onerel)) &&
- strcmp(RelationGetRelationName(onerel), StatisticRelationName) == 0)
- {
- relation_close(onerel, AccessShareLock);
- return;
- }
-
- elog(elevel, "Analyzing %s.%s",
- get_namespace_name(RelationGetNamespace(onerel)),
- RelationGetRelationName(onerel));
-
- /*
- * Determine which columns to analyze
- *
- * Note that system attributes are never analyzed.
- */
- attr = onerel->rd_att->attrs;
- attr_cnt = onerel->rd_att->natts;
-
- if (vacstmt->va_cols != NIL)
- {
- List *le;
-
- vacattrstats = (VacAttrStats **) palloc(length(vacstmt->va_cols) *
- sizeof(VacAttrStats *));
- tcnt = 0;
- foreach(le, vacstmt->va_cols)
- {
- char *col = strVal(lfirst(le));
-
- for (i = 0; i < attr_cnt; i++)
- {
- if (namestrcmp(&(attr[i]->attname), col) == 0)
- break;
- }
- if (i >= attr_cnt)
- elog(ERROR, "ANALYZE: there is no attribute %s in %s",
- col, RelationGetRelationName(onerel));
- vacattrstats[tcnt] = examine_attribute(onerel, i + 1);
- if (vacattrstats[tcnt] != NULL)
- tcnt++;
- }
- attr_cnt = tcnt;
- }
- else
- {
- vacattrstats = (VacAttrStats **) palloc(attr_cnt *
- sizeof(VacAttrStats *));
- tcnt = 0;
- for (i = 0; i < attr_cnt; i++)
- {
- vacattrstats[tcnt] = examine_attribute(onerel, i + 1);
- if (vacattrstats[tcnt] != NULL)
- tcnt++;
- }
- attr_cnt = tcnt;
- }
-
- /*
- * Quit if no analyzable columns
- */
- if (attr_cnt <= 0)
- {
- relation_close(onerel, NoLock);
- return;
- }
-
- /*
- * Determine how many rows we need to sample, using the worst case
- * from all analyzable columns. We use a lower bound of 100 rows to
- * avoid possible overflow in Vitter's algorithm.
- */
- targrows = 100;
- for (i = 0; i < attr_cnt; i++)
- {
- if (targrows < vacattrstats[i]->minrows)
- targrows = vacattrstats[i]->minrows;
- }
-
- /*
- * Acquire the sample rows
- */
- rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
- numrows = acquire_sample_rows(onerel, rows, targrows, &totalrows);
-
- /*
- * If we are running a standalone ANALYZE, update pages/tuples stats
- * in pg_class. We have the accurate page count from heap_beginscan,
- * but only an approximate number of tuples; therefore, if we are part
- * of VACUUM ANALYZE do *not* overwrite the accurate count already
- * inserted by VACUUM.
- */
- if (!vacstmt->vacuum)
- vac_update_relstats(RelationGetRelid(onerel),
- onerel->rd_nblocks,
- totalrows,
- RelationGetForm(onerel)->relhasindex);
-
- /*
- * Compute the statistics. Temporary results during the calculations
- * for each column are stored in a child context. The calc routines
- * are responsible to make sure that whatever they store into the
- * VacAttrStats structure is allocated in anl_context.
- */
- if (numrows > 0)
- {
- MemoryContext col_context,
- old_context;
-
- col_context = AllocSetContextCreate(anl_context,
- "Analyze Column",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
- old_context = MemoryContextSwitchTo(col_context);
- for (i = 0; i < attr_cnt; i++)
- {
- switch (vacattrstats[i]->algcode)
- {
- case ALG_MINIMAL:
- compute_minimal_stats(vacattrstats[i],
- onerel->rd_att, totalrows,
- rows, numrows);
- break;
- case ALG_SCALAR:
- compute_scalar_stats(vacattrstats[i],
- onerel->rd_att, totalrows,
- rows, numrows);
- break;
- }
- MemoryContextResetAndDeleteChildren(col_context);
- }
- MemoryContextSwitchTo(old_context);
- MemoryContextDelete(col_context);
-
- /*
- * Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are
- * stats in pg_statistic for columns we didn't process, we leave
- * them alone.)
- */
- update_attstats(relid, attr_cnt, vacattrstats);
- }
-
- /*
- * Close source relation now, but keep lock so that no one deletes it
- * before we commit. (If someone did, they'd fail to clean up the
- * entries we made in pg_statistic.)
- */
- relation_close(onerel, NoLock);
-}
-
-/*
- * examine_attribute -- pre-analysis of a single column
- *
- * Determine whether the column is analyzable; if so, create and initialize
- * a VacAttrStats struct for it. If not, return NULL.
- */
-static VacAttrStats *
-examine_attribute(Relation onerel, int attnum)
-{
- Form_pg_attribute attr = onerel->rd_att->attrs[attnum - 1];
- Operator func_operator;
- Oid oprrest;
- HeapTuple typtuple;
- Oid eqopr = InvalidOid;
- Oid eqfunc = InvalidOid;
- Oid ltopr = InvalidOid;
- VacAttrStats *stats;
-
- /* Don't analyze column if user has specified not to */
- if (attr->attstattarget <= 0)
- return NULL;
-
- /* If column has no "=" operator, we can't do much of anything */
- func_operator = compatible_oper(makeList1(makeString("=")),
- attr->atttypid,
- attr->atttypid,
- true);
- if (func_operator != NULL)
- {
- oprrest = ((Form_pg_operator) GETSTRUCT(func_operator))->oprrest;
- if (oprrest == F_EQSEL)
- {
- eqopr = oprid(func_operator);
- eqfunc = oprfuncid(func_operator);
- }
- ReleaseSysCache(func_operator);
- }
- if (!OidIsValid(eqfunc))
- return NULL;
-
- /*
- * If we have "=" then we're at least able to do the minimal
- * algorithm, so start filling in a VacAttrStats struct.
- */
- stats = (VacAttrStats *) palloc(sizeof(VacAttrStats));
- MemSet(stats, 0, sizeof(VacAttrStats));
- stats->attnum = attnum;
- stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
- memcpy(stats->attr, attr, ATTRIBUTE_TUPLE_SIZE);
- typtuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(attr->atttypid),
- 0, 0, 0);
- if (!HeapTupleIsValid(typtuple))
- elog(ERROR, "cache lookup of type %u failed", attr->atttypid);
- stats->attrtype = (Form_pg_type) palloc(sizeof(FormData_pg_type));
- memcpy(stats->attrtype, GETSTRUCT(typtuple), sizeof(FormData_pg_type));
- ReleaseSysCache(typtuple);
- stats->eqopr = eqopr;
- stats->eqfunc = eqfunc;
-
- /* Is there a "<" operator with suitable semantics? */
- func_operator = compatible_oper(makeList1(makeString("<")),
- attr->atttypid,
- attr->atttypid,
- true);
- if (func_operator != NULL)
- {
- oprrest = ((Form_pg_operator) GETSTRUCT(func_operator))->oprrest;
- if (oprrest == F_SCALARLTSEL)
- ltopr = oprid(func_operator);
- ReleaseSysCache(func_operator);
- }
- stats->ltopr = ltopr;
-
- /*
- * Determine the algorithm to use (this will get more complicated
- * later)
- */
- if (OidIsValid(ltopr))
- {
- /* Seems to be a scalar datatype */
- stats->algcode = ALG_SCALAR;
- /*--------------------
- * The following choice of minrows is based on the paper
- * "Random sampling for histogram construction: how much is enough?"
- * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
- * Proceedings of ACM SIGMOD International Conference on Management
- * of Data, 1998, Pages 436-447. Their Corollary 1 to Theorem 5
- * says that for table size n, histogram size k, maximum relative
- * error in bin size f, and error probability gamma, the minimum
- * random sample size is
- * r = 4 * k * ln(2*n/gamma) / f^2
- * Taking f = 0.5, gamma = 0.01, n = 1 million rows, we obtain
- * r = 305.82 * k
- * Note that because of the log function, the dependence on n is
- * quite weak; even at n = 1 billion, a 300*k sample gives <= 0.59
- * bin size error with probability 0.99. So there's no real need to
- * scale for n, which is a good thing because we don't necessarily
- * know it at this point.
- *--------------------
- */
- stats->minrows = 300 * attr->attstattarget;
- }
- else
- {
- /* Can't do much but the minimal stuff */
- stats->algcode = ALG_MINIMAL;
- /* Might as well use the same minrows as above */
- stats->minrows = 300 * attr->attstattarget;
- }
-
- return stats;
-}
-
-/*
- * acquire_sample_rows -- acquire a random sample of rows from the table
- *
- * Up to targrows rows are collected (if there are fewer than that many
- * rows in the table, all rows are collected). When the table is larger
- * than targrows, a truly random sample is collected: every row has an
- * equal chance of ending up in the final sample.
- *
- * We also estimate the total number of rows in the table, and return that
- * into *totalrows.
- *
- * The returned list of tuples is in order by physical position in the table.
- * (We will rely on this later to derive correlation estimates.)
- */
-static int
-acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
- double *totalrows)
-{
- int numrows = 0;
- HeapScanDesc scan;
- HeapTuple tuple;
- ItemPointer lasttuple;
- BlockNumber lastblock,
- estblock;
- OffsetNumber lastoffset;
- int numest;
- double tuplesperpage;
- double t;
- double rstate;
-
- Assert(targrows > 1);
-
- /*
- * Do a simple linear scan until we reach the target number of rows.
- */
- scan = heap_beginscan(onerel, SnapshotNow, 0, NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- rows[numrows++] = heap_copytuple(tuple);
- if (numrows >= targrows)
- break;
- CHECK_FOR_INTERRUPTS();
- }
- heap_endscan(scan);
-
- /*
- * If we ran out of tuples then we're done, no matter how few we
- * collected. No sort is needed, since they're already in order.
- */
- if (!HeapTupleIsValid(tuple))
- {
- *totalrows = (double) numrows;
- return numrows;
- }
-
- /*
- * Otherwise, start replacing tuples in the sample until we reach the
- * end of the relation. This algorithm is from Jeff Vitter's paper
- * (see full citation below). It works by repeatedly computing the
- * number of the next tuple we want to fetch, which will replace a
- * randomly chosen element of the reservoir (current set of tuples).
- * At all times the reservoir is a true random sample of the tuples
- * we've passed over so far, so when we fall off the end of the
- * relation we're done.
- *
- * A slight difficulty is that since we don't want to fetch tuples or
- * even pages that we skip over, it's not possible to fetch *exactly*
- * the N'th tuple at each step --- we don't know how many valid tuples
- * are on the skipped pages. We handle this by assuming that the
- * average number of valid tuples/page on the pages already scanned
- * over holds good for the rest of the relation as well; this lets us
- * estimate which page the next tuple should be on and its position in
- * the page. Then we fetch the first valid tuple at or after that
- * position, being careful not to use the same tuple twice. This
- * approach should still give a good random sample, although it's not
- * perfect.
- */
- lasttuple = &(rows[numrows - 1]->t_self);
- lastblock = ItemPointerGetBlockNumber(lasttuple);
- lastoffset = ItemPointerGetOffsetNumber(lasttuple);
-
- /*
- * If possible, estimate tuples/page using only completely-scanned
- * pages.
- */
- for (numest = numrows; numest > 0; numest--)
- {
- if (ItemPointerGetBlockNumber(&(rows[numest - 1]->t_self)) != lastblock)
- break;
- }
- if (numest == 0)
- {
- numest = numrows; /* don't have a full page? */
- estblock = lastblock + 1;
- }
- else
- estblock = lastblock;
- tuplesperpage = (double) numest / (double) estblock;
-
- t = (double) numrows; /* t is the # of records processed so far */
- rstate = init_selection_state(targrows);
- for (;;)
- {
- double targpos;
- BlockNumber targblock;
- Buffer targbuffer;
- Page targpage;
- OffsetNumber targoffset,
- maxoffset;
-
- CHECK_FOR_INTERRUPTS();
-
- t = select_next_random_record(t, targrows, &rstate);
- /* Try to read the t'th record in the table */
- targpos = t / tuplesperpage;
- targblock = (BlockNumber) targpos;
- targoffset = ((int) ((targpos - targblock) * tuplesperpage)) +
- FirstOffsetNumber;
- /* Make sure we are past the last selected record */
- if (targblock <= lastblock)
- {
- targblock = lastblock;
- if (targoffset <= lastoffset)
- targoffset = lastoffset + 1;
- }
- /* Loop to find first valid record at or after given position */
-pageloop:;
-
- /*
- * Have we fallen off the end of the relation? (We rely on
- * heap_beginscan to have updated rd_nblocks.)
- */
- if (targblock >= onerel->rd_nblocks)
- break;
-
- /*
- * We must maintain a pin on the target page's buffer to ensure
- * that the maxoffset value stays good (else concurrent VACUUM
- * might delete tuples out from under us). Hence, pin the page
- * until we are done looking at it. We don't maintain a lock on
- * the page, so tuples could get added to it, but we ignore such
- * tuples.
- */
- targbuffer = ReadBuffer(onerel, targblock);
- if (!BufferIsValid(targbuffer))
- elog(ERROR, "acquire_sample_rows: ReadBuffer(%s,%u) failed",
- RelationGetRelationName(onerel), targblock);
- LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
- targpage = BufferGetPage(targbuffer);
- maxoffset = PageGetMaxOffsetNumber(targpage);
- LockBuffer(targbuffer, BUFFER_LOCK_UNLOCK);
-
- for (;;)
- {
- HeapTupleData targtuple;
- Buffer tupbuffer;
-
- if (targoffset > maxoffset)
- {
- /* Fell off end of this page, try next */
- ReleaseBuffer(targbuffer);
- targblock++;
- targoffset = FirstOffsetNumber;
- goto pageloop;
- }
- ItemPointerSet(&targtuple.t_self, targblock, targoffset);
- if (heap_fetch(onerel, SnapshotNow, &targtuple, &tupbuffer,
- false, NULL))
- {
- /*
- * Found a suitable tuple, so save it, replacing one old
- * tuple at random
- */
- int k = (int) (targrows * random_fract());
-
- Assert(k >= 0 && k < targrows);
- heap_freetuple(rows[k]);
- rows[k] = heap_copytuple(&targtuple);
- /* this releases the second pin acquired by heap_fetch: */
- ReleaseBuffer(tupbuffer);
- /* this releases the initial pin: */
- ReleaseBuffer(targbuffer);
- lastblock = targblock;
- lastoffset = targoffset;
- break;
- }
- /* this tuple is dead, so advance to next one on same page */
- targoffset++;
- }
- }
-
- /*
- * Now we need to sort the collected tuples by position (itempointer).
- */
- qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
-
- /*
- * Estimate total number of valid rows in relation.
- */
- *totalrows = floor((double) onerel->rd_nblocks * tuplesperpage + 0.5);
-
- return numrows;
-}
-
-/* Select a random value R uniformly distributed in 0 < R < 1 */
-static double
-random_fract(void)
-{
- long z;
-
- /* random() can produce endpoint values, try again if so */
- do
- {
- z = random();
- } while (!(z > 0 && z < MAX_RANDOM_VALUE));
- /* here 0 < z < MAX_RANDOM_VALUE, so the quotient is strictly in (0,1) */
- return (double) z / (double) MAX_RANDOM_VALUE;
-}
-
-/*
- * These two routines embody Algorithm Z from "Random sampling with a
- * reservoir" by Jeffrey S. Vitter, in ACM Trans. Math. Softw. 11, 1
- * (Mar. 1985), Pages 37-57. While Vitter describes his algorithm in terms
- * of the count S of records to skip before processing another record,
- * it is convenient to work primarily with t, the index (counting from 1)
- * of the last record processed and next record to process. The only extra
- * state needed between calls is W, a random state variable.
- *
- * Note: the original algorithm defines t, S, numer, and denom as integers.
- * Here we express them as doubles to avoid overflow if the number of rows
- * in the table exceeds INT_MAX. The algorithm should work as long as the
- * row count does not become so large that it is not represented accurately
- * in a double (on IEEE-math machines this would be around 2^52 rows).
- *
- * init_selection_state computes the initial W value.
- *
- * Given that we've already processed t records (t >= n),
- * select_next_random_record determines the number of the next record to
- * process.
- */
-static double
-init_selection_state(int n)
-{
- /* n is the reservoir (sample) size; caller passes targrows */
- /* Initial value of W (for use when Algorithm Z is first applied) */
- return exp(-log(random_fract()) / n);
-}
-
-static double
-select_next_random_record(double t, int n, double *stateptr)
-{
- /* The magic constant here is T from Vitter's paper */
- if (t <= (22.0 * n))
- {
- /* Process records using Algorithm X until t is large enough */
- double V,
- quot;
-
- V = random_fract(); /* Generate V */
- t += 1;
- quot = (t - (double) n) / t;
- /* Find min S satisfying (4.1) */
- while (quot > V)
- {
- t += 1;
- quot *= (t - (double) n) / t;
- }
- }
- else
- {
- /* Now apply Algorithm Z */
- double W = *stateptr;
- double term = t - (double) n + 1;
- double S;
-
- for (;;)
- {
- double numer,
- numer_lim,
- denom;
- double U,
- X,
- lhs,
- rhs,
- y,
- tmp;
-
- /* Generate U and X */
- U = random_fract();
- X = t * (W - 1.0);
- S = floor(X); /* S is tentatively set to floor(X) */
- /* Test if U <= h(S)/cg(X) in the manner of (6.3) */
- tmp = (t + 1) / term;
- lhs = exp(log(((U * tmp * tmp) * (term + S)) / (t + X)) / n);
- rhs = (((t + X) / (term + S)) * term) / t;
- if (lhs <= rhs)
- {
- W = rhs / lhs;
- break;
- }
- /* Test if U <= f(S)/cg(X) */
- y = (((U * (t + 1)) / term) * (t + S + 1)) / (t + X);
- if ((double) n < S)
- {
- denom = t;
- numer_lim = term + S;
- }
- else
- {
- denom = t - (double) n + S;
- numer_lim = t + 1;
- }
- for (numer = t + S; numer >= numer_lim; numer -= 1)
- {
- y *= numer / denom;
- denom -= 1;
- }
- W = exp(-log(random_fract()) / n); /* Generate W in advance */
- if (exp(log(y) / n) <= (t + X) / t)
- break;
- }
- /* Skip over S records; the (t + S + 1)'th record is processed next */
- t += S + 1;
- *stateptr = W;
- }
- return t;
-}
-
-/*
- * qsort comparator for sorting rows[] array
- *
- * Orders sampled tuples by physical position: block number first,
- * then offset within the block.
- */
-static int
-compare_rows(const void *a, const void *b)
-{
- HeapTuple ha = *(HeapTuple *) a;
- HeapTuple hb = *(HeapTuple *) b;
- BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
- OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
- BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
- OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);
-
- if (ba < bb)
- return -1;
- if (ba > bb)
- return 1;
- if (oa < ob)
- return -1;
- if (oa > ob)
- return 1;
- return 0;
-}
-
-
-/*
- * compute_minimal_stats() -- compute minimal column statistics
- *
- * We use this when we can find only an "=" operator for the datatype.
- *
- * We determine the fraction of non-null rows, the average width, the
- * most common values, and the (estimated) number of distinct values.
- *
- * The most common values are determined by brute force: we keep a list
- * of previously seen values, ordered by number of times seen, as we scan
- * the samples. A newly seen value is inserted just after the last
- * multiply-seen value, causing the bottommost (oldest) singly-seen value
- * to drop off the list. The accuracy of this method, and also its cost,
- * depend mainly on the length of the list we are willing to keep.
- *
- * stats: per-column working state; on success, results are stored back
- * into it (stats_valid, stanullfrac, stawidth, stadistinct, and one
- * MCV slot). rows[0..numrows-1] is the sample; totalrows is the
- * estimated table row count from acquire_sample_rows.
- */
-static void
-compute_minimal_stats(VacAttrStats *stats,
- TupleDesc tupDesc, double totalrows,
- HeapTuple *rows, int numrows)
-{
- int i;
- int null_cnt = 0;
- int nonnull_cnt = 0;
- int toowide_cnt = 0;
- double total_width = 0;
- bool is_varlena = (!stats->attr->attbyval &&
- stats->attr->attlen == -1);
- FmgrInfo f_cmpeq;
- typedef struct
- {
- Datum value;
- int count;
- } TrackItem;
- TrackItem *track;
- int track_cnt,
- track_max;
- int num_mcv = stats->attr->attstattarget;
-
- /*
- * We track up to 2*n values for an n-element MCV list; but at least
- * 10
- */
- track_max = 2 * num_mcv;
- if (track_max < 10)
- track_max = 10;
- track = (TrackItem *) palloc(track_max * sizeof(TrackItem));
- track_cnt = 0;
-
- /* Set up the datatype's "=" function for comparing sample values */
- fmgr_info(stats->eqfunc, &f_cmpeq);
-
- for (i = 0; i < numrows; i++)
- {
- HeapTuple tuple = rows[i];
- Datum value;
- bool isnull;
- bool match;
- int firstcount1,
- j;
-
- CHECK_FOR_INTERRUPTS();
-
- value = heap_getattr(tuple, stats->attnum, tupDesc, &isnull);
-
- /* Check for null/nonnull */
- if (isnull)
- {
- null_cnt++;
- continue;
- }
- nonnull_cnt++;
-
- /*
- * If it's a varlena field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
- */
- if (is_varlena)
- {
- total_width += VARSIZE(DatumGetPointer(value));
-
- /*
- * If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
- */
- if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
- {
- toowide_cnt++;
- continue;
- }
- value = PointerGetDatum(PG_DETOAST_DATUM(value));
- }
-
- /*
- * See if the value matches anything we're already tracking.
- * firstcount1 remembers the position of the first count-1 entry,
- * which is where a newly seen value will be inserted.
- */
- match = false;
- firstcount1 = track_cnt;
- for (j = 0; j < track_cnt; j++)
- {
- if (DatumGetBool(FunctionCall2(&f_cmpeq, value, track[j].value)))
- {
- match = true;
- break;
- }
- if (j < firstcount1 && track[j].count == 1)
- firstcount1 = j;
- }
-
- if (match)
- {
- /* Found a match */
- track[j].count++;
- /* This value may now need to "bubble up" in the track list */
- while (j > 0 && track[j].count > track[j - 1].count)
- {
- swapDatum(track[j].value, track[j - 1].value);
- swapInt(track[j].count, track[j - 1].count);
- j--;
- }
- }
- else
- {
- /* No match. Insert at head of count-1 list */
- if (track_cnt < track_max)
- track_cnt++;
- for (j = track_cnt - 1; j > firstcount1; j--)
- {
- track[j].value = track[j - 1].value;
- track[j].count = track[j - 1].count;
- }
- if (firstcount1 < track_cnt)
- {
- track[firstcount1].value = value;
- track[firstcount1].count = 1;
- }
- }
- }
-
- /* We can only compute valid stats if we found some non-null values. */
- if (nonnull_cnt > 0)
- {
- int nmultiple,
- summultiple;
-
- stats->stats_valid = true;
- /* Do the simple null-frac and width stats */
- stats->stanullfrac = (double) null_cnt / (double) numrows;
- if (is_varlena)
- stats->stawidth = total_width / (double) nonnull_cnt;
- else
- stats->stawidth = stats->attrtype->typlen;
-
- /* Count the number of values we found multiple times */
- summultiple = 0;
- for (nmultiple = 0; nmultiple < track_cnt; nmultiple++)
- {
- if (track[nmultiple].count == 1)
- break;
- summultiple += track[nmultiple].count;
- }
-
- if (nmultiple == 0)
- {
- /* If we found no repeated values, assume it's a unique column */
- stats->stadistinct = -1.0;
- }
- else if (track_cnt < track_max && toowide_cnt == 0 &&
- nmultiple == track_cnt)
- {
- /*
- * Our track list includes every value in the sample, and
- * every value appeared more than once. Assume the column has
- * just these values.
- */
- stats->stadistinct = track_cnt;
- }
- else
- {
- /*----------
- * Estimate the number of distinct values using the estimator
- * proposed by Haas and Stokes in IBM Research Report RJ 10025:
- * n*d / (n - f1 + f1*n/N)
- * where f1 is the number of distinct values that occurred
- * exactly once in our sample of n rows (from a total of N),
- * and d is the total number of distinct values in the sample.
- * This is their Duj1 estimator; the other estimators they
- * recommend are considerably more complex, and are numerically
- * very unstable when n is much smaller than N.
- *
- * We assume (not very reliably!) that all the multiply-occurring
- * values are reflected in the final track[] list, and the other
- * nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct. Can we do
- * any better?)
- *----------
- */
- int f1 = nonnull_cnt - summultiple;
- int d = f1 + nmultiple;
- double numer, denom, stadistinct;
-
- numer = (double) numrows * (double) d;
- denom = (double) (numrows - f1) +
- (double) f1 * (double) numrows / totalrows;
- stadistinct = numer / denom;
- /* Clamp to sane range in case of roundoff error */
- if (stadistinct < (double) d)
- stadistinct = (double) d;
- if (stadistinct > totalrows)
- stadistinct = totalrows;
- stats->stadistinct = floor(stadistinct + 0.5);
- }
-
- /*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value. (Negative stadistinct encodes a fraction.)
- */
- if (stats->stadistinct > 0.1 * totalrows)
- stats->stadistinct = -(stats->stadistinct / totalrows);
-
- /*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample.
- */
- if (track_cnt < track_max && toowide_cnt == 0 &&
- stats->stadistinct > 0 &&
- track_cnt <= num_mcv)
- {
- /* Track list includes all values seen, and all will fit */
- num_mcv = track_cnt;
- }
- else
- {
- double ndistinct = stats->stadistinct;
- double avgcount,
- mincount;
-
- if (ndistinct < 0)
- ndistinct = -ndistinct * totalrows;
- /* estimate # of occurrences in sample of a typical value */
- avgcount = (double) numrows / ndistinct;
- /* set minimum threshold count to store a value */
- mincount = avgcount * 1.25;
- if (mincount < 2)
- mincount = 2;
- if (num_mcv > track_cnt)
- num_mcv = track_cnt;
- for (i = 0; i < num_mcv; i++)
- {
- if (track[i].count < mincount)
- {
- num_mcv = i;
- break;
- }
- }
- }
-
- /* Generate MCV slot entry */
- if (num_mcv > 0)
- {
- MemoryContext old_context;
- Datum *mcv_values;
- float4 *mcv_freqs;
-
- /* Must copy the target values into anl_context */
- old_context = MemoryContextSwitchTo(anl_context);
- mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
- mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
- for (i = 0; i < num_mcv; i++)
- {
- mcv_values[i] = datumCopy(track[i].value,
- stats->attr->attbyval,
- stats->attr->attlen);
- mcv_freqs[i] = (double) track[i].count / (double) numrows;
- }
- MemoryContextSwitchTo(old_context);
-
- stats->stakind[0] = STATISTIC_KIND_MCV;
- stats->staop[0] = stats->eqopr;
- stats->stanumbers[0] = mcv_freqs;
- stats->numnumbers[0] = num_mcv;
- stats->stavalues[0] = mcv_values;
- stats->numvalues[0] = num_mcv;
- }
- }
-
- /* We don't need to bother cleaning up any of our temporary palloc's */
-}
-
-
-/*
- * compute_scalar_stats() -- compute column statistics
- *
- * We use this when we can find "=" and "<" operators for the datatype.
- *
- * We determine the fraction of non-null rows, the average width, the
- * most common values, the (estimated) number of distinct values, the
- * distribution histogram, and the correlation of physical to logical order.
- *
- * The desired stats can be determined fairly easily after sorting the
- * data values into order.
- *
- * stats: per-column working state; on success, results are stored back
- * into it (stats_valid, stanullfrac, stawidth, stadistinct, and up to
- * three slots: MCV, histogram, correlation). rows[0..numrows-1] is
- * the sample; totalrows is the estimated table row count.
- */
-static void
-compute_scalar_stats(VacAttrStats *stats,
- TupleDesc tupDesc, double totalrows,
- HeapTuple *rows, int numrows)
-{
- int i;
- int null_cnt = 0;
- int nonnull_cnt = 0;
- int toowide_cnt = 0;
- double total_width = 0;
- bool is_varlena = (!stats->attr->attbyval &&
- stats->attr->attlen == -1);
- double corr_xysum;
- RegProcedure cmpFn;
- SortFunctionKind cmpFnKind;
- FmgrInfo f_cmpfn;
- ScalarItem *values;
- int values_cnt = 0;
- int *tupnoLink;
- ScalarMCVItem *track;
- int track_cnt = 0;
- int num_mcv = stats->attr->attstattarget;
- int num_bins = stats->attr->attstattarget;
-
- values = (ScalarItem *) palloc(numrows * sizeof(ScalarItem));
- tupnoLink = (int *) palloc(numrows * sizeof(int));
- track = (ScalarMCVItem *) palloc(num_mcv * sizeof(ScalarMCVItem));
-
- /* Look up the sort-comparison function associated with the "<" operator */
- SelectSortFunction(stats->ltopr, &cmpFn, &cmpFnKind);
- fmgr_info(cmpFn, &f_cmpfn);
-
- /* Initial scan to find sortable values */
- for (i = 0; i < numrows; i++)
- {
- HeapTuple tuple = rows[i];
- Datum value;
- bool isnull;
-
- CHECK_FOR_INTERRUPTS();
-
- value = heap_getattr(tuple, stats->attnum, tupDesc, &isnull);
-
- /* Check for null/nonnull */
- if (isnull)
- {
- null_cnt++;
- continue;
- }
- nonnull_cnt++;
-
- /*
- * If it's a varlena field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
- */
- if (is_varlena)
- {
- total_width += VARSIZE(DatumGetPointer(value));
-
- /*
- * If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
- */
- if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
- {
- toowide_cnt++;
- continue;
- }
- value = PointerGetDatum(PG_DETOAST_DATUM(value));
- }
-
- /* Add it to the list to be sorted */
- values[values_cnt].value = value;
- values[values_cnt].tupno = values_cnt;
- tupnoLink[values_cnt] = values_cnt;
- values_cnt++;
- }
-
- /* We can only compute valid stats if we found some sortable values. */
- if (values_cnt > 0)
- {
- int ndistinct, /* # distinct values in sample */
- nmultiple, /* # that appear multiple times */
- num_hist,
- dups_cnt;
- int slot_idx = 0;
-
- /* Sort the collected values */
- /* (these file-static variables are consumed by compare_scalars) */
- datumCmpFn = &f_cmpfn;
- datumCmpFnKind = cmpFnKind;
- datumCmpTupnoLink = tupnoLink;
- qsort((void *) values, values_cnt,
- sizeof(ScalarItem), compare_scalars);
-
- /*
- * Now scan the values in order, find the most common ones, and
- * also accumulate ordering-correlation statistics.
- *
- * To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are
- * adjacent in the sorted list, so a brute-force approach is to
- * compare successive datum values until we find two that are not
- * equal. However, that requires N-1 invocations of the datum
- * comparison routine, which are completely redundant with work
- * that was done during the sort. (The sort algorithm must at
- * some point have compared each pair of items that are adjacent
- * in the sorted order; otherwise it could not know that it's
- * ordered the pair correctly.) We exploit this by having
- * compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to. At the end of the sort, a
- * ScalarItem's tupnoLink will still point to itself if and only
- * if it is the last item of its group of duplicates (since the
- * group will be ordered by tupno).
- */
- corr_xysum = 0;
- ndistinct = 0;
- nmultiple = 0;
- dups_cnt = 0;
- for (i = 0; i < values_cnt; i++)
- {
- int tupno = values[i].tupno;
-
- corr_xysum += ((double) i) * ((double) tupno);
- dups_cnt++;
- if (tupnoLink[tupno] == tupno)
- {
- /* Reached end of duplicates of this value */
- ndistinct++;
- if (dups_cnt > 1)
- {
- nmultiple++;
- if (track_cnt < num_mcv ||
- dups_cnt > track[track_cnt - 1].count)
- {
- /*
- * Found a new item for the mcv list; find its
- * position, bubbling down old items if needed.
- * Loop invariant is that j points at an empty/
- * replaceable slot.
- */
- int j;
-
- if (track_cnt < num_mcv)
- track_cnt++;
- for (j = track_cnt - 1; j > 0; j--)
- {
- if (dups_cnt <= track[j - 1].count)
- break;
- track[j].count = track[j - 1].count;
- track[j].first = track[j - 1].first;
- }
- track[j].count = dups_cnt;
- track[j].first = i + 1 - dups_cnt;
- }
- }
- dups_cnt = 0;
- }
- }
-
- stats->stats_valid = true;
- /* Do the simple null-frac and width stats */
- stats->stanullfrac = (double) null_cnt / (double) numrows;
- if (is_varlena)
- stats->stawidth = total_width / (double) nonnull_cnt;
- else
- stats->stawidth = stats->attrtype->typlen;
-
- if (nmultiple == 0)
- {
- /* If we found no repeated values, assume it's a unique column */
- stats->stadistinct = -1.0;
- }
- else if (toowide_cnt == 0 && nmultiple == ndistinct)
- {
- /*
- * Every value in the sample appeared more than once. Assume
- * the column has just these values.
- */
- stats->stadistinct = ndistinct;
- }
- else
- {
- /*----------
- * Estimate the number of distinct values using the estimator
- * proposed by Haas and Stokes in IBM Research Report RJ 10025:
- * n*d / (n - f1 + f1*n/N)
- * where f1 is the number of distinct values that occurred
- * exactly once in our sample of n rows (from a total of N),
- * and d is the total number of distinct values in the sample.
- * This is their Duj1 estimator; the other estimators they
- * recommend are considerably more complex, and are numerically
- * very unstable when n is much smaller than N.
- *
- * Overwidth values are assumed to have been distinct.
- *----------
- */
- int f1 = ndistinct - nmultiple + toowide_cnt;
- int d = f1 + nmultiple;
- double numer, denom, stadistinct;
-
- numer = (double) numrows * (double) d;
- denom = (double) (numrows - f1) +
- (double) f1 * (double) numrows / totalrows;
- stadistinct = numer / denom;
- /* Clamp to sane range in case of roundoff error */
- if (stadistinct < (double) d)
- stadistinct = (double) d;
- if (stadistinct > totalrows)
- stadistinct = totalrows;
- stats->stadistinct = floor(stadistinct + 0.5);
- }
-
- /*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value. (Negative stadistinct encodes a fraction.)
- */
- if (stats->stadistinct > 0.1 * totalrows)
- stats->stadistinct = -(stats->stadistinct / totalrows);
-
- /*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample. Also,
- * we won't suppress values that have a frequency of at least 1/K
- * where K is the intended number of histogram bins; such values
- * might otherwise cause us to emit duplicate histogram bin
- * boundaries.
- */
- if (track_cnt == ndistinct && toowide_cnt == 0 &&
- stats->stadistinct > 0 &&
- track_cnt <= num_mcv)
- {
- /* Track list includes all values seen, and all will fit */
- num_mcv = track_cnt;
- }
- else
- {
- double ndistinct = stats->stadistinct;
- double avgcount,
- mincount,
- maxmincount;
-
- if (ndistinct < 0)
- ndistinct = -ndistinct * totalrows;
- /* estimate # of occurrences in sample of a typical value */
- avgcount = (double) numrows / ndistinct;
- /* set minimum threshold count to store a value */
- mincount = avgcount * 1.25;
- if (mincount < 2)
- mincount = 2;
- /* don't let threshold exceed 1/K, however */
- maxmincount = (double) numrows / (double) num_bins;
- if (mincount > maxmincount)
- mincount = maxmincount;
- if (num_mcv > track_cnt)
- num_mcv = track_cnt;
- for (i = 0; i < num_mcv; i++)
- {
- if (track[i].count < mincount)
- {
- num_mcv = i;
- break;
- }
- }
- }
-
- /* Generate MCV slot entry */
- if (num_mcv > 0)
- {
- MemoryContext old_context;
- Datum *mcv_values;
- float4 *mcv_freqs;
-
- /* Must copy the target values into anl_context */
- old_context = MemoryContextSwitchTo(anl_context);
- mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
- mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
- for (i = 0; i < num_mcv; i++)
- {
- mcv_values[i] = datumCopy(values[track[i].first].value,
- stats->attr->attbyval,
- stats->attr->attlen);
- mcv_freqs[i] = (double) track[i].count / (double) numrows;
- }
- MemoryContextSwitchTo(old_context);
-
- stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
- stats->staop[slot_idx] = stats->eqopr;
- stats->stanumbers[slot_idx] = mcv_freqs;
- stats->numnumbers[slot_idx] = num_mcv;
- stats->stavalues[slot_idx] = mcv_values;
- stats->numvalues[slot_idx] = num_mcv;
- slot_idx++;
- }
-
- /*
- * Generate a histogram slot entry if there are at least two
- * distinct values not accounted for in the MCV list. (This
- * ensures the histogram won't collapse to empty or a singleton.)
- */
- num_hist = ndistinct - num_mcv;
- if (num_hist > num_bins)
- num_hist = num_bins + 1;
- if (num_hist >= 2)
- {
- MemoryContext old_context;
- Datum *hist_values;
- int nvals;
-
- /* Sort the MCV items into position order to speed next loop */
- qsort((void *) track, num_mcv,
- sizeof(ScalarMCVItem), compare_mcvs);
-
- /*
- * Collapse out the MCV items from the values[] array.
- *
- * Note we destroy the values[] array here... but we don't need
- * it for anything more. We do, however, still need
- * values_cnt. nvals will be the number of remaining entries
- * in values[].
- */
- if (num_mcv > 0)
- {
- int src,
- dest;
- int j;
-
- src = dest = 0;
- j = 0; /* index of next interesting MCV item */
- while (src < values_cnt)
- {
- int ncopy;
-
- if (j < num_mcv)
- {
- int first = track[j].first;
-
- if (src >= first)
- {
- /* advance past this MCV item */
- src = first + track[j].count;
- j++;
- continue;
- }
- ncopy = first - src;
- }
- else
- ncopy = values_cnt - src;
- memmove(&values[dest], &values[src],
- ncopy * sizeof(ScalarItem));
- src += ncopy;
- dest += ncopy;
- }
- nvals = dest;
- }
- else
- nvals = values_cnt;
- Assert(nvals >= num_hist);
-
- /* Must copy the target values into anl_context */
- old_context = MemoryContextSwitchTo(anl_context);
- hist_values = (Datum *) palloc(num_hist * sizeof(Datum));
- for (i = 0; i < num_hist; i++)
- {
- int pos;
-
- /* evenly-spaced quantile positions over the remaining values */
- pos = (i * (nvals - 1)) / (num_hist - 1);
- hist_values[i] = datumCopy(values[pos].value,
- stats->attr->attbyval,
- stats->attr->attlen);
- }
- MemoryContextSwitchTo(old_context);
-
- stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
- stats->staop[slot_idx] = stats->ltopr;
- stats->stavalues[slot_idx] = hist_values;
- stats->numvalues[slot_idx] = num_hist;
- slot_idx++;
- }
-
- /* Generate a correlation entry if there are multiple values */
- if (values_cnt > 1)
- {
- MemoryContext old_context;
- float4 *corrs;
- double corr_xsum,
- corr_x2sum;
-
- /* Must copy the target values into anl_context */
- old_context = MemoryContextSwitchTo(anl_context);
- corrs = (float4 *) palloc(sizeof(float4));
- MemoryContextSwitchTo(old_context);
-
- /*----------
- * Since we know the x and y value sets are both
- * 0, 1, ..., values_cnt-1
- * we have sum(x) = sum(y) =
- * (values_cnt-1)*values_cnt / 2
- * and sum(x^2) = sum(y^2) =
- * (values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
- *----------
- */
- corr_xsum = ((double) (values_cnt - 1)) *
- ((double) values_cnt) / 2.0;
- corr_x2sum = ((double) (values_cnt - 1)) *
- ((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;
-
- /* And the correlation coefficient reduces to */
- corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
- (values_cnt * corr_x2sum - corr_xsum * corr_xsum);
-
- stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
- stats->staop[slot_idx] = stats->ltopr;
- stats->stanumbers[slot_idx] = corrs;
- stats->numnumbers[slot_idx] = 1;
- slot_idx++;
- }
- }
-
- /* We don't need to bother cleaning up any of our temporary palloc's */
-}
-
-/*
- * qsort comparator for sorting ScalarItems
- *
- * Aside from sorting the items, we update the datumCmpTupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums. The array
- * is indexed by tupno; for each ScalarItem, it contains the highest
- * tupno that that item's datum has been found to be equal to. This allows
- * us to avoid additional comparisons in compute_scalar_stats().
- *
- * datumCmpFn, datumCmpFnKind, and datumCmpTupnoLink are file-static
- * state set up by compute_scalar_stats() just before its qsort call.
- */
-static int
-compare_scalars(const void *a, const void *b)
-{
- Datum da = ((ScalarItem *) a)->value;
- int ta = ((ScalarItem *) a)->tupno;
- Datum db = ((ScalarItem *) b)->value;
- int tb = ((ScalarItem *) b)->tupno;
- int32 compare;
-
- compare = ApplySortFunction(datumCmpFn, datumCmpFnKind,
- da, false, db, false);
- if (compare != 0)
- return compare;
-
- /*
- * The two datums are equal, so update datumCmpTupnoLink[].
- */
- if (datumCmpTupnoLink[ta] < tb)
- datumCmpTupnoLink[ta] = tb;
- if (datumCmpTupnoLink[tb] < ta)
- datumCmpTupnoLink[tb] = ta;
-
- /*
- * For equal datums, sort by tupno
- */
- return ta - tb;
-}
-
-/*
- * qsort comparator for sorting ScalarMCVItems by position
- *
- * Orders items ascending by their "first" index into the sorted
- * values[] array.
- */
-static int
-compare_mcvs(const void *a, const void *b)
-{
- int da = ((ScalarMCVItem *) a)->first;
- int db = ((ScalarMCVItem *) b)->first;
-
- return da - db;
-}
-
-
-/*
- * update_attstats() -- update attribute statistics for one relation
- *
- * Statistics are stored in several places: the pg_class row for the
- * relation has stats about the whole relation, and there is a
- * pg_statistic row for each (non-system) attribute that has ever
- * been analyzed. The pg_class values are updated by VACUUM, not here.
- *
- * pg_statistic rows are just added or updated normally. This means
- * that pg_statistic will probably contain some deleted rows at the
- * completion of a vacuum cycle, unless it happens to get vacuumed last.
- *
- * To keep things simple, we punt for pg_statistic, and don't try
- * to compute or store rows for pg_statistic itself in pg_statistic.
- * This could possibly be made to work, but it's not worth the trouble.
- * Note analyze_rel() has seen to it that we won't come here when
- * vacuuming pg_statistic itself.
- */
-static void
-update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
-{
- Relation sd;
- int attno;
-
- /*
- * We use an ExclusiveLock on pg_statistic to ensure that only one
- * backend is writing it at a time --- without that, we might have to
- * deal with concurrent updates here, and it's not worth the trouble.
- */
- sd = heap_openr(StatisticRelationName, ExclusiveLock);
-
- for (attno = 0; attno < natts; attno++)
- {
- VacAttrStats *stats = vacattrstats[attno];
- FmgrInfo out_function;
- HeapTuple stup,
- oldtup;
- int i,
- k,
- n;
- Datum values[Natts_pg_statistic];
- char nulls[Natts_pg_statistic];
- char replaces[Natts_pg_statistic];
- Relation irelations[Num_pg_statistic_indices];
-
- /* Ignore attr if we weren't able to collect stats */
- if (!stats->stats_valid)
- continue;
-
- fmgr_info(stats->attrtype->typoutput, &out_function);
-
- /*
- * Construct a new pg_statistic tuple
- */
- for (i = 0; i < Natts_pg_statistic; ++i)
- {
- nulls[i] = ' ';
- replaces[i] = 'r';
- }
-
- i = 0;
- values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(stats->attnum); /* staattnum */
- values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
- values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */
- values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
- for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
- {
- values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
- }
- for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
- {
- values[i++] = ObjectIdGetDatum(stats->staop[k]); /* staopN */
- }
- for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
- {
- int nnum = stats->numnumbers[k];
-
- if (nnum > 0)
- {
- Datum *numdatums = (Datum *) palloc(nnum * sizeof(Datum));
- ArrayType *arry;
-
- for (n = 0; n < nnum; n++)
- numdatums[n] = Float4GetDatum(stats->stanumbers[k][n]);
- /* XXX knows more than it should about type float4: */
- arry = construct_array(numdatums, nnum,
- false, sizeof(float4), 'i');
- values[i++] = PointerGetDatum(arry); /* stanumbersN */
- }
- else
- {
- nulls[i] = 'n';
- values[i++] = (Datum) 0;
- }
- }
- for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
- {
- int ntxt = stats->numvalues[k];
-
- if (ntxt > 0)
- {
- Datum *txtdatums = (Datum *) palloc(ntxt * sizeof(Datum));
- ArrayType *arry;
-
- for (n = 0; n < ntxt; n++)
- {
- /*
- * Convert data values to a text string to be inserted
- * into the text array.
- */
- Datum stringdatum;
-
- stringdatum =
- FunctionCall3(&out_function,
- stats->stavalues[k][n],
- ObjectIdGetDatum(stats->attrtype->typelem),
- Int32GetDatum(stats->attr->atttypmod));
- txtdatums[n] = DirectFunctionCall1(textin, stringdatum);
- pfree(DatumGetPointer(stringdatum));
- }
- /* XXX knows more than it should about type text: */
- arry = construct_array(txtdatums, ntxt,
- false, -1, 'i');
- values[i++] = PointerGetDatum(arry); /* stavaluesN */
- }
- else
- {
- nulls[i] = 'n';
- values[i++] = (Datum) 0;
- }
- }
-
- /* Is there already a pg_statistic tuple for this attribute? */
- oldtup = SearchSysCache(STATRELATT,
- ObjectIdGetDatum(relid),
- Int16GetDatum(stats->attnum),
- 0, 0);
-
- if (HeapTupleIsValid(oldtup))
- {
- /* Yes, replace it */
- stup = heap_modifytuple(oldtup,
- sd,
- values,
- nulls,
- replaces);
- ReleaseSysCache(oldtup);
- simple_heap_update(sd, &stup->t_self, stup);
- }
- else
- {
- /* No, insert new tuple */
- stup = heap_formtuple(sd->rd_att, values, nulls);
- simple_heap_insert(sd, stup);
- }
-
- /* update indices too */
- CatalogOpenIndices(Num_pg_statistic_indices, Name_pg_statistic_indices,
- irelations);
- CatalogIndexInsert(irelations, Num_pg_statistic_indices, sd, stup);
- CatalogCloseIndices(Num_pg_statistic_indices, irelations);
-
- heap_freetuple(stup);
- }
-
- /* close rel, but hold lock till upcoming commit */
- heap_close(sd, NoLock);
-}
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
deleted file mode 100644
index 5f40f1617b6..00000000000
--- a/src/backend/commands/async.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * async.c
- * Asynchronous notification: NOTIFY, LISTEN, UNLISTEN
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.87 2002/06/20 20:29:26 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-/*-------------------------------------------------------------------------
- * New Async Notification Model:
- * 1. Multiple backends on same machine. Multiple backends listening on
- * one relation. (Note: "listening on a relation" is not really the
- * right way to think about it, since the notify names need not have
- * anything to do with the names of relations actually in the database.
- * But this terminology is all over the code and docs, and I don't feel
- * like trying to replace it.)
- *
- * 2. There is a tuple in relation "pg_listener" for each active LISTEN,
- * ie, each relname/listenerPID pair. The "notification" field of the
- * tuple is zero when no NOTIFY is pending for that listener, or the PID
- * of the originating backend when a cross-backend NOTIFY is pending.
- * (We skip writing to pg_listener when doing a self-NOTIFY, so the
- * notification field should never be equal to the listenerPID field.)
- *
- * 3. The NOTIFY statement itself (routine Async_Notify) just adds the target
- * relname to a list of outstanding NOTIFY requests. Actual processing
- * happens if and only if we reach transaction commit. At that time (in
- * routine AtCommit_Notify) we scan pg_listener for matching relnames.
- * If the listenerPID in a matching tuple is ours, we just send a notify
- * message to our own front end. If it is not ours, and "notification"
- * is not already nonzero, we set notification to our own PID and send a
- * SIGUSR2 signal to the receiving process (indicated by listenerPID).
- * BTW: if the signal operation fails, we presume that the listener backend
- * crashed without removing this tuple, and remove the tuple for it.
- *
- * 4. Upon receipt of a SIGUSR2 signal, the signal handler can call inbound-
- * notify processing immediately if this backend is idle (ie, it is
- * waiting for a frontend command and is not within a transaction block).
- * Otherwise the handler may only set a flag, which will cause the
- * processing to occur just before we next go idle.
- *
- * 5. Inbound-notify processing consists of scanning pg_listener for tuples
- * matching our own listenerPID and having nonzero notification fields.
- * For each such tuple, we send a message to our frontend and clear the
- * notification field. BTW: this routine has to start/commit its own
- * transaction, since by assumption it is only called from outside any
- * transaction.
- *
- * Although we grab AccessExclusiveLock on pg_listener for any operation,
- * the lock is never held very long, so it shouldn't cause too much of
- * a performance problem.
- *
- * An application that listens on the same relname it notifies will get
- * NOTIFY messages for its own NOTIFYs. These can be ignored, if not useful,
- * by comparing be_pid in the NOTIFY message to the application's own backend's
- * PID. (As of FE/BE protocol 2.0, the backend's PID is provided to the
- * frontend during startup.) The above design guarantees that notifies from
- * other backends will never be missed by ignoring self-notifies. Note,
- * however, that we do *not* guarantee that a separate frontend message will
- * be sent for every outside NOTIFY. Since there is only room for one
- * originating PID in pg_listener, outside notifies occurring at about the
- * same time may be collapsed into a single message bearing the PID of the
- * first outside backend to perform the NOTIFY.
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include <unistd.h>
-#include <signal.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <netinet/in.h>
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/pg_listener.h"
-#include "commands/async.h"
-#include "libpq/libpq.h"
-#include "libpq/pqformat.h"
-#include "miscadmin.h"
-#include "storage/ipc.h"
-#include "tcop/tcopprot.h"
-#include "utils/fmgroids.h"
-#include "utils/ps_status.h"
-#include "utils/syscache.h"
-
-
-/* stuff that we really ought not be touching directly :-( */
-extern TransactionState CurrentTransactionState;
-
-
-/*
- * State for outbound notifies consists of a list of all relnames NOTIFYed
- * in the current transaction. We do not actually perform a NOTIFY until
- * and unless the transaction commits. pendingNotifies is NIL if no
- * NOTIFYs have been done in the current transaction. The List nodes and
- * referenced strings are all palloc'd in TopTransactionContext.
- */
-static List *pendingNotifies = NIL;
-
-/*
- * State for inbound notifies consists of two flags: one saying whether
- * the signal handler is currently allowed to call ProcessIncomingNotify
- * directly, and one saying whether the signal has occurred but the handler
- * was not allowed to call ProcessIncomingNotify at the time.
- *
- * NB: the "volatile" on these declarations is critical! If your compiler
- * does not grok "volatile", you'd be best advised to compile this file
- * with all optimization turned off.
- */
-static volatile int notifyInterruptEnabled = 0;
-static volatile int notifyInterruptOccurred = 0;
-
-/* True if we've registered an on_shmem_exit cleanup */
-static bool unlistenExitRegistered = false;
-
-bool Trace_notify = false;
-
-
-static void Async_UnlistenAll(void);
-static void Async_UnlistenOnExit(void);
-static void ProcessIncomingNotify(void);
-static void NotifyMyFrontEnd(char *relname, int32 listenerPID);
-static bool AsyncExistsPendingNotify(const char *relname);
-static void ClearPendingNotifies(void);
-
-
-/*
- *--------------------------------------------------------------
- * Async_Notify
- *
- * This is executed by the SQL notify command.
- *
- * Adds the relation to the list of pending notifies.
- * Actual notification happens during transaction commit.
- * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- *
- * Results:
- * XXX
- *
- *--------------------------------------------------------------
- */
-void
-Async_Notify(char *relname)
-{
- if (Trace_notify)
- elog(LOG, "Async_Notify: %s", relname);
-
- /* no point in making duplicate entries in the list ... */
- if (!AsyncExistsPendingNotify(relname))
- {
- /*
- * The name list needs to live until end of transaction, so store
- * it in the top transaction context.
- */
- MemoryContext oldcontext;
-
- oldcontext = MemoryContextSwitchTo(TopTransactionContext);
-
- pendingNotifies = lcons(pstrdup(relname), pendingNotifies);
-
- MemoryContextSwitchTo(oldcontext);
- }
-}
-
-/*
- *--------------------------------------------------------------
- * Async_Listen
- *
- * This is executed by the SQL listen command.
- *
- * Register a backend (identified by its Unix PID) as listening
- * on the specified relation.
- *
- * Results:
- * XXX
- *
- * Side effects:
- * pg_listener is updated.
- *
- *--------------------------------------------------------------
- */
-void
-Async_Listen(char *relname, int pid)
-{
- Relation lRel;
- HeapScanDesc scan;
- HeapTuple tuple;
- Datum values[Natts_pg_listener];
- char nulls[Natts_pg_listener];
- int i;
- bool alreadyListener = false;
-
- if (Trace_notify)
- elog(LOG, "Async_Listen: %s", relname);
-
- lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
-
- /* Detect whether we are already listening on this relname */
- scan = heap_beginscan(lRel, SnapshotNow, 0, (ScanKey) NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
-
- if (listener->listenerpid == pid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
- {
- alreadyListener = true;
- /* No need to scan the rest of the table */
- break;
- }
- }
- heap_endscan(scan);
-
- if (alreadyListener)
- {
- heap_close(lRel, AccessExclusiveLock);
- elog(WARNING, "Async_Listen: We are already listening on %s", relname);
- return;
- }
-
- /*
- * OK to insert a new tuple
- */
-
- for (i = 0; i < Natts_pg_listener; i++)
- {
- nulls[i] = ' ';
- values[i] = PointerGetDatum(NULL);
- }
-
- i = 0;
- values[i++] = (Datum) relname;
- values[i++] = (Datum) pid;
- values[i++] = (Datum) 0; /* no notifies pending */
-
- tuple = heap_formtuple(RelationGetDescr(lRel), values, nulls);
- simple_heap_insert(lRel, tuple);
-
-#ifdef NOT_USED /* currently there are no indexes */
- if (RelationGetForm(lRel)->relhasindex)
- {
- Relation idescs[Num_pg_listener_indices];
-
- CatalogOpenIndices(Num_pg_listener_indices, Name_pg_listener_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_listener_indices, lRel, tuple);
- CatalogCloseIndices(Num_pg_listener_indices, idescs);
- }
-#endif
-
- heap_freetuple(tuple);
-
- heap_close(lRel, AccessExclusiveLock);
-
- /*
- * now that we are listening, make sure we will unlisten before dying.
- */
- if (!unlistenExitRegistered)
- {
- on_shmem_exit(Async_UnlistenOnExit, 0);
- unlistenExitRegistered = true;
- }
-}
-
-/*
- *--------------------------------------------------------------
- * Async_Unlisten
- *
- * This is executed by the SQL unlisten command.
- *
- * Remove the backend from the list of listening backends
- * for the specified relation.
- *
- * Results:
- * XXX
- *
- * Side effects:
- * pg_listener is updated.
- *
- *--------------------------------------------------------------
- */
-void
-Async_Unlisten(char *relname, int pid)
-{
- Relation lRel;
- HeapScanDesc scan;
- HeapTuple tuple;
-
- /* Handle specially the `unlisten "*"' command */
- if ((!relname) || (*relname == '\0') || (strcmp(relname, "*") == 0))
- {
- Async_UnlistenAll();
- return;
- }
-
- if (Trace_notify)
- elog(LOG, "Async_Unlisten %s", relname);
-
- lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
-
- scan = heap_beginscan(lRel, SnapshotNow, 0, (ScanKey) NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
-
- if (listener->listenerpid == pid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
- {
- /* Found the matching tuple, delete it */
- simple_heap_delete(lRel, &tuple->t_self);
-
- /*
- * We assume there can be only one match, so no need to scan
- * the rest of the table
- */
- break;
- }
- }
- heap_endscan(scan);
-
- heap_close(lRel, AccessExclusiveLock);
-
- /*
- * We do not complain about unlistening something not being listened;
- * should we?
- */
-}
-
-/*
- *--------------------------------------------------------------
- * Async_UnlistenAll
- *
- * Unlisten all relations for this backend.
- *
- * This is invoked by UNLISTEN "*" command, and also at backend exit.
- *
- * Results:
- * XXX
- *
- * Side effects:
- * pg_listener is updated.
- *
- *--------------------------------------------------------------
- */
-static void
-Async_UnlistenAll(void)
-{
- Relation lRel;
- TupleDesc tdesc;
- HeapScanDesc scan;
- HeapTuple lTuple;
- ScanKeyData key[1];
-
- if (Trace_notify)
- elog(LOG, "Async_UnlistenAll");
-
- lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
- tdesc = RelationGetDescr(lRel);
-
- /* Find and delete all entries with my listenerPID */
- ScanKeyEntryInitialize(&key[0], 0,
- Anum_pg_listener_pid,
- F_INT4EQ,
- Int32GetDatum(MyProcPid));
- scan = heap_beginscan(lRel, SnapshotNow, 1, key);
-
- while ((lTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- simple_heap_delete(lRel, &lTuple->t_self);
-
- heap_endscan(scan);
- heap_close(lRel, AccessExclusiveLock);
-}
-
-/*
- *--------------------------------------------------------------
- * Async_UnlistenOnExit
- *
- * Clean up the pg_listener table at backend exit.
- *
- * This is executed if we have done any LISTENs in this backend.
- * It might not be necessary anymore, if the user UNLISTENed everything,
- * but we don't try to detect that case.
- *
- * Results:
- * XXX
- *
- * Side effects:
- * pg_listener is updated if necessary.
- *
- *--------------------------------------------------------------
- */
-static void
-Async_UnlistenOnExit(void)
-{
- /*
- * We need to start/commit a transaction for the unlisten, but if
- * there is already an active transaction we had better abort that one
- * first. Otherwise we'd end up committing changes that probably
- * ought to be discarded.
- */
- AbortOutOfAnyTransaction();
- /* Now we can do the unlisten */
- StartTransactionCommand();
- Async_UnlistenAll();
- CommitTransactionCommand();
-}
-
-/*
- *--------------------------------------------------------------
- * AtCommit_Notify
- *
- * This is called at transaction commit.
- *
- * If there are outbound notify requests in the pendingNotifies list,
- * scan pg_listener for matching tuples, and either signal the other
- * backend or send a message to our own frontend.
- *
- * NOTE: we are still inside the current transaction, therefore can
- * piggyback on its committing of changes.
- *
- * Results:
- * XXX
- *
- * Side effects:
- * Tuples in pg_listener that have matching relnames and other peoples'
- * listenerPIDs are updated with a nonzero notification field.
- *
- *--------------------------------------------------------------
- */
-void
-AtCommit_Notify(void)
-{
- Relation lRel;
- TupleDesc tdesc;
- HeapScanDesc scan;
- HeapTuple lTuple,
- rTuple;
- Datum value[Natts_pg_listener];
- char repl[Natts_pg_listener],
- nulls[Natts_pg_listener];
-
- if (pendingNotifies == NIL)
- return; /* no NOTIFY statements in this
- * transaction */
-
- /*
- * NOTIFY is disabled if not normal processing mode. This test used to
- * be in xact.c, but it seems cleaner to do it here.
- */
- if (!IsNormalProcessingMode())
- {
- ClearPendingNotifies();
- return;
- }
-
- if (Trace_notify)
- elog(LOG, "AtCommit_Notify");
-
- /* preset data to update notify column to MyProcPid */
- nulls[0] = nulls[1] = nulls[2] = ' ';
- repl[0] = repl[1] = repl[2] = ' ';
- repl[Anum_pg_listener_notify - 1] = 'r';
- value[0] = value[1] = value[2] = (Datum) 0;
- value[Anum_pg_listener_notify - 1] = Int32GetDatum(MyProcPid);
-
- lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
- tdesc = RelationGetDescr(lRel);
- scan = heap_beginscan(lRel, SnapshotNow, 0, (ScanKey) NULL);
-
- while ((lTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(lTuple);
- char *relname = NameStr(listener->relname);
- int32 listenerPID = listener->listenerpid;
-
- if (!AsyncExistsPendingNotify(relname))
- continue;
-
- if (listenerPID == MyProcPid)
- {
- /*
- * Self-notify: no need to bother with table update. Indeed,
- * we *must not* clear the notification field in this path, or
- * we could lose an outside notify, which'd be bad for
- * applications that ignore self-notify messages.
- */
-
- if (Trace_notify)
- elog(LOG, "AtCommit_Notify: notifying self");
-
- NotifyMyFrontEnd(relname, listenerPID);
- }
- else
- {
- if (Trace_notify)
- elog(LOG, "AtCommit_Notify: notifying pid %d",
- listenerPID);
-
- /*
- * If someone has already notified this listener, we don't
- * bother modifying the table, but we do still send a SIGUSR2
- * signal, just in case that backend missed the earlier signal
- * for some reason. It's OK to send the signal first, because
- * the other guy can't read pg_listener until we unlock it.
- */
- if (kill(listenerPID, SIGUSR2) < 0)
- {
- /*
- * Get rid of pg_listener entry if it refers to a PID that
- * no longer exists. Presumably, that backend crashed
- * without deleting its pg_listener entries. This code
- * used to only delete the entry if errno==ESRCH, but as
- * far as I can see we should just do it for any failure
- * (certainly at least for EPERM too...)
- */
- simple_heap_delete(lRel, &lTuple->t_self);
- }
- else if (listener->notification == 0)
- {
- rTuple = heap_modifytuple(lTuple, lRel,
- value, nulls, repl);
- simple_heap_update(lRel, &lTuple->t_self, rTuple);
-
-#ifdef NOT_USED /* currently there are no indexes */
- if (RelationGetForm(lRel)->relhasindex)
- {
- Relation idescs[Num_pg_listener_indices];
-
- CatalogOpenIndices(Num_pg_listener_indices, Name_pg_listener_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_listener_indices, lRel, rTuple);
- CatalogCloseIndices(Num_pg_listener_indices, idescs);
- }
-#endif
- }
- }
- }
-
- heap_endscan(scan);
-
- /*
- * We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that notified backends see our tuple updates when they look.
- * Else they might disregard the signal, which would make the
- * application programmer very unhappy.
- */
- heap_close(lRel, NoLock);
-
- ClearPendingNotifies();
-
- if (Trace_notify)
- elog(LOG, "AtCommit_Notify: done");
-}
-
-/*
- *--------------------------------------------------------------
- * AtAbort_Notify
- *
- * This is called at transaction abort.
- *
- * Gets rid of pending outbound notifies that we would have executed
- * if the transaction got committed.
- *
- * Results:
- * XXX
- *
- *--------------------------------------------------------------
- */
-void
-AtAbort_Notify(void)
-{
- ClearPendingNotifies();
-}
-
-/*
- *--------------------------------------------------------------
- * Async_NotifyHandler
- *
- * This is the signal handler for SIGUSR2.
- *
- * If we are idle (notifyInterruptEnabled is set), we can safely invoke
- * ProcessIncomingNotify directly. Otherwise, just set a flag
- * to do it later.
- *
- * Results:
- * none
- *
- * Side effects:
- * per above
- *--------------------------------------------------------------
- */
-void
-Async_NotifyHandler(SIGNAL_ARGS)
-{
- int save_errno = errno;
-
- /*
- * Note: this is a SIGNAL HANDLER. You must be very wary what you do
- * here. Some helpful soul had this routine sprinkled with TPRINTFs,
- * which would likely lead to corruption of stdio buffers if they were
- * ever turned on.
- */
-
- if (notifyInterruptEnabled)
- {
- /*
- * I'm not sure whether some flavors of Unix might allow another
- * SIGUSR2 occurrence to recursively interrupt this routine. To
- * cope with the possibility, we do the same sort of dance that
- * EnableNotifyInterrupt must do --- see that routine for
- * comments.
- */
- notifyInterruptEnabled = 0; /* disable any recursive signal */
- notifyInterruptOccurred = 1; /* do at least one iteration */
- for (;;)
- {
- notifyInterruptEnabled = 1;
- if (!notifyInterruptOccurred)
- break;
- notifyInterruptEnabled = 0;
- if (notifyInterruptOccurred)
- {
- /* Here, it is finally safe to do stuff. */
- if (Trace_notify)
- elog(LOG, "Async_NotifyHandler: perform async notify");
-
- ProcessIncomingNotify();
-
- if (Trace_notify)
- elog(LOG, "Async_NotifyHandler: done");
- }
- }
- }
- else
- {
- /*
- * In this path it is NOT SAFE to do much of anything, except
- * this:
- */
- notifyInterruptOccurred = 1;
- }
-
- errno = save_errno;
-}
-
-/*
- * --------------------------------------------------------------
- * EnableNotifyInterrupt
- *
- * This is called by the PostgresMain main loop just before waiting
- * for a frontend command. If we are truly idle (ie, *not* inside
- * a transaction block), then process any pending inbound notifies,
- * and enable the signal handler to process future notifies directly.
- *
- * NOTE: the signal handler starts out disabled, and stays so until
- * PostgresMain calls this the first time.
- * --------------------------------------------------------------
- */
-void
-EnableNotifyInterrupt(void)
-{
- if (CurrentTransactionState->blockState != TRANS_DEFAULT)
- return; /* not really idle */
-
- /*
- * This code is tricky because we are communicating with a signal
- * handler that could interrupt us at any point. If we just checked
- * notifyInterruptOccurred and then set notifyInterruptEnabled, we
- * could fail to respond promptly to a signal that happens in between
- * those two steps. (A very small time window, perhaps, but Murphy's
- * Law says you can hit it...) Instead, we first set the enable flag,
- * then test the occurred flag. If we see an unserviced interrupt has
- * occurred, we re-clear the enable flag before going off to do the
- * service work. (That prevents re-entrant invocation of
- * ProcessIncomingNotify() if another interrupt occurs.) If an
- * interrupt comes in between the setting and clearing of
- * notifyInterruptEnabled, then it will have done the service work and
- * left notifyInterruptOccurred zero, so we have to check again after
- * clearing enable. The whole thing has to be in a loop in case
- * another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no
- * unserviced interrupt.
- *
- * NB: an overenthusiastic optimizing compiler could easily break this
- * code. Hopefully, they all understand what "volatile" means these
- * days.
- */
- for (;;)
- {
- notifyInterruptEnabled = 1;
- if (!notifyInterruptOccurred)
- break;
- notifyInterruptEnabled = 0;
- if (notifyInterruptOccurred)
- {
- if (Trace_notify)
- elog(LOG, "EnableNotifyInterrupt: perform async notify");
-
- ProcessIncomingNotify();
-
- if (Trace_notify)
- elog(LOG, "EnableNotifyInterrupt: done");
- }
- }
-}
-
-/*
- * --------------------------------------------------------------
- * DisableNotifyInterrupt
- *
- * This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of inbound notifies
- * is disabled until the next EnableNotifyInterrupt call.
- * --------------------------------------------------------------
- */
-void
-DisableNotifyInterrupt(void)
-{
- notifyInterruptEnabled = 0;
-}
-
-/*
- * --------------------------------------------------------------
- * ProcessIncomingNotify
- *
- * Deal with arriving NOTIFYs from other backends.
- * This is called either directly from the SIGUSR2 signal handler,
- * or the next time control reaches the outer idle loop.
- * Scan pg_listener for arriving notifies, report them to my front end,
- * and clear the notification field in pg_listener until next time.
- *
- * NOTE: since we are outside any transaction, we must create our own.
- *
- * Results:
- * XXX
- *
- * --------------------------------------------------------------
- */
-static void
-ProcessIncomingNotify(void)
-{
- Relation lRel;
- TupleDesc tdesc;
- ScanKeyData key[1];
- HeapScanDesc scan;
- HeapTuple lTuple,
- rTuple;
- Datum value[Natts_pg_listener];
- char repl[Natts_pg_listener],
- nulls[Natts_pg_listener];
-
- if (Trace_notify)
- elog(LOG, "ProcessIncomingNotify");
-
- set_ps_display("async_notify");
-
- notifyInterruptOccurred = 0;
-
- StartTransactionCommand();
-
- lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
- tdesc = RelationGetDescr(lRel);
-
- /* Scan only entries with my listenerPID */
- ScanKeyEntryInitialize(&key[0], 0,
- Anum_pg_listener_pid,
- F_INT4EQ,
- Int32GetDatum(MyProcPid));
- scan = heap_beginscan(lRel, SnapshotNow, 1, key);
-
- /* Prepare data for rewriting 0 into notification field */
- nulls[0] = nulls[1] = nulls[2] = ' ';
- repl[0] = repl[1] = repl[2] = ' ';
- repl[Anum_pg_listener_notify - 1] = 'r';
- value[0] = value[1] = value[2] = (Datum) 0;
- value[Anum_pg_listener_notify - 1] = Int32GetDatum(0);
-
- while ((lTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(lTuple);
- char *relname = NameStr(listener->relname);
- int32 sourcePID = listener->notification;
-
- if (sourcePID != 0)
- {
- /* Notify the frontend */
-
- if (Trace_notify)
- elog(LOG, "ProcessIncomingNotify: received %s from %d",
- relname, (int) sourcePID);
-
- NotifyMyFrontEnd(relname, sourcePID);
- /* Rewrite the tuple with 0 in notification column */
- rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl);
- simple_heap_update(lRel, &lTuple->t_self, rTuple);
-
-#ifdef NOT_USED /* currently there are no indexes */
- if (RelationGetForm(lRel)->relhasindex)
- {
- Relation idescs[Num_pg_listener_indices];
-
- CatalogOpenIndices(Num_pg_listener_indices, Name_pg_listener_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_listener_indices, lRel, rTuple);
- CatalogCloseIndices(Num_pg_listener_indices, idescs);
- }
-#endif
- }
- }
- heap_endscan(scan);
-
- /*
- * We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that other backends see our tuple updates when they look.
- * Otherwise, a transaction started after this one might mistakenly
- * think it doesn't need to send this backend a new NOTIFY.
- */
- heap_close(lRel, NoLock);
-
- CommitTransactionCommand();
-
- /*
- * Must flush the notify messages to ensure frontend gets them
- * promptly.
- */
- pq_flush();
-
- set_ps_display("idle");
-
- if (Trace_notify)
- elog(LOG, "ProcessIncomingNotify: done");
-}
-
-/*
- * Send NOTIFY message to my front end.
- */
-static void
-NotifyMyFrontEnd(char *relname, int32 listenerPID)
-{
- if (whereToSendOutput == Remote)
- {
- StringInfoData buf;
-
- pq_beginmessage(&buf);
- pq_sendbyte(&buf, 'A');
- pq_sendint(&buf, listenerPID, sizeof(int32));
- pq_sendstring(&buf, relname);
- pq_endmessage(&buf);
-
- /*
- * NOTE: we do not do pq_flush() here. For a self-notify, it will
- * happen at the end of the transaction, and for incoming notifies
- * ProcessIncomingNotify will do it after finding all the
- * notifies.
- */
- }
- else
- elog(INFO, "NOTIFY for %s", relname);
-}
-
-/* Does pendingNotifies include the given relname? */
-static bool
-AsyncExistsPendingNotify(const char *relname)
-{
- List *p;
-
- foreach(p, pendingNotifies)
- {
- /* Use NAMEDATALEN for relname comparison. DZ - 26-08-1996 */
- if (strncmp((const char *) lfirst(p), relname, NAMEDATALEN) == 0)
- return true;
- }
-
- return false;
-}
-
-/* Clear the pendingNotifies list. */
-static void
-ClearPendingNotifies(void)
-{
- /*
- * We used to have to explicitly deallocate the list members and
- * nodes, because they were malloc'd. Now, since we know they are
- * palloc'd in TopTransactionContext, we need not do that --- they'll
- * go away automatically at transaction exit. We need only reset the
- * list head pointer.
- */
- pendingNotifies = NIL;
-}
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
deleted file mode 100644
index 3306943fb04..00000000000
--- a/src/backend/commands/cluster.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * cluster.c
- * Paul Brown's implementation of cluster index.
- *
- * I am going to use the rename function as a model for this in the
- * parser and executor, and the vacuum code as an example in this
- * file. As I go - in contrast to the rest of postgres - there will
- * be BUCKETS of comments. This is to allow reviewers to understand
- * my (probably bogus) assumptions about the way this works.
- * [pbrown '94]
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994-5, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.82 2002/06/20 20:29:26 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "catalog/heap.h"
-#include "catalog/index.h"
-#include "catalog/pg_index.h"
-#include "catalog/pg_proc.h"
-#include "commands/cluster.h"
-#include "commands/tablecmds.h"
-#include "miscadmin.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-static Oid copy_heap(Oid OIDOldHeap, const char *NewName);
-static Oid copy_index(Oid OIDOldIndex, Oid OIDNewHeap,
- const char *NewIndexName);
-static void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
-
-/*
- * cluster
- *
- * STILL TO DO:
- * Create a list of all the other indexes on this relation. Because
- * the cluster will wreck all the tids, I'll need to destroy bogus
- * indexes. The user will have to re-create them. Not nice, but
- * I'm not a nice guy. The alternative is to try some kind of post
- * destroy re-build. This may be possible. I'll check out what the
- * index create functions want in the way of parameters. On the other
- * hand, re-creating n indexes may blow out the space.
- */
-void
-cluster(RangeVar *oldrelation, char *oldindexname)
-{
- Oid OIDOldHeap,
- OIDOldIndex,
- OIDNewHeap,
- OIDNewIndex;
- Relation OldHeap,
- OldIndex;
- char NewHeapName[NAMEDATALEN];
- char NewIndexName[NAMEDATALEN];
-
- /*
- * We grab exclusive access to the target rel and index for the
- * duration of the transaction.
- */
- OldHeap = heap_openrv(oldrelation, AccessExclusiveLock);
- OIDOldHeap = RelationGetRelid(OldHeap);
-
- /*
- * The index is expected to be in the same namespace as the relation.
- */
- OIDOldIndex = get_relname_relid(oldindexname,
- RelationGetNamespace(OldHeap));
- if (!OidIsValid(OIDOldIndex))
- elog(ERROR, "CLUSTER: cannot find index \"%s\" for table \"%s\"",
- oldindexname, oldrelation->relname);
- OldIndex = index_open(OIDOldIndex);
- LockRelation(OldIndex, AccessExclusiveLock);
-
- /*
- * Check that index is in fact an index on the given relation
- */
- if (OldIndex->rd_index->indrelid != OIDOldHeap)
- elog(ERROR, "CLUSTER: \"%s\" is not an index for table \"%s\"",
- oldindexname, oldrelation->relname);
-
- /* Drop relcache refcnts, but do NOT give up the locks */
- heap_close(OldHeap, NoLock);
- index_close(OldIndex);
-
- /*
- * Create the new heap with a temporary name.
- */
- snprintf(NewHeapName, NAMEDATALEN, "temp_%u", OIDOldHeap);
-
- OIDNewHeap = copy_heap(OIDOldHeap, NewHeapName);
-
- /* We do not need CommandCounterIncrement() because copy_heap did it. */
-
- /*
- * Copy the heap data into the new table in the desired order.
- */
- rebuildheap(OIDNewHeap, OIDOldHeap, OIDOldIndex);
-
- /* To make the new heap's data visible. */
- CommandCounterIncrement();
-
- /* Create new index over the tuples of the new heap. */
- snprintf(NewIndexName, NAMEDATALEN, "temp_%u", OIDOldIndex);
-
- OIDNewIndex = copy_index(OIDOldIndex, OIDNewHeap, NewIndexName);
-
- CommandCounterIncrement();
-
- /* Destroy old heap (along with its index) and rename new. */
- heap_drop_with_catalog(OIDOldHeap, allowSystemTableMods);
-
- CommandCounterIncrement();
-
- renamerel(OIDNewHeap, oldrelation->relname);
-
- /* This one might be unnecessary, but let's be safe. */
- CommandCounterIncrement();
-
- renamerel(OIDNewIndex, oldindexname);
-}
-
-static Oid
-copy_heap(Oid OIDOldHeap, const char *NewName)
-{
- TupleDesc OldHeapDesc,
- tupdesc;
- Oid OIDNewHeap;
- Relation OldHeap;
-
- OldHeap = heap_open(OIDOldHeap, AccessExclusiveLock);
- OldHeapDesc = RelationGetDescr(OldHeap);
-
- /*
- * Need to make a copy of the tuple descriptor, since
- * heap_create_with_catalog modifies it.
- */
- tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
-
- OIDNewHeap = heap_create_with_catalog(NewName,
- RelationGetNamespace(OldHeap),
- tupdesc,
- OldHeap->rd_rel->relkind,
- OldHeap->rd_rel->relisshared,
- OldHeap->rd_rel->relhasoids,
- allowSystemTableMods);
-
- /*
- * Advance command counter so that the newly-created relation's
- * catalog tuples will be visible to heap_open.
- */
- CommandCounterIncrement();
-
- /*
- * If necessary, create a TOAST table for the new relation. Note that
- * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
- * that the TOAST table will be visible for insertion.
- */
- AlterTableCreateToastTable(OIDNewHeap, true);
-
- heap_close(OldHeap, NoLock);
-
- return OIDNewHeap;
-}
-
-static Oid
-copy_index(Oid OIDOldIndex, Oid OIDNewHeap, const char *NewIndexName)
-{
- Oid OIDNewIndex;
- Relation OldIndex,
- NewHeap;
- IndexInfo *indexInfo;
-
- NewHeap = heap_open(OIDNewHeap, AccessExclusiveLock);
- OldIndex = index_open(OIDOldIndex);
-
- /*
- * Create a new index like the old one. To do this I get the info
- * from pg_index, and add a new index with a temporary name (that will
- * be changed later).
- */
- indexInfo = BuildIndexInfo(OldIndex->rd_index);
-
- OIDNewIndex = index_create(OIDNewHeap,
- NewIndexName,
- indexInfo,
- OldIndex->rd_rel->relam,
- OldIndex->rd_index->indclass,
- OldIndex->rd_index->indisprimary,
- allowSystemTableMods);
-
- setRelhasindex(OIDNewHeap, true,
- OldIndex->rd_index->indisprimary, InvalidOid);
-
- index_close(OldIndex);
- heap_close(NewHeap, NoLock);
-
- return OIDNewIndex;
-}
-
-
-static void
-rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
-{
- Relation LocalNewHeap,
- LocalOldHeap,
- LocalOldIndex;
- IndexScanDesc ScanDesc;
- HeapTuple LocalHeapTuple;
-
- /*
- * Open the relations I need. Scan through the OldHeap on the OldIndex
- * and insert each tuple into the NewHeap.
- */
- LocalNewHeap = heap_open(OIDNewHeap, AccessExclusiveLock);
- LocalOldHeap = heap_open(OIDOldHeap, AccessExclusiveLock);
- LocalOldIndex = index_open(OIDOldIndex);
-
- ScanDesc = index_beginscan(LocalOldHeap, LocalOldIndex,
- SnapshotNow, 0, (ScanKey) NULL);
-
- while ((LocalHeapTuple = index_getnext(ScanDesc, ForwardScanDirection)) != NULL)
- {
- /*
- * We must copy the tuple because heap_insert() will overwrite
- * the commit-status fields of the tuple it's handed, and the
- * retrieved tuple will actually be in a disk buffer! Thus,
- * the source relation would get trashed, which is bad news if
- * we abort later on. (This was a bug in releases thru 7.0)
- */
- HeapTuple copiedTuple = heap_copytuple(LocalHeapTuple);
-
- simple_heap_insert(LocalNewHeap, copiedTuple);
- heap_freetuple(copiedTuple);
-
- CHECK_FOR_INTERRUPTS();
- }
-
- index_endscan(ScanDesc);
-
- index_close(LocalOldIndex);
- heap_close(LocalOldHeap, NoLock);
- heap_close(LocalNewHeap, NoLock);
-}
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
deleted file mode 100644
index 72867206143..00000000000
--- a/src/backend/commands/comment.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * comment.c
- *
- * PostgreSQL object comments utility code.
- *
- * Copyright (c) 1999-2001, PostgreSQL Global Development Group
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.48 2002/05/21 22:05:54 tgl Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/indexing.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_database.h"
-#include "catalog/pg_description.h"
-#include "catalog/pg_namespace.h"
-#include "catalog/pg_operator.h"
-#include "catalog/pg_rewrite.h"
-#include "catalog/pg_trigger.h"
-#include "commands/comment.h"
-#include "miscadmin.h"
-#include "parser/parse_func.h"
-#include "parser/parse_oper.h"
-#include "parser/parse_type.h"
-#include "parser/parse.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-/*
- * Static Function Prototypes --
- *
- * The following prototypes are declared static so as not to conflict
- * with any other routines outside this module. These routines are
- * called by the public function CommentObject() routine to create
- * the appropriate comment for the specific object type.
- */
-
-static void CommentRelation(int objtype, List *relname, char *comment);
-static void CommentAttribute(List *qualname, char *comment);
-static void CommentDatabase(List *qualname, char *comment);
-static void CommentNamespace(List *qualname, char *comment);
-static void CommentRule(List *qualname, char *comment);
-static void CommentType(List *typename, char *comment);
-static void CommentAggregate(List *aggregate, List *arguments, char *comment);
-static void CommentProc(List *function, List *arguments, char *comment);
-static void CommentOperator(List *opername, List *arguments, char *comment);
-static void CommentTrigger(List *qualname, char *comment);
-
-
-/*
- * CommentObject --
- *
- * This routine is used to add the associated comment into
- * pg_description for the object specified by the given SQL command.
- */
-void
-CommentObject(CommentStmt *stmt)
-{
- switch (stmt->objtype)
- {
- case INDEX:
- case SEQUENCE:
- case TABLE:
- case VIEW:
- CommentRelation(stmt->objtype, stmt->objname, stmt->comment);
- break;
- case COLUMN:
- CommentAttribute(stmt->objname, stmt->comment);
- break;
- case DATABASE:
- CommentDatabase(stmt->objname, stmt->comment);
- break;
- case RULE:
- CommentRule(stmt->objname, stmt->comment);
- break;
- case TYPE_P:
- CommentType(stmt->objname, stmt->comment);
- break;
- case AGGREGATE:
- CommentAggregate(stmt->objname, stmt->objargs, stmt->comment);
- break;
- case FUNCTION:
- CommentProc(stmt->objname, stmt->objargs, stmt->comment);
- break;
- case OPERATOR:
- CommentOperator(stmt->objname, stmt->objargs, stmt->comment);
- break;
- case TRIGGER:
- CommentTrigger(stmt->objname, stmt->comment);
- break;
- case SCHEMA:
- CommentNamespace(stmt->objname, stmt->comment);
- break;
- default:
- elog(ERROR, "An attempt was made to comment on a unknown type: %d",
- stmt->objtype);
- }
-}
-
-/*
- * CreateComments --
- *
- * Create a comment for the specified object descriptor. Inserts a new
- * pg_description tuple, or replaces an existing one with the same key.
- *
- * If the comment given is null or an empty string, instead delete any
- * existing comment for the specified key.
- */
-void
-CreateComments(Oid oid, Oid classoid, int32 subid, char *comment)
-{
- Relation description;
- ScanKeyData skey[3];
- SysScanDesc sd;
- HeapTuple oldtuple;
- HeapTuple newtuple = NULL;
- Datum values[Natts_pg_description];
- char nulls[Natts_pg_description];
- char replaces[Natts_pg_description];
- int i;
-
- /* Reduce empty-string to NULL case */
- if (comment != NULL && strlen(comment) == 0)
- comment = NULL;
-
- /* Prepare to form or update a tuple, if necessary */
- if (comment != NULL)
- {
- for (i = 0; i < Natts_pg_description; i++)
- {
- nulls[i] = ' ';
- replaces[i] = 'r';
- }
- i = 0;
- values[i++] = ObjectIdGetDatum(oid);
- values[i++] = ObjectIdGetDatum(classoid);
- values[i++] = Int32GetDatum(subid);
- values[i++] = DirectFunctionCall1(textin, CStringGetDatum(comment));
- }
-
- /* Use the index to search for a matching old tuple */
-
- ScanKeyEntryInitialize(&skey[0],
- (bits16) 0x0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(oid));
-
- ScanKeyEntryInitialize(&skey[1],
- (bits16) 0x0,
- (AttrNumber) 2,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(classoid));
-
- ScanKeyEntryInitialize(&skey[2],
- (bits16) 0x0,
- (AttrNumber) 3,
- (RegProcedure) F_INT4EQ,
- Int32GetDatum(subid));
-
- description = heap_openr(DescriptionRelationName, RowExclusiveLock);
-
- sd = systable_beginscan(description, DescriptionObjIndex, true,
- SnapshotNow, 3, skey);
-
- while ((oldtuple = systable_getnext(sd)) != NULL)
- {
- /* Found the old tuple, so delete or update it */
-
- if (comment == NULL)
- simple_heap_delete(description, &oldtuple->t_self);
- else
- {
- newtuple = heap_modifytuple(oldtuple, description, values,
- nulls, replaces);
- simple_heap_update(description, &oldtuple->t_self, newtuple);
- }
-
- break; /* Assume there can be only one match */
- }
-
- systable_endscan(sd);
-
- /* If we didn't find an old tuple, insert a new one */
-
- if (newtuple == NULL && comment != NULL)
- {
- newtuple = heap_formtuple(RelationGetDescr(description),
- values, nulls);
- simple_heap_insert(description, newtuple);
- }
-
- /* Update indexes, if necessary */
-
- if (newtuple != NULL)
- {
- if (RelationGetForm(description)->relhasindex)
- {
- Relation idescs[Num_pg_description_indices];
-
- CatalogOpenIndices(Num_pg_description_indices,
- Name_pg_description_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_description_indices, description,
- newtuple);
- CatalogCloseIndices(Num_pg_description_indices, idescs);
- }
- heap_freetuple(newtuple);
- }
-
- /* Done */
-
- heap_close(description, NoLock);
-}
-
-/*
- * DeleteComments --
- *
- * This routine is used to purge all comments associated with an object,
- * regardless of their objsubid. It is called, for example, when a relation
- * is destroyed.
- */
-void
-DeleteComments(Oid oid, Oid classoid)
-{
- Relation description;
- ScanKeyData skey[2];
- SysScanDesc sd;
- HeapTuple oldtuple;
-
- /* Use the index to search for all matching old tuples */
-
- ScanKeyEntryInitialize(&skey[0],
- (bits16) 0x0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(oid));
-
- ScanKeyEntryInitialize(&skey[1],
- (bits16) 0x0,
- (AttrNumber) 2,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(classoid));
-
- description = heap_openr(DescriptionRelationName, RowExclusiveLock);
-
- sd = systable_beginscan(description, DescriptionObjIndex, true,
- SnapshotNow, 2, skey);
-
- while ((oldtuple = systable_getnext(sd)) != NULL)
- {
- simple_heap_delete(description, &oldtuple->t_self);
- }
-
- /* Done */
-
- systable_endscan(sd);
- heap_close(description, NoLock);
-}
-
-/*
- * CommentRelation --
- *
- * This routine is used to add/drop a comment from a relation, where
- * a relation is a TABLE, SEQUENCE, VIEW or INDEX. The routine simply
- * finds the relation name by searching the system cache, locating
- * the appropriate tuple, and inserting a comment using that
- * tuple's oid. Its parameters are the relation name and comments.
- */
-static void
-CommentRelation(int objtype, List *relname, char *comment)
-{
- Relation relation;
- RangeVar *tgtrel;
-
- tgtrel = makeRangeVarFromNameList(relname);
-
- /*
- * Open the relation. We do this mainly to acquire a lock that
- * ensures no one else drops the relation before we commit. (If they
- * did, they'd fail to remove the entry we are about to make in
- * pg_description.)
- */
- relation = relation_openrv(tgtrel, AccessShareLock);
-
- /* Check object security */
- if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(relation));
-
- /* Next, verify that the relation type matches the intent */
-
- switch (objtype)
- {
- case INDEX:
- if (relation->rd_rel->relkind != RELKIND_INDEX)
- elog(ERROR, "relation \"%s\" is not an index",
- RelationGetRelationName(relation));
- break;
- case TABLE:
- if (relation->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "relation \"%s\" is not a table",
- RelationGetRelationName(relation));
- break;
- case VIEW:
- if (relation->rd_rel->relkind != RELKIND_VIEW)
- elog(ERROR, "relation \"%s\" is not a view",
- RelationGetRelationName(relation));
- break;
- case SEQUENCE:
- if (relation->rd_rel->relkind != RELKIND_SEQUENCE)
- elog(ERROR, "relation \"%s\" is not a sequence",
- RelationGetRelationName(relation));
- break;
- }
-
- /* Create the comment using the relation's oid */
-
- CreateComments(RelationGetRelid(relation), RelOid_pg_class, 0, comment);
-
- /* Done, but hold lock until commit */
- relation_close(relation, NoLock);
-}
-
-/*
- * CommentAttribute --
- *
- * This routine is used to add/drop a comment from an attribute
- * such as a table's column. The routine will check security
- * restrictions and then attempt to look up the specified
- * attribute. If successful, a comment is added/dropped, else an
- * elog() exception is thrown. The parameters are the relation
- * and attribute names, and the comments
- */
-static void
-CommentAttribute(List *qualname, char *comment)
-{
- int nnames;
- List *relname;
- char *attrname;
- RangeVar *rel;
- Relation relation;
- AttrNumber attnum;
-
- /* Separate relname and attr name */
- nnames = length(qualname);
- if (nnames < 2)
- elog(ERROR, "CommentAttribute: must specify relation.attribute");
- relname = ltruncate(nnames-1, listCopy(qualname));
- attrname = strVal(nth(nnames-1, qualname));
-
- /* Open the containing relation to ensure it won't go away meanwhile */
- rel = makeRangeVarFromNameList(relname);
- relation = heap_openrv(rel, AccessShareLock);
-
- /* Check object security */
-
- if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(relation));
-
- /* Now, fetch the attribute number from the system cache */
-
- attnum = get_attnum(RelationGetRelid(relation), attrname);
- if (attnum == InvalidAttrNumber)
- elog(ERROR, "\"%s\" is not an attribute of class \"%s\"",
- attrname, RelationGetRelationName(relation));
-
- /* Create the comment using the relation's oid */
-
- CreateComments(RelationGetRelid(relation), RelOid_pg_class,
- (int32) attnum, comment);
-
- /* Done, but hold lock until commit */
-
- heap_close(relation, NoLock);
-}
-
-/*
- * CommentDatabase --
- *
- * This routine is used to add/drop any user-comments a user might
- * have regarding the specified database. The routine will check
- * security for owner permissions, and, if successful, will then
- * attempt to find the oid of the database specified. Once found,
- * a comment is added/dropped using the CreateComments() routine.
- */
-static void
-CommentDatabase(List *qualname, char *comment)
-{
- char *database;
- Relation pg_database;
- ScanKeyData entry;
- HeapScanDesc scan;
- HeapTuple dbtuple;
- Oid oid;
-
- if (length(qualname) != 1)
- elog(ERROR, "CommentDatabase: database name may not be qualified");
- database = strVal(lfirst(qualname));
-
- /* Only allow comments on the current database */
- if (strcmp(database, DatabaseName) != 0)
- elog(ERROR, "Database comments may only be applied to the current database");
-
- /* First find the tuple in pg_database for the database */
-
- pg_database = heap_openr(DatabaseRelationName, AccessShareLock);
- ScanKeyEntryInitialize(&entry, 0, Anum_pg_database_datname,
- F_NAMEEQ, CStringGetDatum(database));
- scan = heap_beginscan(pg_database, SnapshotNow, 1, &entry);
- dbtuple = heap_getnext(scan, ForwardScanDirection);
-
- /* Validate database exists, and fetch the db oid */
-
- if (!HeapTupleIsValid(dbtuple))
- elog(ERROR, "database \"%s\" does not exist", database);
- oid = dbtuple->t_data->t_oid;
-
- /* Allow if the user matches the database dba or is a superuser */
-
- if (!(superuser() || is_dbadmin(oid)))
- elog(ERROR, "you are not permitted to comment on database \"%s\"",
- database);
-
- /* Create the comments with the pg_database oid */
-
- CreateComments(oid, RelOid_pg_database, 0, comment);
-
- /* Complete the scan and close any opened relations */
-
- heap_endscan(scan);
- heap_close(pg_database, AccessShareLock);
-}
-
-/*
- * CommentNamespace --
- *
- * This routine is used to add/drop any user-comments a user might
- * have regarding the specified namespace. The routine will check
- * security for owner permissions, and, if successful, will then
- * attempt to find the oid of the namespace specified. Once found,
- * a comment is added/dropped using the CreateComments() routine.
- */
-static void
-CommentNamespace(List *qualname, char *comment)
-{
- Oid oid;
- Oid classoid;
- HeapTuple tp;
- char *namespace;
-
- if (length(qualname) != 1)
- elog(ERROR, "CommentSchema: schema name may not be qualified");
- namespace = strVal(lfirst(qualname));
-
- tp = SearchSysCache(NAMESPACENAME,
- CStringGetDatum(namespace),
- 0, 0, 0);
- if (!HeapTupleIsValid(tp))
- elog(ERROR, "CommentSchema: Schema \"%s\" could not be found",
- namespace);
-
- oid = tp->t_data->t_oid;
-
- /* Check object security */
- if (!pg_namespace_ownercheck(oid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, namespace);
-
- /* pg_namespace doesn't have a hard-coded OID, so must look it up */
- classoid = get_relname_relid(NamespaceRelationName, PG_CATALOG_NAMESPACE);
- Assert(OidIsValid(classoid));
-
- /* Call CreateComments() to create/drop the comments */
- CreateComments(oid, classoid, 0, comment);
-
- /* Cleanup */
- ReleaseSysCache(tp);
-}
-
-/*
- * CommentRule --
- *
- * This routine is used to add/drop any user-comments a user might
- * have regarding a specified RULE. The rule for commenting is determined by
- * both its name and the relation to which it refers. The arguments to this
- * function are the rule name and relation name (merged into a qualified
- * name), and the comment to add/drop.
- *
- * Before PG 7.3, rules had unique names across the whole database, and so
- * the syntax was just COMMENT ON RULE rulename, with no relation name.
- * For purposes of backwards compatibility, we support that as long as there
- * is only one rule by the specified name in the database.
- */
-static void
-CommentRule(List *qualname, char *comment)
-{
- int nnames;
- List *relname;
- char *rulename;
- RangeVar *rel;
- Relation relation;
- HeapTuple tuple;
- Oid reloid;
- Oid ruleoid;
- Oid classoid;
- AclResult aclcheck;
-
- /* Separate relname and trig name */
- nnames = length(qualname);
- if (nnames == 1)
- {
- /* Old-style: only a rule name is given */
- Relation RewriteRelation;
- HeapScanDesc scanDesc;
- ScanKeyData scanKeyData;
-
- rulename = strVal(lfirst(qualname));
-
- /* Search pg_rewrite for such a rule */
- ScanKeyEntryInitialize(&scanKeyData,
- 0,
- Anum_pg_rewrite_rulename,
- F_NAMEEQ,
- PointerGetDatum(rulename));
-
- RewriteRelation = heap_openr(RewriteRelationName, AccessShareLock);
- scanDesc = heap_beginscan(RewriteRelation, SnapshotNow,
- 1, &scanKeyData);
-
- tuple = heap_getnext(scanDesc, ForwardScanDirection);
- if (HeapTupleIsValid(tuple))
- {
- reloid = ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class;
- ruleoid = tuple->t_data->t_oid;
- }
- else
- {
- elog(ERROR, "rule \"%s\" does not exist", rulename);
- reloid = ruleoid = 0; /* keep compiler quiet */
- }
-
- if (HeapTupleIsValid(tuple = heap_getnext(scanDesc,
- ForwardScanDirection)))
- elog(ERROR, "There are multiple rules \"%s\""
- "\n\tPlease specify a relation name as well as a rule name",
- rulename);
-
- heap_endscan(scanDesc);
- heap_close(RewriteRelation, AccessShareLock);
-
- /* Open the owning relation to ensure it won't go away meanwhile */
- relation = heap_open(reloid, AccessShareLock);
- }
- else
- {
- /* New-style: rule and relname both provided */
- Assert(nnames >= 2);
- relname = ltruncate(nnames-1, listCopy(qualname));
- rulename = strVal(nth(nnames-1, qualname));
-
- /* Open the owning relation to ensure it won't go away meanwhile */
- rel = makeRangeVarFromNameList(relname);
- relation = heap_openrv(rel, AccessShareLock);
- reloid = RelationGetRelid(relation);
-
- /* Find the rule's pg_rewrite tuple, get its OID */
- tuple = SearchSysCache(RULERELNAME,
- ObjectIdGetDatum(reloid),
- PointerGetDatum(rulename),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "rule \"%s\" does not exist", rulename);
- Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
- ruleoid = tuple->t_data->t_oid;
- ReleaseSysCache(tuple);
- }
-
- /* Check object security */
-
- aclcheck = pg_class_aclcheck(reloid, GetUserId(), ACL_RULE);
- if (aclcheck != ACLCHECK_OK)
- aclcheck_error(aclcheck, rulename);
-
- /* pg_rewrite doesn't have a hard-coded OID, so must look it up */
- classoid = get_relname_relid(RewriteRelationName, PG_CATALOG_NAMESPACE);
- Assert(OidIsValid(classoid));
-
- /* Call CreateComments() to create/drop the comments */
-
- CreateComments(ruleoid, classoid, 0, comment);
-}
-
-/*
- * CommentType --
- *
- * This routine is used to add/drop any user-comments a user might
- * have regarding a TYPE. The type is specified by name
- * and, if found, and the user has appropriate permissions, a
- * comment will be added/dropped using the CreateComments() routine.
- * The type's name and the comments are the parameters to this routine.
- */
-static void
-CommentType(List *typename, char *comment)
-{
- TypeName *tname;
- Oid oid;
-
- /* XXX a bit of a crock; should accept TypeName in COMMENT syntax */
- tname = makeNode(TypeName);
- tname->names = typename;
- tname->typmod = -1;
-
- /* Find the type's oid */
-
- oid = typenameTypeId(tname);
-
- /* Check object security */
-
- if (!pg_type_ownercheck(oid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, TypeNameToString(tname));
-
- /* Call CreateComments() to create/drop the comments */
-
- CreateComments(oid, RelOid_pg_type, 0, comment);
-}
-
-/*
- * CommentAggregate --
- *
- * This routine is used to allow a user to provide comments on an
- * aggregate function. The aggregate function is determined by both
- * its name and its argument type, which, with the comments are
- * the three parameters handed to this routine.
- */
-static void
-CommentAggregate(List *aggregate, List *arguments, char *comment)
-{
- TypeName *aggtype = (TypeName *) lfirst(arguments);
- Oid baseoid,
- oid;
-
- /* First, attempt to determine the base aggregate oid */
- if (aggtype)
- baseoid = typenameTypeId(aggtype);
- else
- baseoid = InvalidOid;
-
- /* Now, attempt to find the actual tuple in pg_proc */
-
- oid = find_aggregate_func("CommentAggregate", aggregate, baseoid);
-
- /* Next, validate the user's attempt to comment */
-
- if (!pg_proc_ownercheck(oid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(aggregate));
-
- /* Call CreateComments() to create/drop the comments */
-
- CreateComments(oid, RelOid_pg_proc, 0, comment);
-}
-
-/*
- * CommentProc --
- *
- * This routine is used to allow a user to provide comments on an
- * procedure (function). The procedure is determined by both
- * its name and its argument list. The argument list is expected to
- * be a series of parsed nodes pointed to by a List object. If the
- * comments string is empty, the associated comment is dropped.
- */
-static void
-CommentProc(List *function, List *arguments, char *comment)
-{
- Oid oid;
-
- /* Look up the procedure */
-
- oid = LookupFuncNameTypeNames(function, arguments,
- true, "CommentProc");
-
- /* Now, validate the user's ability to comment on this function */
-
- if (!pg_proc_ownercheck(oid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(function));
-
- /* Call CreateComments() to create/drop the comments */
-
- CreateComments(oid, RelOid_pg_proc, 0, comment);
-}
-
-/*
- * CommentOperator --
- *
- * This routine is used to allow a user to provide comments on an
- * operator. The operator for commenting is determined by both
- * its name and its argument list which defines the left and right
- * hand types the operator will operate on. The argument list is
- * expected to be a couple of parse nodes pointed to be a List
- * object.
- */
-static void
-CommentOperator(List *opername, List *arguments, char *comment)
-{
- TypeName *typenode1 = (TypeName *) lfirst(arguments);
- TypeName *typenode2 = (TypeName *) lsecond(arguments);
- Oid oid;
- Oid classoid;
-
- /* Look up the operator */
- oid = LookupOperNameTypeNames(opername, typenode1, typenode2,
- "CommentOperator");
-
- /* Valid user's ability to comment on this operator */
- if (!pg_oper_ownercheck(oid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(opername));
-
- /* pg_operator doesn't have a hard-coded OID, so must look it up */
- classoid = get_relname_relid(OperatorRelationName, PG_CATALOG_NAMESPACE);
- Assert(OidIsValid(classoid));
-
- /* Call CreateComments() to create/drop the comments */
- CreateComments(oid, classoid, 0, comment);
-}
-
-/*
- * CommentTrigger --
- *
- * This routine is used to allow a user to provide comments on a
- * trigger event. The trigger for commenting is determined by both
- * its name and the relation to which it refers. The arguments to this
- * function are the trigger name and relation name (merged into a qualified
- * name), and the comment to add/drop.
- */
-static void
-CommentTrigger(List *qualname, char *comment)
-{
- int nnames;
- List *relname;
- char *trigname;
- RangeVar *rel;
- Relation pg_trigger,
- relation;
- HeapTuple triggertuple;
- SysScanDesc scan;
- ScanKeyData entry[2];
- Oid oid;
-
- /* Separate relname and trig name */
- nnames = length(qualname);
- if (nnames < 2)
- elog(ERROR, "CommentTrigger: must specify relation and trigger");
- relname = ltruncate(nnames-1, listCopy(qualname));
- trigname = strVal(nth(nnames-1, qualname));
-
- /* Open the owning relation to ensure it won't go away meanwhile */
- rel = makeRangeVarFromNameList(relname);
- relation = heap_openrv(rel, AccessShareLock);
-
- /* Check object security */
-
- if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(relation));
-
- /*
- * Fetch the trigger tuple from pg_trigger. There can be only one
- * because of the unique index.
- */
- pg_trigger = heap_openr(TriggerRelationName, AccessShareLock);
- ScanKeyEntryInitialize(&entry[0], 0x0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
- ScanKeyEntryInitialize(&entry[1], 0x0,
- Anum_pg_trigger_tgname,
- F_NAMEEQ,
- CStringGetDatum(trigname));
- scan = systable_beginscan(pg_trigger, TriggerRelidNameIndex, true,
- SnapshotNow, 2, entry);
- triggertuple = systable_getnext(scan);
-
- /* If no trigger exists for the relation specified, notify user */
-
- if (!HeapTupleIsValid(triggertuple))
- elog(ERROR, "trigger \"%s\" for relation \"%s\" does not exist",
- trigname, RelationGetRelationName(relation));
-
- oid = triggertuple->t_data->t_oid;
-
- systable_endscan(scan);
-
- /* Create the comments with the pg_trigger oid */
-
- CreateComments(oid, RelationGetRelid(pg_trigger), 0, comment);
-
- /* Done, but hold lock on relation */
-
- heap_close(pg_trigger, AccessShareLock);
- heap_close(relation, NoLock);
-}
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
deleted file mode 100644
index 7410bff04b1..00000000000
--- a/src/backend/commands/copy.c
+++ /dev/null
@@ -1,1363 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * copy.c
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.158 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <unistd.h>
-#include <sys/stat.h>
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "access/printtup.h"
-#include "catalog/catname.h"
-#include "catalog/index.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_index.h"
-#include "catalog/pg_shadow.h"
-#include "catalog/pg_type.h"
-#include "commands/copy.h"
-#include "commands/trigger.h"
-#include "executor/executor.h"
-#include "libpq/libpq.h"
-#include "miscadmin.h"
-#include "tcop/pquery.h"
-#include "tcop/tcopprot.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/relcache.h"
-#include "utils/syscache.h"
-
-#ifdef MULTIBYTE
-#include "mb/pg_wchar.h"
-#endif
-
-#define ISOCTAL(c) (((c) >= '0') && ((c) <= '7'))
-#define OCTVALUE(c) ((c) - '0')
-
-
-/* non-export function prototypes */
-static void CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_print);
-static void CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_print);
-static Oid GetInputFunction(Oid type);
-static Oid GetTypeElement(Oid type);
-static void CopyReadNewline(FILE *fp, int *newline);
-static char *CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_print);
-static void CopyAttributeOut(FILE *fp, char *string, char *delim);
-
-static const char BinarySignature[12] = "PGBCOPY\n\377\r\n\0";
-
-/*
- * Static communication variables ... pretty grotty, but COPY has
- * never been reentrant...
- */
-int copy_lineno = 0; /* exported for use by elog() -- dz */
-static bool fe_eof;
-
-/*
- * These static variables are used to avoid incurring overhead for each
- * attribute processed. attribute_buf is reused on each CopyReadAttribute
- * call to hold the string being read in. Under normal use it will soon
- * grow to a suitable size, and then we will avoid palloc/pfree overhead
- * for subsequent attributes. Note that CopyReadAttribute returns a pointer
- * to attribute_buf's data buffer!
- * encoding, if needed, can be set once at the start of the copy operation.
- */
-static StringInfoData attribute_buf;
-
-#ifdef MULTIBYTE
-static int client_encoding;
-static int server_encoding;
-#endif
-
-
-/*
- * Internal communications functions
- */
-static void CopySendData(void *databuf, int datasize, FILE *fp);
-static void CopySendString(const char *str, FILE *fp);
-static void CopySendChar(char c, FILE *fp);
-static void CopyGetData(void *databuf, int datasize, FILE *fp);
-static int CopyGetChar(FILE *fp);
-static int CopyGetEof(FILE *fp);
-static int CopyPeekChar(FILE *fp);
-static void CopyDonePeek(FILE *fp, int c, bool pickup);
-
-/*
- * CopySendData sends output data either to the file
- * specified by fp or, if fp is NULL, using the standard
- * backend->frontend functions
- *
- * CopySendString does the same for null-terminated strings
- * CopySendChar does the same for single characters
- *
- * NB: no data conversion is applied by these functions
- */
-static void
-CopySendData(void *databuf, int datasize, FILE *fp)
-{
- if (!fp)
- {
- if (pq_putbytes((char *) databuf, datasize))
- fe_eof = true;
- }
- else
- {
- fwrite(databuf, datasize, 1, fp);
- if (ferror(fp))
- elog(ERROR, "CopySendData: %m");
- }
-}
-
-static void
-CopySendString(const char *str, FILE *fp)
-{
- CopySendData((void *) str, strlen(str), fp);
-}
-
-static void
-CopySendChar(char c, FILE *fp)
-{
- CopySendData(&c, 1, fp);
-}
-
-/*
- * CopyGetData reads output data either from the file
- * specified by fp or, if fp is NULL, using the standard
- * backend->frontend functions
- *
- * CopyGetChar does the same for single characters
- * CopyGetEof checks if it's EOF on the input (or, check for EOF result
- * from CopyGetChar)
- *
- * NB: no data conversion is applied by these functions
- */
-static void
-CopyGetData(void *databuf, int datasize, FILE *fp)
-{
- if (!fp)
- {
- if (pq_getbytes((char *) databuf, datasize))
- fe_eof = true;
- }
- else
- fread(databuf, datasize, 1, fp);
-}
-
-static int
-CopyGetChar(FILE *fp)
-{
- if (!fp)
- {
- int ch = pq_getbyte();
-
- if (ch == EOF)
- fe_eof = true;
- return ch;
- }
- else
- return getc(fp);
-}
-
-static int
-CopyGetEof(FILE *fp)
-{
- if (!fp)
- return fe_eof;
- else
- return feof(fp);
-}
-
-/*
- * CopyPeekChar reads a byte in "peekable" mode.
- *
- * after each call to CopyPeekChar, a call to CopyDonePeek _must_
- * follow, unless EOF was returned.
- *
- * CopyDonePeek will either take the peeked char off the stream
- * (if pickup is true) or leave it on the stream (if pickup is false).
- */
-static int
-CopyPeekChar(FILE *fp)
-{
- if (!fp)
- {
- int ch = pq_peekbyte();
-
- if (ch == EOF)
- fe_eof = true;
- return ch;
- }
- else
- return getc(fp);
-}
-
-static void
-CopyDonePeek(FILE *fp, int c, bool pickup)
-{
- if (!fp)
- {
- if (pickup)
- {
- /* We want to pick it up */
- (void) pq_getbyte();
- }
- /* If we didn't want to pick it up, just leave it where it sits */
- }
- else
- {
- if (!pickup)
- {
- /* We don't want to pick it up - so put it back in there */
- ungetc(c, fp);
- }
- /* If we wanted to pick it up, it's already done */
- }
-}
-
-
-
-/*
- * DoCopy executes the SQL COPY statement.
- *
- * Either unload or reload contents of table <relation>, depending on <from>.
- * (<from> = TRUE means we are inserting into the table.)
- *
- * If <pipe> is false, transfer is between the table and the file named
- * <filename>. Otherwise, transfer is between the table and our regular
- * input/output stream. The latter could be either stdin/stdout or a
- * socket, depending on whether we're running under Postmaster control.
- *
- * Iff <binary>, unload or reload in the binary format, as opposed to the
- * more wasteful but more robust and portable text format.
- *
- * Iff <oids>, unload or reload the format that includes OID information.
- * On input, we accept OIDs whether or not the table has an OID column,
- * but silently drop them if it does not. On output, we report an error
- * if the user asks for OIDs in a table that has none (not providing an
- * OID column might seem friendlier, but could seriously confuse programs).
- *
- * If in the text format, delimit columns with delimiter <delim> and print
- * NULL values as <null_print>.
- *
- * When loading in the text format from an input stream (as opposed to
- * a file), recognize a "." on a line by itself as EOF. Also recognize
- * a stream EOF. When unloading in the text format to an output stream,
- * write a "." on a line by itself at the end of the data.
- *
- * Do not allow a Postgres user without superuser privilege to read from
- * or write to a file.
- *
- * Do not allow the copy if user doesn't have proper permission to access
- * the table.
- */
-void
-DoCopy(const CopyStmt *stmt)
-{
- RangeVar *relation = stmt->relation;
- char *filename = stmt->filename;
- bool is_from = stmt->is_from;
- bool pipe = (stmt->filename == NULL);
- List *option;
- DefElem *dbinary = NULL;
- DefElem *doids = NULL;
- DefElem *ddelim = NULL;
- DefElem *dnull = NULL;
- bool binary = false;
- bool oids = false;
- char *delim = "\t";
- char *null_print = "\\N";
- FILE *fp;
- Relation rel;
- AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT);
- AclResult aclresult;
-
- /* Extract options from the statement node tree */
- foreach(option, stmt->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "binary") == 0)
- {
- if (dbinary)
- elog(ERROR, "COPY: conflicting options");
- dbinary = defel;
- }
- else if (strcmp(defel->defname, "oids") == 0)
- {
- if (doids)
- elog(ERROR, "COPY: conflicting options");
- doids = defel;
- }
- else if (strcmp(defel->defname, "delimiter") == 0)
- {
- if (ddelim)
- elog(ERROR, "COPY: conflicting options");
- ddelim = defel;
- }
- else if (strcmp(defel->defname, "null") == 0)
- {
- if (dnull)
- elog(ERROR, "COPY: conflicting options");
- dnull = defel;
- }
- else
- elog(ERROR, "COPY: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (dbinary)
- binary = intVal(dbinary->arg);
- if (doids)
- oids = intVal(doids->arg);
- if (ddelim)
- delim = strVal(ddelim->arg);
- if (dnull)
- null_print = strVal(dnull->arg);
-
- if (binary && ddelim)
- elog(ERROR, "You can not specify the DELIMITER in BINARY mode.");
-
- if (binary && dnull)
- elog(ERROR, "You can not specify NULL in BINARY mode.");
-
- /*
- * Open and lock the relation, using the appropriate lock type.
- */
- rel = heap_openrv(relation, (is_from ? RowExclusiveLock : AccessShareLock));
-
- /* Check permissions. */
- aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
- required_access);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, RelationGetRelationName(rel));
- if (!pipe && !superuser())
- elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
- "directly to or from a file. Anyone can COPY to stdout or "
- "from stdin. Psql's \\copy command also works for anyone.");
-
- /*
- * This restriction is unfortunate, but necessary until the frontend
- * COPY protocol is redesigned to be binary-safe...
- */
- if (pipe && binary)
- elog(ERROR, "COPY BINARY is not supported to stdout or from stdin");
-
- /*
- * Presently, only single-character delimiter strings are supported.
- */
- if (strlen(delim) != 1)
- elog(ERROR, "COPY delimiter must be a single character");
-
- /*
- * Set up variables to avoid per-attribute overhead.
- */
- initStringInfo(&attribute_buf);
-#ifdef MULTIBYTE
- client_encoding = pg_get_client_encoding();
- server_encoding = GetDatabaseEncoding();
-#endif
-
- if (is_from)
- { /* copy from file to database */
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- {
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "You cannot copy view %s",
- RelationGetRelationName(rel));
- else if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
- elog(ERROR, "You cannot change sequence relation %s",
- RelationGetRelationName(rel));
- else
- elog(ERROR, "You cannot copy object %s",
- RelationGetRelationName(rel));
- }
- if (pipe)
- {
- if (IsUnderPostmaster)
- {
- ReceiveCopyBegin();
- fp = NULL;
- }
- else
- fp = stdin;
- }
- else
- {
- struct stat st;
-
- fp = AllocateFile(filename, PG_BINARY_R);
-
- if (fp == NULL)
- elog(ERROR, "COPY command, running in backend with "
- "effective uid %d, could not open file '%s' for "
- "reading. Errno = %s (%d).",
- (int) geteuid(), filename, strerror(errno), errno);
-
- fstat(fileno(fp), &st);
- if (S_ISDIR(st.st_mode))
- {
- FreeFile(fp);
- elog(ERROR, "COPY: %s is a directory.", filename);
- }
- }
- CopyFrom(rel, binary, oids, fp, delim, null_print);
- }
- else
- { /* copy from database to file */
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- {
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "You cannot copy view %s",
- RelationGetRelationName(rel));
- else if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
- elog(ERROR, "You cannot copy sequence %s",
- RelationGetRelationName(rel));
- else
- elog(ERROR, "You cannot copy object %s",
- RelationGetRelationName(rel));
- }
- if (pipe)
- {
- if (IsUnderPostmaster)
- {
- SendCopyBegin();
- pq_startcopyout();
- fp = NULL;
- }
- else
- fp = stdout;
- }
- else
- {
- mode_t oumask; /* Pre-existing umask value */
- struct stat st;
-
- /*
- * Prevent write to relative path ... too easy to shoot
- * oneself in the foot by overwriting a database file ...
- */
- if (filename[0] != '/')
- elog(ERROR, "Relative path not allowed for server side"
- " COPY command.");
-
- oumask = umask((mode_t) 022);
- fp = AllocateFile(filename, PG_BINARY_W);
- umask(oumask);
-
- if (fp == NULL)
- elog(ERROR, "COPY command, running in backend with "
- "effective uid %d, could not open file '%s' for "
- "writing. Errno = %s (%d).",
- (int) geteuid(), filename, strerror(errno), errno);
- fstat(fileno(fp), &st);
- if (S_ISDIR(st.st_mode))
- {
- FreeFile(fp);
- elog(ERROR, "COPY: %s is a directory.", filename);
- }
- }
- CopyTo(rel, binary, oids, fp, delim, null_print);
- }
-
- if (!pipe)
- FreeFile(fp);
- else if (!is_from)
- {
- if (!binary)
- CopySendData("\\.\n", 3, fp);
- if (IsUnderPostmaster)
- pq_endcopyout(false);
- }
- pfree(attribute_buf.data);
-
- /*
- * Close the relation. If reading, we can release the AccessShareLock
- * we got; if writing, we should hold the lock until end of
- * transaction to ensure that updates will be committed before lock is
- * released.
- */
- heap_close(rel, (is_from ? NoLock : AccessShareLock));
-}
-
-
-/*
- * Copy from relation TO file.
- */
-static void
-CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
- char *delim, char *null_print)
-{
- HeapTuple tuple;
- TupleDesc tupDesc;
- HeapScanDesc scandesc;
- int attr_count,
- i;
- Form_pg_attribute *attr;
- FmgrInfo *out_functions;
- Oid *elements;
- bool *isvarlena;
- int16 fld_size;
- char *string;
- Snapshot mySnapshot;
-
- if (oids && !rel->rd_rel->relhasoids)
- elog(ERROR, "COPY: table %s does not have OIDs",
- RelationGetRelationName(rel));
-
- tupDesc = rel->rd_att;
- attr_count = rel->rd_att->natts;
- attr = rel->rd_att->attrs;
-
- /*
- * For binary copy we really only need isvarlena, but compute it
- * all...
- */
- out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
- elements = (Oid *) palloc(attr_count * sizeof(Oid));
- isvarlena = (bool *) palloc(attr_count * sizeof(bool));
- for (i = 0; i < attr_count; i++)
- {
- Oid out_func_oid;
-
- if (!getTypeOutputInfo(attr[i]->atttypid,
- &out_func_oid, &elements[i], &isvarlena[i]))
- elog(ERROR, "COPY: couldn't lookup info for type %u",
- attr[i]->atttypid);
- fmgr_info(out_func_oid, &out_functions[i]);
- }
-
- if (binary)
- {
- /* Generate header for a binary copy */
- int32 tmp;
-
- /* Signature */
- CopySendData((char *) BinarySignature, 12, fp);
- /* Integer layout field */
- tmp = 0x01020304;
- CopySendData(&tmp, sizeof(int32), fp);
- /* Flags field */
- tmp = 0;
- if (oids)
- tmp |= (1 << 16);
- CopySendData(&tmp, sizeof(int32), fp);
- /* No header extension */
- tmp = 0;
- CopySendData(&tmp, sizeof(int32), fp);
- }
-
- mySnapshot = CopyQuerySnapshot();
-
- scandesc = heap_beginscan(rel, mySnapshot, 0, NULL);
-
- while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL)
- {
- bool need_delim = false;
-
- CHECK_FOR_INTERRUPTS();
-
- if (binary)
- {
- /* Binary per-tuple header */
- int16 fld_count = attr_count;
-
- CopySendData(&fld_count, sizeof(int16), fp);
- /* Send OID if wanted --- note fld_count doesn't include it */
- if (oids)
- {
- fld_size = sizeof(Oid);
- CopySendData(&fld_size, sizeof(int16), fp);
- CopySendData(&tuple->t_data->t_oid, sizeof(Oid), fp);
- }
- }
- else
- {
- /* Text format has no per-tuple header, but send OID if wanted */
- if (oids)
- {
- string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tuple->t_data->t_oid)));
- CopySendString(string, fp);
- pfree(string);
- need_delim = true;
- }
- }
-
- for (i = 0; i < attr_count; i++)
- {
- Datum origvalue,
- value;
- bool isnull;
-
- origvalue = heap_getattr(tuple, i + 1, tupDesc, &isnull);
-
- if (!binary)
- {
- if (need_delim)
- CopySendChar(delim[0], fp);
- need_delim = true;
- }
-
- if (isnull)
- {
- if (!binary)
- {
- CopySendString(null_print, fp); /* null indicator */
- }
- else
- {
- fld_size = 0; /* null marker */
- CopySendData(&fld_size, sizeof(int16), fp);
- }
- }
- else
- {
- /*
- * If we have a toasted datum, forcibly detoast it to
- * avoid memory leakage inside the type's output routine
- * (or for binary case, becase we must output untoasted
- * value).
- */
- if (isvarlena[i])
- value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
- else
- value = origvalue;
-
- if (!binary)
- {
- string = DatumGetCString(FunctionCall3(&out_functions[i],
- value,
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod)));
- CopyAttributeOut(fp, string, delim);
- pfree(string);
- }
- else
- {
- fld_size = attr[i]->attlen;
- CopySendData(&fld_size, sizeof(int16), fp);
- if (isvarlena[i])
- {
- /* varlena */
- Assert(fld_size == -1);
- CopySendData(DatumGetPointer(value),
- VARSIZE(value),
- fp);
- }
- else if (!attr[i]->attbyval)
- {
- /* fixed-length pass-by-reference */
- Assert(fld_size > 0);
- CopySendData(DatumGetPointer(value),
- fld_size,
- fp);
- }
- else
- {
- /* pass-by-value */
- Datum datumBuf;
-
- /*
- * We need this horsing around because we don't
- * know how shorter data values are aligned within
- * a Datum.
- */
- store_att_byval(&datumBuf, value, fld_size);
- CopySendData(&datumBuf,
- fld_size,
- fp);
- }
- }
-
- /* Clean up detoasted copy, if any */
- if (value != origvalue)
- pfree(DatumGetPointer(value));
- }
- }
-
- if (!binary)
- CopySendChar('\n', fp);
- }
-
- heap_endscan(scandesc);
-
- if (binary)
- {
- /* Generate trailer for a binary copy */
- int16 fld_count = -1;
-
- CopySendData(&fld_count, sizeof(int16), fp);
- }
-
- pfree(out_functions);
- pfree(elements);
- pfree(isvarlena);
-}
-
-
-/*
- * Copy FROM file to relation.
- */
-static void
-CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
- char *delim, char *null_print)
-{
- HeapTuple tuple;
- TupleDesc tupDesc;
- Form_pg_attribute *attr;
- AttrNumber attr_count;
- FmgrInfo *in_functions;
- Oid *elements;
- int i;
- Oid in_func_oid;
- Datum *values;
- char *nulls;
- bool isnull;
- int done = 0;
- char *string;
- ResultRelInfo *resultRelInfo;
- EState *estate = CreateExecutorState(); /* for ExecConstraints() */
- TupleTable tupleTable;
- TupleTableSlot *slot;
- Oid loaded_oid = InvalidOid;
- bool skip_tuple = false;
- bool file_has_oids;
-
- tupDesc = RelationGetDescr(rel);
- attr = tupDesc->attrs;
- attr_count = tupDesc->natts;
-
- /*
- * We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount of
- * code here that basically duplicated execUtils.c ...)
- */
- resultRelInfo = makeNode(ResultRelInfo);
- resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
- resultRelInfo->ri_RelationDesc = rel;
- resultRelInfo->ri_TrigDesc = rel->trigdesc;
-
- ExecOpenIndices(resultRelInfo);
-
- estate->es_result_relations = resultRelInfo;
- estate->es_num_result_relations = 1;
- estate->es_result_relation_info = resultRelInfo;
-
- /* Set up a dummy tuple table too */
- tupleTable = ExecCreateTupleTable(1);
- slot = ExecAllocTableSlot(tupleTable);
- ExecSetSlotDescriptor(slot, tupDesc, false);
-
- if (!binary)
- {
- in_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
- elements = (Oid *) palloc(attr_count * sizeof(Oid));
- for (i = 0; i < attr_count; i++)
- {
- in_func_oid = (Oid) GetInputFunction(attr[i]->atttypid);
- fmgr_info(in_func_oid, &in_functions[i]);
- elements[i] = GetTypeElement(attr[i]->atttypid);
- }
- file_has_oids = oids; /* must rely on user to tell us this... */
- }
- else
- {
- /* Read and verify binary header */
- char readSig[12];
- int32 tmp;
-
- /* Signature */
- CopyGetData(readSig, 12, fp);
- if (CopyGetEof(fp) ||
- memcmp(readSig, BinarySignature, 12) != 0)
- elog(ERROR, "COPY BINARY: file signature not recognized");
- /* Integer layout field */
- CopyGetData(&tmp, sizeof(int32), fp);
- if (CopyGetEof(fp) ||
- tmp != 0x01020304)
- elog(ERROR, "COPY BINARY: incompatible integer layout");
- /* Flags field */
- CopyGetData(&tmp, sizeof(int32), fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
- file_has_oids = (tmp & (1 << 16)) != 0;
- tmp &= ~(1 << 16);
- if ((tmp >> 16) != 0)
- elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
- /* Header extension length */
- CopyGetData(&tmp, sizeof(int32), fp);
- if (CopyGetEof(fp) ||
- tmp < 0)
- elog(ERROR, "COPY BINARY: bogus file header (missing length)");
- /* Skip extension header, if present */
- while (tmp-- > 0)
- {
- CopyGetData(readSig, 1, fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: bogus file header (wrong length)");
- }
-
- in_functions = NULL;
- elements = NULL;
- }
-
- /* Silently drop incoming OIDs if table does not have OIDs */
- if (!rel->rd_rel->relhasoids)
- oids = false;
-
- values = (Datum *) palloc(attr_count * sizeof(Datum));
- nulls = (char *) palloc(attr_count * sizeof(char));
-
- copy_lineno = 0;
- fe_eof = false;
-
- while (!done)
- {
- CHECK_FOR_INTERRUPTS();
-
- copy_lineno++;
-
- /* Reset the per-output-tuple exprcontext */
- ResetPerTupleExprContext(estate);
-
- /* Initialize all values for row to NULL */
- MemSet(values, 0, attr_count * sizeof(Datum));
- MemSet(nulls, 'n', attr_count * sizeof(char));
-
- if (!binary)
- {
- int newline = 0;
-
- if (file_has_oids)
- {
- string = CopyReadAttribute(fp, &isnull, delim,
- &newline, null_print);
- if (isnull)
- elog(ERROR, "COPY TEXT: NULL Oid");
- else if (string == NULL)
- done = 1; /* end of file */
- else
- {
- loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
- if (loaded_oid == InvalidOid)
- elog(ERROR, "COPY TEXT: Invalid Oid");
- }
- }
-
- for (i = 0; i < attr_count && !done; i++)
- {
- string = CopyReadAttribute(fp, &isnull, delim,
- &newline, null_print);
- if (isnull)
- {
- /* already set values[i] and nulls[i] */
- }
- else if (string == NULL)
- done = 1; /* end of file */
- else
- {
- values[i] = FunctionCall3(&in_functions[i],
- CStringGetDatum(string),
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod));
- nulls[i] = ' ';
- }
- }
- if (!done)
- CopyReadNewline(fp, &newline);
- }
- else
- { /* binary */
- int16 fld_count,
- fld_size;
-
- CopyGetData(&fld_count, sizeof(int16), fp);
- if (CopyGetEof(fp) ||
- fld_count == -1)
- done = 1;
- else
- {
- if (fld_count <= 0 || fld_count > attr_count)
- elog(ERROR, "COPY BINARY: tuple field count is %d, expected %d",
- (int) fld_count, attr_count);
-
- if (file_has_oids)
- {
- CopyGetData(&fld_size, sizeof(int16), fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- if (fld_size != (int16) sizeof(Oid))
- elog(ERROR, "COPY BINARY: sizeof(Oid) is %d, expected %d",
- (int) fld_size, (int) sizeof(Oid));
- CopyGetData(&loaded_oid, sizeof(Oid), fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- if (loaded_oid == InvalidOid)
- elog(ERROR, "COPY BINARY: Invalid Oid");
- }
-
- for (i = 0; i < (int) fld_count; i++)
- {
- CopyGetData(&fld_size, sizeof(int16), fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- if (fld_size == 0)
- continue; /* it's NULL; nulls[i] already set */
- if (fld_size != attr[i]->attlen)
- elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
- i + 1, (int) fld_size, (int) attr[i]->attlen);
- if (fld_size == -1)
- {
- /* varlena field */
- int32 varlena_size;
- Pointer varlena_ptr;
-
- CopyGetData(&varlena_size, sizeof(int32), fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- if (varlena_size < (int32) sizeof(int32))
- elog(ERROR, "COPY BINARY: bogus varlena length");
- varlena_ptr = (Pointer) palloc(varlena_size);
- VARATT_SIZEP(varlena_ptr) = varlena_size;
- CopyGetData(VARDATA(varlena_ptr),
- varlena_size - sizeof(int32),
- fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- values[i] = PointerGetDatum(varlena_ptr);
- }
- else if (!attr[i]->attbyval)
- {
- /* fixed-length pass-by-reference */
- Pointer refval_ptr;
-
- Assert(fld_size > 0);
- refval_ptr = (Pointer) palloc(fld_size);
- CopyGetData(refval_ptr, fld_size, fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- values[i] = PointerGetDatum(refval_ptr);
- }
- else
- {
- /* pass-by-value */
- Datum datumBuf;
-
- /*
- * We need this horsing around because we don't
- * know how shorter data values are aligned within
- * a Datum.
- */
- Assert(fld_size > 0 && fld_size <= sizeof(Datum));
- CopyGetData(&datumBuf, fld_size, fp);
- if (CopyGetEof(fp))
- elog(ERROR, "COPY BINARY: unexpected EOF");
- values[i] = fetch_att(&datumBuf, true, fld_size);
- }
-
- nulls[i] = ' ';
- }
- }
- }
-
- if (done)
- break;
-
- tuple = heap_formtuple(tupDesc, values, nulls);
-
- if (oids && file_has_oids)
- tuple->t_data->t_oid = loaded_oid;
-
- skip_tuple = false;
-
- /* BEFORE ROW INSERT Triggers */
- if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
- {
- HeapTuple newtuple;
-
- newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
-
- if (newtuple == NULL) /* "do nothing" */
- skip_tuple = true;
- else if (newtuple != tuple) /* modified by Trigger(s) */
- {
- heap_freetuple(tuple);
- tuple = newtuple;
- }
- }
-
- if (!skip_tuple)
- {
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
-
- /*
- * Check the constraints of the tuple
- */
- if (rel->rd_att->constr)
- ExecConstraints("CopyFrom", resultRelInfo, slot, estate);
-
- /*
- * OK, store the tuple and create index entries for it
- */
- simple_heap_insert(rel, tuple);
-
- if (resultRelInfo->ri_NumIndices > 0)
- ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
-
- /* AFTER ROW INSERT Triggers */
- if (resultRelInfo->ri_TrigDesc)
- ExecARInsertTriggers(estate, resultRelInfo, tuple);
- }
-
- for (i = 0; i < attr_count; i++)
- {
- if (!attr[i]->attbyval && nulls[i] != 'n')
- pfree(DatumGetPointer(values[i]));
- }
-
- heap_freetuple(tuple);
- }
-
- /*
- * Done, clean up
- */
- copy_lineno = 0;
-
- pfree(values);
- pfree(nulls);
-
- if (!binary)
- {
- pfree(in_functions);
- pfree(elements);
- }
-
- ExecDropTupleTable(tupleTable, true);
-
- ExecCloseIndices(resultRelInfo);
-}
-
-
-static Oid
-GetInputFunction(Oid type)
-{
- HeapTuple typeTuple;
- Oid result;
-
- typeTuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(type),
- 0, 0, 0);
- if (!HeapTupleIsValid(typeTuple))
- elog(ERROR, "GetInputFunction: Cache lookup of type %u failed", type);
- result = ((Form_pg_type) GETSTRUCT(typeTuple))->typinput;
- ReleaseSysCache(typeTuple);
- return result;
-}
-
-static Oid
-GetTypeElement(Oid type)
-{
- HeapTuple typeTuple;
- Oid result;
-
- typeTuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(type),
- 0, 0, 0);
- if (!HeapTupleIsValid(typeTuple))
- elog(ERROR, "GetTypeElement: Cache lookup of type %u failed", type);
- result = ((Form_pg_type) GETSTRUCT(typeTuple))->typelem;
- ReleaseSysCache(typeTuple);
- return result;
-}
-
-
-/*
- * Reads input from fp until an end of line is seen.
- */
-
-static void
-CopyReadNewline(FILE *fp, int *newline)
-{
- if (!*newline)
- {
- elog(WARNING, "CopyReadNewline: extra fields ignored");
- while (!CopyGetEof(fp) && (CopyGetChar(fp) != '\n'));
- }
- *newline = 0;
-}
-
-/*
- * Read the value of a single attribute.
- *
- * Result is either a string, or NULL (if EOF or a null attribute).
- * Note that the caller should not pfree the string!
- *
- * *isnull is set true if a null attribute, else false.
- * delim is the column delimiter string (currently always 1 character).
- * *newline remembers whether we've seen a newline ending this tuple.
- * null_print says how NULL values are represented
- */
-
-static char *
-CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_print)
-{
- int c;
- int delimc = (unsigned char)delim[0];
-
-#ifdef MULTIBYTE
- int mblen;
- unsigned char s[2];
- char *cvt;
- int j;
-
- s[1] = 0;
-#endif
-
- /* reset attribute_buf to empty */
- attribute_buf.len = 0;
- attribute_buf.data[0] = '\0';
-
- /* if last delimiter was a newline return a NULL attribute */
- if (*newline)
- {
- *isnull = (bool) true;
- return NULL;
- }
-
- *isnull = (bool) false; /* set default */
-
- for (;;)
- {
- c = CopyGetChar(fp);
- if (c == EOF)
- goto endOfFile;
- if (c == '\n')
- {
- *newline = 1;
- break;
- }
- if (c == delimc)
- break;
- if (c == '\\')
- {
- c = CopyGetChar(fp);
- if (c == EOF)
- goto endOfFile;
- switch (c)
- {
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- {
- int val;
-
- val = OCTVALUE(c);
- c = CopyPeekChar(fp);
- if (ISOCTAL(c))
- {
- val = (val << 3) + OCTVALUE(c);
- CopyDonePeek(fp, c, true /* pick up */ );
- c = CopyPeekChar(fp);
- if (ISOCTAL(c))
- {
- val = (val << 3) + OCTVALUE(c);
- CopyDonePeek(fp, c, true /* pick up */ );
- }
- else
- {
- if (c == EOF)
- goto endOfFile;
- CopyDonePeek(fp, c, false /* put back */ );
- }
- }
- else
- {
- if (c == EOF)
- goto endOfFile;
- CopyDonePeek(fp, c, false /* put back */ );
- }
- c = val & 0377;
- }
- break;
-
- /*
- * This is a special hack to parse `\N' as
- * <backslash-N> rather then just 'N' to provide
- * compatibility with the default NULL output. -- pe
- */
- case 'N':
- appendStringInfoCharMacro(&attribute_buf, '\\');
- c = 'N';
- break;
- case 'b':
- c = '\b';
- break;
- case 'f':
- c = '\f';
- break;
- case 'n':
- c = '\n';
- break;
- case 'r':
- c = '\r';
- break;
- case 't':
- c = '\t';
- break;
- case 'v':
- c = '\v';
- break;
- case '.':
- c = CopyGetChar(fp);
- if (c != '\n')
- elog(ERROR, "CopyReadAttribute: end of record marker corrupted");
- goto endOfFile;
- }
- }
- appendStringInfoCharMacro(&attribute_buf, c);
-#ifdef MULTIBYTE
- /* XXX shouldn't this be done even when encoding is the same? */
- if (client_encoding != server_encoding)
- {
- /* get additional bytes of the char, if any */
- s[0] = c;
- mblen = pg_encoding_mblen(client_encoding, s);
- for (j = 1; j < mblen; j++)
- {
- c = CopyGetChar(fp);
- if (c == EOF)
- goto endOfFile;
- appendStringInfoCharMacro(&attribute_buf, c);
- }
- }
-#endif
- }
-
-#ifdef MULTIBYTE
- if (client_encoding != server_encoding)
- {
- cvt = (char *) pg_client_to_server((unsigned char *) attribute_buf.data,
- attribute_buf.len);
- if (cvt != attribute_buf.data)
- {
- /* transfer converted data back to attribute_buf */
- attribute_buf.len = 0;
- attribute_buf.data[0] = '\0';
- appendBinaryStringInfo(&attribute_buf, cvt, strlen(cvt));
- pfree(cvt);
- }
- }
-#endif
-
- if (strcmp(attribute_buf.data, null_print) == 0)
- *isnull = true;
-
- return attribute_buf.data;
-
-endOfFile:
- return NULL;
-}
-
-static void
-CopyAttributeOut(FILE *fp, char *server_string, char *delim)
-{
- char *string;
- char c;
- char delimc = delim[0];
-
-#ifdef MULTIBYTE
- bool same_encoding;
- char *string_start;
- int mblen;
- int i;
-#endif
-
-#ifdef MULTIBYTE
- same_encoding = (server_encoding == client_encoding);
- if (!same_encoding)
- {
- string = (char *) pg_server_to_client((unsigned char *) server_string,
- strlen(server_string));
- string_start = string;
- }
- else
- {
- string = server_string;
- string_start = NULL;
- }
-#else
- string = server_string;
-#endif
-
-#ifdef MULTIBYTE
- for (; (c = *string) != '\0'; string += mblen)
-#else
- for (; (c = *string) != '\0'; string++)
-#endif
- {
-#ifdef MULTIBYTE
- mblen = 1;
-#endif
- switch (c)
- {
- case '\b':
- CopySendString("\\b", fp);
- break;
- case '\f':
- CopySendString("\\f", fp);
- break;
- case '\n':
- CopySendString("\\n", fp);
- break;
- case '\r':
- CopySendString("\\r", fp);
- break;
- case '\t':
- CopySendString("\\t", fp);
- break;
- case '\v':
- CopySendString("\\v", fp);
- break;
- case '\\':
- CopySendString("\\\\", fp);
- break;
- default:
- if (c == delimc)
- CopySendChar('\\', fp);
- CopySendChar(c, fp);
-#ifdef MULTIBYTE
- /* XXX shouldn't this be done even when encoding is same? */
- if (!same_encoding)
- {
- /* send additional bytes of the char, if any */
- mblen = pg_encoding_mblen(client_encoding, string);
- for (i = 1; i < mblen; i++)
- CopySendChar(string[i], fp);
- }
-#endif
- break;
- }
- }
-
-#ifdef MULTIBYTE
- if (string_start)
- pfree(string_start); /* pfree pg_server_to_client result */
-#endif
-}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
deleted file mode 100644
index 3526b91b997..00000000000
--- a/src/backend/commands/dbcommands.c
+++ /dev/null
@@ -1,761 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * dbcommands.c
- * Database management commands (create/drop database).
- *
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.95 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/catalog.h"
-#include "catalog/pg_database.h"
-#include "catalog/pg_shadow.h"
-#include "catalog/indexing.h"
-#include "commands/comment.h"
-#include "commands/dbcommands.h"
-#include "miscadmin.h"
-#include "storage/freespace.h"
-#include "storage/sinval.h"
-#include "utils/array.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/guc.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-#ifdef MULTIBYTE
-#include "mb/pg_wchar.h" /* encoding check */
-#endif
-
-
-/* non-export function prototypes */
-static bool get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
- int *encodingP, bool *dbIsTemplateP, Oid *dbLastSysOidP,
- TransactionId *dbVacuumXidP, TransactionId *dbFrozenXidP,
- char *dbpath);
-static bool have_createdb_privilege(void);
-static char *resolve_alt_dbpath(const char *dbpath, Oid dboid);
-static bool remove_dbdirs(const char *real_loc, const char *altloc);
-
-/*
- * CREATE DATABASE
- */
-
-void
-createdb(const CreatedbStmt *stmt)
-{
- char *nominal_loc;
- char *alt_loc;
- char *target_dir;
- char src_loc[MAXPGPATH];
- char buf[2 * MAXPGPATH + 100];
- Oid src_dboid;
- int4 src_owner;
- int src_encoding;
- bool src_istemplate;
- Oid src_lastsysoid;
- TransactionId src_vacuumxid;
- TransactionId src_frozenxid;
- char src_dbpath[MAXPGPATH];
- Relation pg_database_rel;
- HeapTuple tuple;
- TupleDesc pg_database_dsc;
- Datum new_record[Natts_pg_database];
- char new_record_nulls[Natts_pg_database];
- Oid dboid;
- int32 datdba;
- List *option;
- DefElem *downer = NULL;
- DefElem *dpath = NULL;
- DefElem *dtemplate = NULL;
- DefElem *dencoding = NULL;
- char *dbname = stmt->dbname;
- char *dbowner = NULL;
- char *dbpath = NULL;
- char *dbtemplate = NULL;
- int encoding = -1;
-
- /* Extract options from the statement node tree */
- foreach(option, stmt->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "owner") == 0)
- {
- if (downer)
- elog(ERROR, "CREATE DATABASE: conflicting options");
- downer = defel;
- }
- else if (strcmp(defel->defname, "location") == 0)
- {
- if (dpath)
- elog(ERROR, "CREATE DATABASE: conflicting options");
- dpath = defel;
- }
- else if (strcmp(defel->defname, "template") == 0)
- {
- if (dtemplate)
- elog(ERROR, "CREATE DATABASE: conflicting options");
- dtemplate = defel;
- }
- else if (strcmp(defel->defname, "encoding") == 0)
- {
- if (dencoding)
- elog(ERROR, "CREATE DATABASE: conflicting options");
- dencoding = defel;
- }
- else
- elog(ERROR, "CREATE DATABASE: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (downer)
- dbowner = strVal(downer->arg);
- if (dpath)
- dbpath = strVal(dpath->arg);
- if (dtemplate)
- dbtemplate = strVal(dtemplate->arg);
- if (dencoding)
- encoding = intVal(dencoding->arg);
-
- /* obtain sysid of proposed owner */
- if (dbowner)
- datdba = get_usesysid(dbowner); /* will elog if no such user */
- else
- datdba = GetUserId();
-
- if (datdba == (int32) GetUserId())
- {
- /* creating database for self: can be superuser or createdb */
- if (!superuser() && !have_createdb_privilege())
- elog(ERROR, "CREATE DATABASE: permission denied");
- }
- else
- {
- /* creating database for someone else: must be superuser */
- /* note that the someone else need not have any permissions */
- if (!superuser())
- elog(ERROR, "CREATE DATABASE: permission denied");
- }
-
- /* don't call this in a transaction block */
- if (IsTransactionBlock())
- elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");
-
- /*
- * Check for db name conflict. There is a race condition here, since
- * another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole
- * time we are copying the source database doesn't seem like a good
- * idea, so accept possibility of race to create. We will check again
- * after we grab the exclusive lock.
- */
- if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
- elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
-
- /*
- * Lookup database (template) to be cloned.
- */
- if (!dbtemplate)
- dbtemplate = "template1"; /* Default template database name */
-
- if (!get_db_info(dbtemplate, &src_dboid, &src_owner, &src_encoding,
- &src_istemplate, &src_lastsysoid,
- &src_vacuumxid, &src_frozenxid,
- src_dbpath))
- elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
- dbtemplate);
-
- /*
- * Permission check: to copy a DB that's not marked datistemplate, you
- * must be superuser or the owner thereof.
- */
- if (!src_istemplate)
- {
- if (!superuser() && GetUserId() != src_owner )
- elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
- dbtemplate);
- }
-
- /*
- * Determine physical path of source database
- */
- alt_loc = resolve_alt_dbpath(src_dbpath, src_dboid);
- if (!alt_loc)
- alt_loc = GetDatabasePath(src_dboid);
- strcpy(src_loc, alt_loc);
-
- /*
- * The source DB can't have any active backends, except this one
- * (exception is to allow CREATE DB while connected to template1).
- * Otherwise we might copy inconsistent data. This check is not
- * bulletproof, since someone might connect while we are copying...
- */
- if (DatabaseHasActiveBackends(src_dboid, true))
- elog(ERROR, "CREATE DATABASE: source database \"%s\" is being accessed by other users", dbtemplate);
-
- /* If encoding is defaulted, use source's encoding */
- if (encoding < 0)
- encoding = src_encoding;
-
-#ifdef MULTIBYTE
- /* Some encodings are client only */
- if (!PG_VALID_BE_ENCODING(encoding))
- elog(ERROR, "CREATE DATABASE: invalid backend encoding");
-#else
- Assert(encoding == 0); /* zero is PG_SQL_ASCII */
-#endif
-
- /*
- * Preassign OID for pg_database tuple, so that we can compute db
- * path.
- */
- dboid = newoid();
-
- /*
- * Compute nominal location (where we will try to access the
- * database), and resolve alternate physical location if one is
- * specified.
- *
- * If an alternate location is specified but is the same as the
- * normal path, just drop the alternate-location spec (this seems
- * friendlier than erroring out). We must test this case to avoid
- * creating a circular symlink below.
- */
- nominal_loc = GetDatabasePath(dboid);
- alt_loc = resolve_alt_dbpath(dbpath, dboid);
-
- if (alt_loc && strcmp(alt_loc, nominal_loc) == 0)
- {
- alt_loc = NULL;
- dbpath = NULL;
- }
-
- if (strchr(nominal_loc, '\''))
- elog(ERROR, "database path may not contain single quotes");
- if (alt_loc && strchr(alt_loc, '\''))
- elog(ERROR, "database path may not contain single quotes");
- if (strchr(src_loc, '\''))
- elog(ERROR, "database path may not contain single quotes");
- /* ... otherwise we'd be open to shell exploits below */
-
- /*
- * Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers for
- * the source database...)
- */
- BufferSync();
-
- /*
- * Close virtual file descriptors so the kernel has more available for
- * the mkdir() and system() calls below.
- */
- closeAllVfds();
-
- /*
- * Check we can create the target directory --- but then remove it
- * because we rely on cp(1) to create it for real.
- */
- target_dir = alt_loc ? alt_loc : nominal_loc;
-
- if (mkdir(target_dir, S_IRWXU) != 0)
- elog(ERROR, "CREATE DATABASE: unable to create database directory '%s': %m",
- target_dir);
- if (rmdir(target_dir) != 0)
- elog(ERROR, "CREATE DATABASE: unable to remove temp directory '%s': %m",
- target_dir);
-
- /* Make the symlink, if needed */
- if (alt_loc)
- {
- if (symlink(alt_loc, nominal_loc) != 0)
- elog(ERROR, "CREATE DATABASE: could not link '%s' to '%s': %m",
- nominal_loc, alt_loc);
- }
-
- /* Copy the template database to the new location */
- snprintf(buf, sizeof(buf), "cp -r '%s' '%s'", src_loc, target_dir);
-
- if (system(buf) != 0)
- {
- if (remove_dbdirs(nominal_loc, alt_loc))
- elog(ERROR, "CREATE DATABASE: could not initialize database directory");
- else
- elog(ERROR, "CREATE DATABASE: could not initialize database directory; delete failed as well");
- }
-
- /*
- * Now OK to grab exclusive lock on pg_database.
- */
- pg_database_rel = heap_openr(DatabaseRelationName, AccessExclusiveLock);
-
- /* Check to see if someone else created same DB name meanwhile. */
- if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
- {
- /* Don't hold lock while doing recursive remove */
- heap_close(pg_database_rel, AccessExclusiveLock);
- remove_dbdirs(nominal_loc, alt_loc);
- elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
- }
-
- /*
- * Insert a new tuple into pg_database
- */
- pg_database_dsc = RelationGetDescr(pg_database_rel);
-
- /* Form tuple */
- MemSet(new_record, 0, sizeof(new_record));
- MemSet(new_record_nulls, ' ', sizeof(new_record_nulls));
-
- new_record[Anum_pg_database_datname - 1] =
- DirectFunctionCall1(namein, CStringGetDatum(dbname));
- new_record[Anum_pg_database_datdba - 1] = Int32GetDatum(datdba);
- new_record[Anum_pg_database_encoding - 1] = Int32GetDatum(encoding);
- new_record[Anum_pg_database_datistemplate - 1] = BoolGetDatum(false);
- new_record[Anum_pg_database_datallowconn - 1] = BoolGetDatum(true);
- new_record[Anum_pg_database_datlastsysoid - 1] = ObjectIdGetDatum(src_lastsysoid);
- new_record[Anum_pg_database_datvacuumxid - 1] = TransactionIdGetDatum(src_vacuumxid);
- new_record[Anum_pg_database_datfrozenxid - 1] = TransactionIdGetDatum(src_frozenxid);
- /* do not set datpath to null, GetRawDatabaseInfo won't cope */
- new_record[Anum_pg_database_datpath - 1] =
- DirectFunctionCall1(textin, CStringGetDatum(dbpath ? dbpath : ""));
-
- new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
- new_record_nulls[Anum_pg_database_datacl - 1] = 'n';
-
- tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
-
- tuple->t_data->t_oid = dboid; /* override heap_insert's OID
- * selection */
-
- simple_heap_insert(pg_database_rel, tuple);
-
- /*
- * Update indexes
- */
- if (RelationGetForm(pg_database_rel)->relhasindex)
- {
- Relation idescs[Num_pg_database_indices];
-
- CatalogOpenIndices(Num_pg_database_indices,
- Name_pg_database_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_database_indices, pg_database_rel,
- tuple);
- CatalogCloseIndices(Num_pg_database_indices, idescs);
- }
-
- /* Close pg_database, but keep lock till commit */
- heap_close(pg_database_rel, NoLock);
-
- /*
- * Force dirty buffers out to disk, so that newly-connecting backends
- * will see the new database in pg_database right away. (They'll see
- * an uncommitted tuple, but they don't care; see GetRawDatabaseInfo.)
- */
- BufferSync();
-}
-
-
-/*
- * DROP DATABASE
- */
-void
-dropdb(const char *dbname)
-{
- int4 db_owner;
- bool db_istemplate;
- Oid db_id;
- char *alt_loc;
- char *nominal_loc;
- char dbpath[MAXPGPATH];
- Relation pgdbrel;
- HeapScanDesc pgdbscan;
- ScanKeyData key;
- HeapTuple tup;
-
- AssertArg(dbname);
-
- if (strcmp(dbname, DatabaseName) == 0)
- elog(ERROR, "DROP DATABASE: cannot be executed on the currently open database");
-
- if (IsTransactionBlock())
- elog(ERROR, "DROP DATABASE: may not be called in a transaction block");
-
- /*
- * Obtain exclusive lock on pg_database. We need this to ensure that
- * no new backend starts up in the target database while we are
- * deleting it. (Actually, a new backend might still manage to start
- * up, because it will read pg_database without any locking to
- * discover the database's OID. But it will detect its error in
- * ReverifyMyDatabase and shut down before any serious damage is done.
- * See postinit.c.)
- */
- pgdbrel = heap_openr(DatabaseRelationName, AccessExclusiveLock);
-
- if (!get_db_info(dbname, &db_id, &db_owner, NULL,
- &db_istemplate, NULL, NULL, NULL, dbpath))
- elog(ERROR, "DROP DATABASE: database \"%s\" does not exist", dbname);
-
- if (GetUserId() != db_owner && !superuser())
- elog(ERROR, "DROP DATABASE: permission denied");
-
- /*
- * Disallow dropping a DB that is marked istemplate. This is just to
- * prevent people from accidentally dropping template0 or template1;
- * they can do so if they're really determined ...
- */
- if (db_istemplate)
- elog(ERROR, "DROP DATABASE: database is marked as a template");
-
- nominal_loc = GetDatabasePath(db_id);
- alt_loc = resolve_alt_dbpath(dbpath, db_id);
-
- /*
- * Check for active backends in the target database.
- */
- if (DatabaseHasActiveBackends(db_id, false))
- elog(ERROR, "DROP DATABASE: database \"%s\" is being accessed by other users", dbname);
-
- /*
- * Find the database's tuple by OID (should be unique).
- */
- ScanKeyEntryInitialize(&key, 0, ObjectIdAttributeNumber,
- F_OIDEQ, ObjectIdGetDatum(db_id));
-
- pgdbscan = heap_beginscan(pgdbrel, SnapshotNow, 1, &key);
-
- tup = heap_getnext(pgdbscan, ForwardScanDirection);
- if (!HeapTupleIsValid(tup))
- {
- /*
- * This error should never come up since the existence of the
- * database is checked earlier
- */
- elog(ERROR, "DROP DATABASE: Database \"%s\" doesn't exist despite earlier reports to the contrary",
- dbname);
- }
-
- /* Remove the database's tuple from pg_database */
- simple_heap_delete(pgdbrel, &tup->t_self);
-
- heap_endscan(pgdbscan);
-
- /* Delete any comments associated with the database */
- DeleteComments(db_id, RelationGetRelid(pgdbrel));
-
- /*
- * Close pg_database, but keep exclusive lock till commit to ensure
- * that any new backend scanning pg_database will see the tuple dead.
- */
- heap_close(pgdbrel, NoLock);
-
- /*
- * Drop pages for this database that are in the shared buffer cache.
- * This is important to ensure that no remaining backend tries to
- * write out a dirty buffer to the dead database later...
- */
- DropBuffers(db_id);
-
- /*
- * Also, clean out any entries in the shared free space map.
- */
- FreeSpaceMapForgetDatabase(db_id);
-
- /*
- * Remove the database's subdirectory and everything in it.
- */
- remove_dbdirs(nominal_loc, alt_loc);
-
- /*
- * Force dirty buffers out to disk, so that newly-connecting backends
- * will see the database tuple marked dead in pg_database right away.
- * (They'll see an uncommitted deletion, but they don't care; see
- * GetRawDatabaseInfo.)
- */
- BufferSync();
-}
-
-
-
-/*
- * ALTER DATABASE name SET ...
- */
-void
-AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
-{
- char *valuestr;
- HeapTuple tuple,
- newtuple;
- Relation rel;
- ScanKeyData scankey;
- HeapScanDesc scan;
- Datum repl_val[Natts_pg_database];
- char repl_null[Natts_pg_database];
- char repl_repl[Natts_pg_database];
-
- valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
-
- rel = heap_openr(DatabaseRelationName, RowExclusiveLock);
- ScanKeyEntryInitialize(&scankey, 0, Anum_pg_database_datname,
- F_NAMEEQ, NameGetDatum(stmt->dbname));
- scan = heap_beginscan(rel, SnapshotNow, 1, &scankey);
- tuple = heap_getnext(scan, ForwardScanDirection);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "database \"%s\" does not exist", stmt->dbname);
-
- if (!(superuser()
- || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
- elog(ERROR, "permission denied");
-
- MemSet(repl_repl, ' ', sizeof(repl_repl));
- repl_repl[Anum_pg_database_datconfig-1] = 'r';
-
- if (strcmp(stmt->variable, "all")==0 && valuestr == NULL)
- {
- /* RESET ALL */
- repl_null[Anum_pg_database_datconfig-1] = 'n';
- repl_val[Anum_pg_database_datconfig-1] = (Datum) 0;
- }
- else
- {
- Datum datum;
- bool isnull;
- ArrayType *a;
-
- repl_null[Anum_pg_database_datconfig-1] = ' ';
-
- datum = heap_getattr(tuple, Anum_pg_database_datconfig,
- RelationGetDescr(rel), &isnull);
-
- a = isnull ? ((ArrayType *) NULL) : DatumGetArrayTypeP(datum);
-
- if (valuestr)
- a = GUCArrayAdd(a, stmt->variable, valuestr);
- else
- a = GUCArrayDelete(a, stmt->variable);
-
- repl_val[Anum_pg_database_datconfig-1] = PointerGetDatum(a);
- }
-
- newtuple = heap_modifytuple(tuple, rel, repl_val, repl_null, repl_repl);
- simple_heap_update(rel, &tuple->t_self, newtuple);
-
- /*
- * Update indexes
- */
- if (RelationGetForm(rel)->relhasindex)
- {
- Relation idescs[Num_pg_database_indices];
-
- CatalogOpenIndices(Num_pg_database_indices,
- Name_pg_database_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_database_indices, rel,
- newtuple);
- CatalogCloseIndices(Num_pg_database_indices, idescs);
- }
-
- heap_endscan(scan);
- heap_close(rel, RowExclusiveLock);
-}
-
-
-
-/*
- * Helper functions
- */
-
-static bool
-get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
- int *encodingP, bool *dbIsTemplateP, Oid *dbLastSysOidP,
- TransactionId *dbVacuumXidP, TransactionId *dbFrozenXidP,
- char *dbpath)
-{
- Relation relation;
- ScanKeyData scanKey;
- HeapScanDesc scan;
- HeapTuple tuple;
- bool gottuple;
-
- AssertArg(name);
-
- /* Caller may wish to grab a better lock on pg_database beforehand... */
- relation = heap_openr(DatabaseRelationName, AccessShareLock);
-
- ScanKeyEntryInitialize(&scanKey, 0, Anum_pg_database_datname,
- F_NAMEEQ, NameGetDatum(name));
-
- scan = heap_beginscan(relation, SnapshotNow, 1, &scanKey);
-
- tuple = heap_getnext(scan, ForwardScanDirection);
-
- gottuple = HeapTupleIsValid(tuple);
- if (gottuple)
- {
- Form_pg_database dbform = (Form_pg_database) GETSTRUCT(tuple);
-
- /* oid of the database */
- if (dbIdP)
- *dbIdP = tuple->t_data->t_oid;
- /* sysid of the owner */
- if (ownerIdP)
- *ownerIdP = dbform->datdba;
- /* multibyte encoding */
- if (encodingP)
- *encodingP = dbform->encoding;
- /* allowed as template? */
- if (dbIsTemplateP)
- *dbIsTemplateP = dbform->datistemplate;
- /* last system OID used in database */
- if (dbLastSysOidP)
- *dbLastSysOidP = dbform->datlastsysoid;
- /* limit of vacuumed XIDs */
- if (dbVacuumXidP)
- *dbVacuumXidP = dbform->datvacuumxid;
- /* limit of frozen XIDs */
- if (dbFrozenXidP)
- *dbFrozenXidP = dbform->datfrozenxid;
- /* database path (as registered in pg_database) */
- if (dbpath)
- {
- Datum datum;
- bool isnull;
-
- datum = heap_getattr(tuple,
- Anum_pg_database_datpath,
- RelationGetDescr(relation),
- &isnull);
- if (!isnull)
- {
- text *pathtext = DatumGetTextP(datum);
- int pathlen = VARSIZE(pathtext) - VARHDRSZ;
-
- Assert(pathlen >= 0 && pathlen < MAXPGPATH);
- strncpy(dbpath, VARDATA(pathtext), pathlen);
- *(dbpath + pathlen) = '\0';
- }
- else
- strcpy(dbpath, "");
- }
- }
-
- heap_endscan(scan);
- heap_close(relation, AccessShareLock);
-
- return gottuple;
-}
-
-static bool
-have_createdb_privilege(void)
-{
- HeapTuple utup;
- bool retval;
-
- utup = SearchSysCache(SHADOWSYSID,
- ObjectIdGetDatum(GetUserId()),
- 0, 0, 0);
-
- if (!HeapTupleIsValid(utup))
- retval = false;
- else
- retval = ((Form_pg_shadow) GETSTRUCT(utup))->usecreatedb;
-
- ReleaseSysCache(utup);
-
- return retval;
-}
-
-
-static char *
-resolve_alt_dbpath(const char *dbpath, Oid dboid)
-{
- const char *prefix;
- char *ret;
- size_t len;
-
- if (dbpath == NULL || dbpath[0] == '\0')
- return NULL;
-
- if (strchr(dbpath, '/'))
- {
- if (dbpath[0] != '/')
- elog(ERROR, "Relative paths are not allowed as database locations");
-#ifndef ALLOW_ABSOLUTE_DBPATHS
- elog(ERROR, "Absolute paths are not allowed as database locations");
-#endif
- prefix = dbpath;
- }
- else
- {
- /* must be environment variable */
- char *var = getenv(dbpath);
-
- if (!var)
- elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
- if (var[0] != '/')
- elog(ERROR, "Postmaster environment variable '%s' must be absolute path", dbpath);
- prefix = var;
- }
-
- len = strlen(prefix) + 6 + sizeof(Oid) * 8 + 1;
- if (len >= MAXPGPATH - 100)
- elog(ERROR, "Alternate path is too long");
-
- ret = palloc(len);
- snprintf(ret, len, "%s/base/%u", prefix, dboid);
-
- return ret;
-}
-
-
-static bool
-remove_dbdirs(const char *nominal_loc, const char *alt_loc)
-{
- const char *target_dir;
- char buf[MAXPGPATH + 100];
- bool success = true;
-
- target_dir = alt_loc ? alt_loc : nominal_loc;
-
- /*
- * Close virtual file descriptors so the kernel has more available for
- * the system() call below.
- */
- closeAllVfds();
-
- if (alt_loc)
- {
- /* remove symlink */
- if (unlink(nominal_loc) != 0)
- {
- elog(WARNING, "could not remove '%s': %m", nominal_loc);
- success = false;
- }
- }
-
- snprintf(buf, sizeof(buf), "rm -rf '%s'", target_dir);
-
- if (system(buf) != 0)
- {
- elog(WARNING, "database directory '%s' could not be removed",
- target_dir);
- success = false;
- }
-
- return success;
-}
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
deleted file mode 100644
index a1b47d1274d..00000000000
--- a/src/backend/commands/define.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * define.c
- * Support routines for various kinds of object creation.
- *
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.78 2002/06/20 20:29:27 momjian Exp $
- *
- * DESCRIPTION
- * The "DefineFoo" routines take the parse tree and pick out the
- * appropriate arguments/flags, passing the results to the
- * corresponding "FooDefine" routines (in src/catalog) that do
- * the actual catalog-munging. These routines also verify permission
- * of the user to execute the command.
- *
- * NOTES
- * These things must be defined and committed in the following order:
- * "create function":
- * input/output, recv/send procedures
- * "create type":
- * type
- * "create operator":
- * operators
- *
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <ctype.h>
-#include <math.h>
-
-#include "commands/defrem.h"
-#include "parser/parse_type.h"
-#include "utils/int8.h"
-
-
-/*
- * Translate the input language name to lower case.
- *
- * Output buffer should be NAMEDATALEN long.
- */
-void
-case_translate_language_name(const char *input, char *output)
-{
- int i;
-
- for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
- output[i] = tolower((unsigned char) input[i]);
-
- output[i] = '\0';
-}
-
-
-/*
- * Extract a string value (otherwise uninterpreted) from a DefElem.
- */
-char *
-defGetString(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a parameter",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_Integer:
- {
- char *str = palloc(32);
-
- snprintf(str, 32, "%ld", (long) intVal(def->arg));
- return str;
- }
- case T_Float:
-
- /*
- * T_Float values are kept in string form, so this type cheat
- * works (and doesn't risk losing precision)
- */
- return strVal(def->arg);
- case T_String:
- return strVal(def->arg);
- case T_TypeName:
- return TypeNameToString((TypeName *) def->arg);
- default:
- elog(ERROR, "Define: cannot interpret argument of \"%s\"",
- def->defname);
- }
- return NULL; /* keep compiler quiet */
-}
-
-/*
- * Extract a numeric value (actually double) from a DefElem.
- */
-double
-defGetNumeric(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a numeric value",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_Integer:
- return (double) intVal(def->arg);
- case T_Float:
- return floatVal(def->arg);
- default:
- elog(ERROR, "Define: \"%s\" requires a numeric value",
- def->defname);
- }
- return 0; /* keep compiler quiet */
-}
-
-/*
- * Extract an int64 value from a DefElem.
- */
-int64
-defGetInt64(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a numeric value",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_Integer:
- return (int64) intVal(def->arg);
- case T_Float:
- /*
- * Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid int8
- * strings.
- */
- return DatumGetInt64(DirectFunctionCall1(int8in,
- CStringGetDatum(strVal(def->arg))));
- default:
- elog(ERROR, "Define: \"%s\" requires a numeric value",
- def->defname);
- }
- return 0; /* keep compiler quiet */
-}
-
-/*
- * Extract a possibly-qualified name (as a List of Strings) from a DefElem.
- */
-List *
-defGetQualifiedName(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a parameter",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_TypeName:
- return ((TypeName *) def->arg)->names;
- case T_String:
- /* Allow quoted name for backwards compatibility */
- return makeList1(def->arg);
- default:
- elog(ERROR, "Define: argument of \"%s\" must be a name",
- def->defname);
- }
- return NIL; /* keep compiler quiet */
-}
-
-/*
- * Extract a TypeName from a DefElem.
- */
-TypeName *
-defGetTypeName(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a parameter",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_TypeName:
- return (TypeName *) def->arg;
- case T_String:
- {
- /* Allow quoted typename for backwards compatibility */
- TypeName *n = makeNode(TypeName);
-
- n->names = makeList1(def->arg);
- n->typmod = -1;
- return n;
- }
- default:
- elog(ERROR, "Define: argument of \"%s\" must be a type name",
- def->defname);
- }
- return NULL; /* keep compiler quiet */
-}
-
-/*
- * Extract a type length indicator (either absolute bytes, or
- * -1 for "variable") from a DefElem.
- */
-int
-defGetTypeLength(DefElem *def)
-{
- if (def->arg == NULL)
- elog(ERROR, "Define: \"%s\" requires a parameter",
- def->defname);
- switch (nodeTag(def->arg))
- {
- case T_Integer:
- return intVal(def->arg);
- case T_Float:
- elog(ERROR, "Define: \"%s\" requires an integral value",
- def->defname);
- break;
- case T_String:
- if (strcasecmp(strVal(def->arg), "variable") == 0)
- return -1; /* variable length */
- break;
- case T_TypeName:
- /* cope if grammar chooses to believe "variable" is a typename */
- if (strcasecmp(TypeNameToString((TypeName *) def->arg),
- "variable") == 0)
- return -1; /* variable length */
- break;
- default:
- elog(ERROR, "Define: cannot interpret argument of \"%s\"",
- def->defname);
- }
- elog(ERROR, "Define: invalid argument for \"%s\"",
- def->defname);
- return 0; /* keep compiler quiet */
-}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
deleted file mode 100644
index b14ae4274f6..00000000000
--- a/src/backend/commands/explain.c
+++ /dev/null
@@ -1,914 +0,0 @@
-/*
- * explain.c
- * Explain the query execution plan
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994-5, Regents of the University of California
- *
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.80 2002/06/20 20:29:27 momjian Exp $
- *
- */
-
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "catalog/pg_type.h"
-#include "commands/explain.h"
-#include "executor/instrument.h"
-#include "lib/stringinfo.h"
-#include "nodes/print.h"
-#include "optimizer/clauses.h"
-#include "optimizer/planner.h"
-#include "optimizer/var.h"
-#include "parser/parsetree.h"
-#include "rewrite/rewriteHandler.h"
-#include "tcop/pquery.h"
-#include "utils/builtins.h"
-#include "utils/guc.h"
-#include "utils/lsyscache.h"
-
-
-typedef struct ExplainState
-{
- /* options */
- bool printCost; /* print cost */
- bool printNodes; /* do nodeToString() instead */
- /* other states */
- List *rtable; /* range table */
-} ExplainState;
-
-typedef struct TextOutputState
-{
- TupleDesc tupdesc;
- DestReceiver *destfunc;
-} TextOutputState;
-
-static StringInfo Explain_PlanToString(Plan *plan, ExplainState *es);
-static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
- TextOutputState *tstate);
-static void explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
- int indent, ExplainState *es);
-static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
- int scanrelid, Plan *outer_plan,
- StringInfo str, int indent, ExplainState *es);
-static void show_upper_qual(List *qual, const char *qlabel,
- const char *outer_name, int outer_varno, Plan *outer_plan,
- const char *inner_name, int inner_varno, Plan *inner_plan,
- StringInfo str, int indent, ExplainState *es);
-static void show_sort_keys(List *tlist, int nkeys, const char *qlabel,
- StringInfo str, int indent, ExplainState *es);
-static Node *make_ors_ands_explicit(List *orclauses);
-static TextOutputState *begin_text_output(CommandDest dest, char *title);
-static void do_text_output(TextOutputState *tstate, char *aline);
-static void do_text_output_multiline(TextOutputState *tstate, char *text);
-static void end_text_output(TextOutputState *tstate);
-
-
-/*
- * ExplainQuery -
- * execute an EXPLAIN command
- */
-void
-ExplainQuery(ExplainStmt *stmt, CommandDest dest)
-{
- Query *query = stmt->query;
- TextOutputState *tstate;
- List *rewritten;
- List *l;
-
- tstate = begin_text_output(dest, "QUERY PLAN");
-
- if (query->commandType == CMD_UTILITY)
- {
- /* rewriter will not cope with utility statements */
- do_text_output(tstate, "Utility statements have no plan structure");
- }
- else
- {
- /* Rewrite through rule system */
- rewritten = QueryRewrite(query);
-
- if (rewritten == NIL)
- {
- /* In the case of an INSTEAD NOTHING, tell at least that */
- do_text_output(tstate, "Query rewrites to nothing");
- }
- else
- {
- /* Explain every plan */
- foreach(l, rewritten)
- {
- ExplainOneQuery(lfirst(l), stmt, tstate);
- /* put a blank line between plans */
- if (lnext(l) != NIL)
- do_text_output(tstate, "");
- }
- }
- }
-
- end_text_output(tstate);
-}
-
-/*
- * ExplainOneQuery -
- * print out the execution plan for one query
- */
-static void
-ExplainOneQuery(Query *query, ExplainStmt *stmt, TextOutputState *tstate)
-{
- Plan *plan;
- ExplainState *es;
- double totaltime = 0;
-
- /* planner will not cope with utility statements */
- if (query->commandType == CMD_UTILITY)
- {
- if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
- do_text_output(tstate, "NOTIFY");
- else
- do_text_output(tstate, "UTILITY");
- return;
- }
-
- /* plan the query */
- plan = planner(query);
-
- /* pg_plan could have failed */
- if (plan == NULL)
- return;
-
- /* Execute the plan for statistics if asked for */
- if (stmt->analyze)
- {
- struct timeval starttime;
- struct timeval endtime;
-
- /*
- * Set up the instrumentation for the top node. This will cascade
- * during plan initialisation
- */
- plan->instrument = InstrAlloc();
-
- gettimeofday(&starttime, NULL);
- ProcessQuery(query, plan, None, NULL);
- CommandCounterIncrement();
- gettimeofday(&endtime, NULL);
-
- endtime.tv_sec -= starttime.tv_sec;
- endtime.tv_usec -= starttime.tv_usec;
- while (endtime.tv_usec < 0)
- {
- endtime.tv_usec += 1000000;
- endtime.tv_sec--;
- }
- totaltime = (double) endtime.tv_sec +
- (double) endtime.tv_usec / 1000000.0;
- }
-
- es = (ExplainState *) palloc(sizeof(ExplainState));
- MemSet(es, 0, sizeof(ExplainState));
-
- es->printCost = true; /* default */
-
- if (stmt->verbose)
- es->printNodes = true;
-
- es->rtable = query->rtable;
-
- if (es->printNodes)
- {
- char *s;
- char *f;
-
- s = nodeToString(plan);
- if (s)
- {
- if (Explain_pretty_print)
- f = pretty_format_node_dump(s);
- else
- f = format_node_dump(s);
- pfree(s);
- do_text_output_multiline(tstate, f);
- pfree(f);
- if (es->printCost)
- do_text_output(tstate, ""); /* separator line */
- }
- }
-
- if (es->printCost)
- {
- StringInfo str;
-
- str = Explain_PlanToString(plan, es);
- if (stmt->analyze)
- appendStringInfo(str, "Total runtime: %.2f msec\n",
- 1000.0 * totaltime);
- do_text_output_multiline(tstate, str->data);
- pfree(str->data);
- pfree(str);
- }
-
- pfree(es);
-}
-
-
-/*
- * explain_outNode -
- * converts a Plan node into ascii string and appends it to 'str'
- *
- * outer_plan, if not null, references another plan node that is the outer
- * side of a join with the current node. This is only interesting for
- * deciphering runtime keys of an inner indexscan.
- */
-static void
-explain_outNode(StringInfo str, Plan *plan, Plan *outer_plan,
- int indent, ExplainState *es)
-{
- List *l;
- char *pname;
- int i;
-
- if (plan == NULL)
- {
- appendStringInfo(str, "\n");
- return;
- }
-
- switch (nodeTag(plan))
- {
- case T_Result:
- pname = "Result";
- break;
- case T_Append:
- pname = "Append";
- break;
- case T_NestLoop:
- pname = "Nested Loop";
- break;
- case T_MergeJoin:
- pname = "Merge Join";
- break;
- case T_HashJoin:
- pname = "Hash Join";
- break;
- case T_SeqScan:
- pname = "Seq Scan";
- break;
- case T_IndexScan:
- pname = "Index Scan";
- break;
- case T_TidScan:
- pname = "Tid Scan";
- break;
- case T_SubqueryScan:
- pname = "Subquery Scan";
- break;
- case T_FunctionScan:
- pname = "Function Scan";
- break;
- case T_Material:
- pname = "Materialize";
- break;
- case T_Sort:
- pname = "Sort";
- break;
- case T_Group:
- pname = "Group";
- break;
- case T_Agg:
- pname = "Aggregate";
- break;
- case T_Unique:
- pname = "Unique";
- break;
- case T_SetOp:
- switch (((SetOp *) plan)->cmd)
- {
- case SETOPCMD_INTERSECT:
- pname = "SetOp Intersect";
- break;
- case SETOPCMD_INTERSECT_ALL:
- pname = "SetOp Intersect All";
- break;
- case SETOPCMD_EXCEPT:
- pname = "SetOp Except";
- break;
- case SETOPCMD_EXCEPT_ALL:
- pname = "SetOp Except All";
- break;
- default:
- pname = "SetOp ???";
- break;
- }
- break;
- case T_Limit:
- pname = "Limit";
- break;
- case T_Hash:
- pname = "Hash";
- break;
- default:
- pname = "???";
- break;
- }
-
- appendStringInfo(str, pname);
- switch (nodeTag(plan))
- {
- case T_IndexScan:
- if (ScanDirectionIsBackward(((IndexScan *) plan)->indxorderdir))
- appendStringInfo(str, " Backward");
- appendStringInfo(str, " using ");
- i = 0;
- foreach(l, ((IndexScan *) plan)->indxid)
- {
- Relation relation;
-
- relation = index_open(lfirsti(l));
- appendStringInfo(str, "%s%s",
- (++i > 1) ? ", " : "",
- quote_identifier(RelationGetRelationName(relation)));
- index_close(relation);
- }
- /* FALL THRU */
- case T_SeqScan:
- case T_TidScan:
- if (((Scan *) plan)->scanrelid > 0)
- {
- RangeTblEntry *rte = rt_fetch(((Scan *) plan)->scanrelid,
- es->rtable);
- char *relname;
-
- /* Assume it's on a real relation */
- Assert(rte->rtekind == RTE_RELATION);
-
- /* We only show the rel name, not schema name */
- relname = get_rel_name(rte->relid);
-
- appendStringInfo(str, " on %s",
- quote_identifier(relname));
- if (strcmp(rte->eref->aliasname, relname) != 0)
- appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
- }
- break;
- case T_SubqueryScan:
- if (((Scan *) plan)->scanrelid > 0)
- {
- RangeTblEntry *rte = rt_fetch(((Scan *) plan)->scanrelid,
- es->rtable);
-
- appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
- }
- break;
- case T_FunctionScan:
- if (((Scan *) plan)->scanrelid > 0)
- {
- RangeTblEntry *rte = rt_fetch(((Scan *) plan)->scanrelid,
- es->rtable);
- Expr *expr;
- Func *funcnode;
- Oid funcid;
- char *proname;
-
- /* Assert it's on a RangeFunction */
- Assert(rte->rtekind == RTE_FUNCTION);
-
- expr = (Expr *) rte->funcexpr;
- funcnode = (Func *) expr->oper;
- funcid = funcnode->funcid;
-
- /* We only show the func name, not schema name */
- proname = get_func_name(funcid);
-
- appendStringInfo(str, " on %s",
- quote_identifier(proname));
- if (strcmp(rte->eref->aliasname, proname) != 0)
- appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
- }
- break;
- default:
- break;
- }
- if (es->printCost)
- {
- appendStringInfo(str, " (cost=%.2f..%.2f rows=%.0f width=%d)",
- plan->startup_cost, plan->total_cost,
- plan->plan_rows, plan->plan_width);
-
- if (plan->instrument && plan->instrument->nloops > 0)
- {
- double nloops = plan->instrument->nloops;
-
- appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
- 1000.0 * plan->instrument->startup / nloops,
- 1000.0 * plan->instrument->total / nloops,
- plan->instrument->ntuples / nloops,
- plan->instrument->nloops);
- }
- }
- appendStringInfo(str, "\n");
-
- /* quals, sort keys, etc */
- switch (nodeTag(plan))
- {
- case T_IndexScan:
- show_scan_qual(((IndexScan *) plan)->indxqualorig, true,
- "Index Cond",
- ((Scan *) plan)->scanrelid,
- outer_plan,
- str, indent, es);
- show_scan_qual(plan->qual, false,
- "Filter",
- ((Scan *) plan)->scanrelid,
- outer_plan,
- str, indent, es);
- break;
- case T_SeqScan:
- case T_TidScan:
- case T_FunctionScan:
- show_scan_qual(plan->qual, false,
- "Filter",
- ((Scan *) plan)->scanrelid,
- outer_plan,
- str, indent, es);
- break;
- case T_NestLoop:
- show_upper_qual(((NestLoop *) plan)->join.joinqual,
- "Join Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- show_upper_qual(plan->qual,
- "Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- break;
- case T_MergeJoin:
- show_upper_qual(((MergeJoin *) plan)->mergeclauses,
- "Merge Cond",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- show_upper_qual(((MergeJoin *) plan)->join.joinqual,
- "Join Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- show_upper_qual(plan->qual,
- "Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- break;
- case T_HashJoin:
- show_upper_qual(((HashJoin *) plan)->hashclauses,
- "Hash Cond",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- show_upper_qual(((HashJoin *) plan)->join.joinqual,
- "Join Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- show_upper_qual(plan->qual,
- "Filter",
- "outer", OUTER, outerPlan(plan),
- "inner", INNER, innerPlan(plan),
- str, indent, es);
- break;
- case T_SubqueryScan:
- show_upper_qual(plan->qual,
- "Filter",
- "subplan", 1, ((SubqueryScan *) plan)->subplan,
- "", 0, NULL,
- str, indent, es);
- break;
- case T_Agg:
- case T_Group:
- show_upper_qual(plan->qual,
- "Filter",
- "subplan", 0, outerPlan(plan),
- "", 0, NULL,
- str, indent, es);
- break;
- case T_Sort:
- show_sort_keys(plan->targetlist, ((Sort *) plan)->keycount,
- "Sort Key",
- str, indent, es);
- break;
- case T_Result:
- show_upper_qual((List *) ((Result *) plan)->resconstantqual,
- "One-Time Filter",
- "subplan", OUTER, outerPlan(plan),
- "", 0, NULL,
- str, indent, es);
- show_upper_qual(plan->qual,
- "Filter",
- "subplan", OUTER, outerPlan(plan),
- "", 0, NULL,
- str, indent, es);
- break;
- default:
- break;
- }
-
- /* initPlan-s */
- if (plan->initPlan)
- {
- List *saved_rtable = es->rtable;
- List *lst;
-
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " InitPlan\n");
- foreach(lst, plan->initPlan)
- {
- es->rtable = ((SubPlan *) lfirst(lst))->rtable;
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
- explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, NULL,
- indent + 4, es);
- }
- es->rtable = saved_rtable;
- }
-
- /* lefttree */
- if (outerPlan(plan))
- {
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
- explain_outNode(str, outerPlan(plan), NULL, indent + 3, es);
- }
-
- /* righttree */
- if (innerPlan(plan))
- {
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
- explain_outNode(str, innerPlan(plan), outerPlan(plan),
- indent + 3, es);
- }
-
- if (IsA(plan, Append))
- {
- Append *appendplan = (Append *) plan;
- List *lst;
-
- foreach(lst, appendplan->appendplans)
- {
- Plan *subnode = (Plan *) lfirst(lst);
-
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
-
- explain_outNode(str, subnode, NULL, indent + 3, es);
- }
- }
-
- if (IsA(plan, SubqueryScan))
- {
- SubqueryScan *subqueryscan = (SubqueryScan *) plan;
- Plan *subnode = subqueryscan->subplan;
- RangeTblEntry *rte = rt_fetch(subqueryscan->scan.scanrelid,
- es->rtable);
- List *saved_rtable = es->rtable;
-
- Assert(rte->rtekind == RTE_SUBQUERY);
- es->rtable = rte->subquery->rtable;
-
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
-
- explain_outNode(str, subnode, NULL, indent + 3, es);
-
- es->rtable = saved_rtable;
- }
-
- /* subPlan-s */
- if (plan->subPlan)
- {
- List *saved_rtable = es->rtable;
- List *lst;
-
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " SubPlan\n");
- foreach(lst, plan->subPlan)
- {
- es->rtable = ((SubPlan *) lfirst(lst))->rtable;
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " -> ");
- explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, NULL,
- indent + 4, es);
- }
- es->rtable = saved_rtable;
- }
-}
-
-static StringInfo
-Explain_PlanToString(Plan *plan, ExplainState *es)
-{
- StringInfo str = makeStringInfo();
-
- if (plan != NULL)
- explain_outNode(str, plan, NULL, 0, es);
- return str;
-}
-
-/*
- * Show a qualifier expression for a scan plan node
- */
-static void
-show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
- int scanrelid, Plan *outer_plan,
- StringInfo str, int indent, ExplainState *es)
-{
- RangeTblEntry *rte;
- Node *scancontext;
- Node *outercontext;
- List *context;
- Node *node;
- char *exprstr;
- int i;
-
- /* No work if empty qual */
- if (qual == NIL)
- return;
- if (is_or_qual)
- {
- if (lfirst(qual) == NIL && lnext(qual) == NIL)
- return;
- }
-
- /* Fix qual --- indexqual requires different processing */
- if (is_or_qual)
- node = make_ors_ands_explicit(qual);
- else
- node = (Node *) make_ands_explicit(qual);
-
- /* Generate deparse context */
- Assert(scanrelid > 0 && scanrelid <= length(es->rtable));
- rte = rt_fetch(scanrelid, es->rtable);
- scancontext = deparse_context_for_rte(rte);
-
- /*
- * If we have an outer plan that is referenced by the qual, add it to
- * the deparse context. If not, don't (so that we don't force prefixes
- * unnecessarily).
- */
- if (outer_plan)
- {
- if (intMember(OUTER, pull_varnos(node)))
- outercontext = deparse_context_for_subplan("outer",
- outer_plan->targetlist,
- es->rtable);
- else
- outercontext = NULL;
- }
- else
- outercontext = NULL;
-
- context = deparse_context_for_plan(scanrelid, scancontext,
- OUTER, outercontext,
- NIL);
-
- /* Deparse the expression */
- exprstr = deparse_expression(node, context, (outercontext != NULL));
-
- /* And add to str */
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " %s: %s\n", qlabel, exprstr);
-}
-
-/*
- * Show a qualifier expression for an upper-level plan node
- */
-static void
-show_upper_qual(List *qual, const char *qlabel,
- const char *outer_name, int outer_varno, Plan *outer_plan,
- const char *inner_name, int inner_varno, Plan *inner_plan,
- StringInfo str, int indent, ExplainState *es)
-{
- List *context;
- Node *outercontext;
- Node *innercontext;
- Node *node;
- char *exprstr;
- int i;
-
- /* No work if empty qual */
- if (qual == NIL)
- return;
-
- /* Generate deparse context */
- if (outer_plan)
- outercontext = deparse_context_for_subplan(outer_name,
- outer_plan->targetlist,
- es->rtable);
- else
- outercontext = NULL;
- if (inner_plan)
- innercontext = deparse_context_for_subplan(inner_name,
- inner_plan->targetlist,
- es->rtable);
- else
- innercontext = NULL;
- context = deparse_context_for_plan(outer_varno, outercontext,
- inner_varno, innercontext,
- NIL);
-
- /* Deparse the expression */
- node = (Node *) make_ands_explicit(qual);
- exprstr = deparse_expression(node, context, (inner_plan != NULL));
-
- /* And add to str */
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " %s: %s\n", qlabel, exprstr);
-}
-
-/*
- * Show the sort keys for a Sort node.
- */
-static void
-show_sort_keys(List *tlist, int nkeys, const char *qlabel,
- StringInfo str, int indent, ExplainState *es)
-{
- List *context;
- bool useprefix;
- int keyno;
- List *tl;
- char *exprstr;
- int i;
-
- if (nkeys <= 0)
- return;
-
- for (i = 0; i < indent; i++)
- appendStringInfo(str, " ");
- appendStringInfo(str, " %s: ", qlabel);
-
- /*
- * In this routine we expect that the plan node's tlist has not been
- * processed by set_plan_references(). Normally, any Vars will contain
- * valid varnos referencing the actual rtable. But we might instead be
- * looking at a dummy tlist generated by prepunion.c; if there are
- * Vars with zero varno, use the tlist itself to determine their names.
- */
- if (intMember(0, pull_varnos((Node *) tlist)))
- {
- Node *outercontext;
-
- outercontext = deparse_context_for_subplan("sort",
- tlist,
- es->rtable);
- context = deparse_context_for_plan(0, outercontext,
- 0, NULL,
- NIL);
- useprefix = false;
- }
- else
- {
- context = deparse_context_for_plan(0, NULL,
- 0, NULL,
- es->rtable);
- useprefix = length(es->rtable) > 1;
- }
-
- for (keyno = 1; keyno <= nkeys; keyno++)
- {
- /* find key expression in tlist */
- foreach(tl, tlist)
- {
- TargetEntry *target = (TargetEntry *) lfirst(tl);
-
- if (target->resdom->reskey == keyno)
- {
- /* Deparse the expression */
- exprstr = deparse_expression(target->expr, context, useprefix);
- /* And add to str */
- if (keyno > 1)
- appendStringInfo(str, ", ");
- appendStringInfo(str, "%s", exprstr);
- break;
- }
- }
- if (tl == NIL)
- elog(ERROR, "show_sort_keys: no tlist entry for key %d", keyno);
- }
-
- appendStringInfo(str, "\n");
-}
-
-/*
- * Indexscan qual lists have an implicit OR-of-ANDs structure. Make it
- * explicit so deparsing works properly.
- */
-static Node *
-make_ors_ands_explicit(List *orclauses)
-{
- if (orclauses == NIL)
- return NULL; /* probably can't happen */
- else if (lnext(orclauses) == NIL)
- return (Node *) make_ands_explicit(lfirst(orclauses));
- else
- {
- List *args = NIL;
- List *orptr;
-
- foreach(orptr, orclauses)
- {
- args = lappend(args, make_ands_explicit(lfirst(orptr)));
- }
-
- return (Node *) make_orclause(args);
- }
-}
-
-
-/*
- * Functions for sending text to the frontend (or other specified destination)
- * as though it is a SELECT result.
- *
- * We tell the frontend that the table structure is a single TEXT column.
- */
-
-static TextOutputState *
-begin_text_output(CommandDest dest, char *title)
-{
- TextOutputState *tstate;
- TupleDesc tupdesc;
-
- tstate = (TextOutputState *) palloc(sizeof(TextOutputState));
-
- /* need a tuple descriptor representing a single TEXT column */
- tupdesc = CreateTemplateTupleDesc(1);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, title,
- TEXTOID, -1, 0, false);
-
- tstate->tupdesc = tupdesc;
- tstate->destfunc = DestToFunction(dest);
-
- (*tstate->destfunc->setup) (tstate->destfunc, (int) CMD_SELECT,
- NULL, tupdesc);
-
- return tstate;
-}
-
-/* write a single line of text */
-static void
-do_text_output(TextOutputState *tstate, char *aline)
-{
- HeapTuple tuple;
- Datum values[1];
- char nulls[1];
-
- /* form a tuple and send it to the receiver */
- values[0] = DirectFunctionCall1(textin, CStringGetDatum(aline));
- nulls[0] = ' ';
- tuple = heap_formtuple(tstate->tupdesc, values, nulls);
- (*tstate->destfunc->receiveTuple) (tuple,
- tstate->tupdesc,
- tstate->destfunc);
- pfree(DatumGetPointer(values[0]));
- heap_freetuple(tuple);
-}
-
-/* write a chunk of text, breaking at newline characters */
-/* NB: scribbles on its input! */
-static void
-do_text_output_multiline(TextOutputState *tstate, char *text)
-{
- while (*text)
- {
- char *eol;
-
- eol = strchr(text, '\n');
- if (eol)
- *eol++ = '\0';
- else
- eol = text + strlen(text);
- do_text_output(tstate, text);
- text = eol;
- }
-}
-
-static void
-end_text_output(TextOutputState *tstate)
-{
- (*tstate->destfunc->cleanup) (tstate->destfunc);
- pfree(tstate);
-}
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
deleted file mode 100644
index 638fd19a8eb..00000000000
--- a/src/backend/commands/functioncmds.c
+++ /dev/null
@@ -1,587 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * functioncmds.c
- *
- * Routines for CREATE and DROP FUNCTION commands
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.7 2002/06/20 20:29:27 momjian Exp $
- *
- * DESCRIPTION
- * These routines take the parse tree and pick out the
- * appropriate arguments/flags, and pass the results to the
- * corresponding "FooDefine" routines (in src/catalog) that do
- * the actual catalog-munging. These routines also verify permission
- * of the user to execute the command.
- *
- * NOTES
- * These things must be defined and committed in the following order:
- * "create function":
- * input/output, recv/send procedures
- * "create type":
- * type
- * "create operator":
- * operators
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_language.h"
-#include "catalog/pg_proc.h"
-#include "catalog/pg_type.h"
-#include "commands/comment.h"
-#include "commands/defrem.h"
-#include "miscadmin.h"
-#include "optimizer/cost.h"
-#include "parser/parse_func.h"
-#include "parser/parse_type.h"
-#include "utils/acl.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-/*
- * Examine the "returns" clause returnType of the CREATE FUNCTION statement
- * and return information about it as *prorettype_p and *returnsSet.
- *
- * This is more complex than the average typename lookup because we want to
- * allow a shell type to be used, or even created if the specified return type
- * doesn't exist yet. (Without this, there's no way to define the I/O procs
- * for a new type.) But SQL function creation won't cope, so error out if
- * the target language is SQL.
- */
-static void
-compute_return_type(TypeName *returnType, Oid languageOid,
- Oid *prorettype_p, bool *returnsSet_p)
-{
- Oid rettype;
-
- rettype = LookupTypeName(returnType);
-
- if (OidIsValid(rettype))
- {
- if (!get_typisdefined(rettype))
- {
- if (languageOid == SQLlanguageId)
- elog(ERROR, "SQL functions cannot return shell types");
- else
- elog(WARNING, "Return type \"%s\" is only a shell",
- TypeNameToString(returnType));
- }
- }
- else
- {
- char *typnam = TypeNameToString(returnType);
-
- if (strcmp(typnam, "opaque") == 0)
- rettype = InvalidOid;
- else
- {
- Oid namespaceId;
- AclResult aclresult;
- char *typname;
-
- if (languageOid == SQLlanguageId)
- elog(ERROR, "Type \"%s\" does not exist", typnam);
- elog(WARNING, "ProcedureCreate: type %s is not yet defined",
- typnam);
- namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
- &typname);
- aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(namespaceId));
- rettype = TypeShellMake(typname, namespaceId);
- if (!OidIsValid(rettype))
- elog(ERROR, "could not create type %s", typnam);
- }
- }
-
- *prorettype_p = rettype;
- *returnsSet_p = returnType->setof;
-}
-
-/*
- * Interpret the argument-types list of the CREATE FUNCTION statement.
- */
-static int
-compute_parameter_types(List *argTypes, Oid languageOid,
- Oid *parameterTypes)
-{
- int parameterCount = 0;
- List *x;
-
- MemSet(parameterTypes, 0, FUNC_MAX_ARGS * sizeof(Oid));
- foreach(x, argTypes)
- {
- TypeName *t = (TypeName *) lfirst(x);
- Oid toid;
-
- if (parameterCount >= FUNC_MAX_ARGS)
- elog(ERROR, "functions cannot have more than %d arguments",
- FUNC_MAX_ARGS);
-
- toid = LookupTypeName(t);
- if (OidIsValid(toid))
- {
- if (!get_typisdefined(toid))
- elog(WARNING, "Argument type \"%s\" is only a shell",
- TypeNameToString(t));
- }
- else
- {
- char *typnam = TypeNameToString(t);
-
- if (strcmp(typnam, "opaque") == 0)
- {
- if (languageOid == SQLlanguageId)
- elog(ERROR, "SQL functions cannot have arguments of type \"opaque\"");
- toid = InvalidOid;
- }
- else
- elog(ERROR, "Type \"%s\" does not exist", typnam);
- }
-
- if (t->setof)
- elog(ERROR, "functions cannot accept set arguments");
-
- parameterTypes[parameterCount++] = toid;
- }
-
- return parameterCount;
-}
-
-
-/*
- * Dissect the list of options assembled in gram.y into function
- * attributes.
- */
-
-static void
-compute_attributes_sql_style(const List *options,
- List **as,
- char **language,
- char *volatility_p,
- bool *strict_p,
- bool *security_definer,
- bool *implicit_cast)
-{
- const List *option;
- DefElem *as_item = NULL;
- DefElem *language_item = NULL;
- DefElem *volatility_item = NULL;
- DefElem *strict_item = NULL;
- DefElem *security_item = NULL;
- DefElem *implicit_item = NULL;
-
- foreach(option, options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "as")==0)
- {
- if (as_item)
- elog(ERROR, "conflicting or redundant options");
- as_item = defel;
- }
- else if (strcmp(defel->defname, "language")==0)
- {
- if (language_item)
- elog(ERROR, "conflicting or redundant options");
- language_item = defel;
- }
- else if (strcmp(defel->defname, "volatility")==0)
- {
- if (volatility_item)
- elog(ERROR, "conflicting or redundant options");
- volatility_item = defel;
- }
- else if (strcmp(defel->defname, "strict")==0)
- {
- if (strict_item)
- elog(ERROR, "conflicting or redundant options");
- strict_item = defel;
- }
- else if (strcmp(defel->defname, "security")==0)
- {
- if (security_item)
- elog(ERROR, "conflicting or redundant options");
- security_item = defel;
- }
- else if (strcmp(defel->defname, "implicit")==0)
- {
- if (implicit_item)
- elog(ERROR, "conflicting or redundant options");
- implicit_item = defel;
- }
- else
- elog(ERROR, "invalid CREATE FUNCTION option");
- }
-
- if (as_item)
- *as = (List *)as_item->arg;
- else
- elog(ERROR, "no function body specified");
-
- if (language_item)
- *language = strVal(language_item->arg);
- else
- elog(ERROR, "no language specified");
-
- if (volatility_item)
- {
- if (strcmp(strVal(volatility_item->arg), "immutable")==0)
- *volatility_p = PROVOLATILE_IMMUTABLE;
- else if (strcmp(strVal(volatility_item->arg), "stable")==0)
- *volatility_p = PROVOLATILE_STABLE;
- else if (strcmp(strVal(volatility_item->arg), "volatile")==0)
- *volatility_p = PROVOLATILE_VOLATILE;
- else
- elog(ERROR, "invalid volatility");
- }
-
- if (strict_item)
- *strict_p = intVal(strict_item->arg);
- if (security_item)
- *security_definer = intVal(security_item->arg);
- if (implicit_item)
- *implicit_cast = intVal(implicit_item->arg);
-}
-
-
-/*-------------
- * Interpret the parameters *parameters and return their contents as
- * *byte_pct_p, etc.
- *
- * These parameters supply optional information about a function.
- * All have defaults if not specified.
- *
- * Note: currently, only three of these parameters actually do anything:
- *
- * * isImplicit means the function may be used as an implicit type
- * coercion.
- *
- * * isStrict means the function should not be called when any NULL
- * inputs are present; instead a NULL result value should be assumed.
- *
- * * volatility tells the optimizer whether the function's result can
- * be assumed to be repeatable over multiple evaluations.
- *
- * The other four parameters are not used anywhere. They used to be
- * used in the "expensive functions" optimizer, but that's been dead code
- * for a long time.
- *------------
- */
-static void
-compute_attributes_with_style(List *parameters,
- int32 *byte_pct_p, int32 *perbyte_cpu_p,
- int32 *percall_cpu_p, int32 *outin_ratio_p,
- bool *isImplicit_p, bool *isStrict_p,
- char *volatility_p)
-{
- List *pl;
-
- foreach(pl, parameters)
- {
- DefElem *param = (DefElem *) lfirst(pl);
-
- if (strcasecmp(param->defname, "implicitcoercion") == 0)
- *isImplicit_p = true;
- else if (strcasecmp(param->defname, "isstrict") == 0)
- *isStrict_p = true;
- else if (strcasecmp(param->defname, "isimmutable") == 0)
- *volatility_p = PROVOLATILE_IMMUTABLE;
- else if (strcasecmp(param->defname, "isstable") == 0)
- *volatility_p = PROVOLATILE_STABLE;
- else if (strcasecmp(param->defname, "isvolatile") == 0)
- *volatility_p = PROVOLATILE_VOLATILE;
- else if (strcasecmp(param->defname, "iscachable") == 0)
- {
- /* obsolete spelling of isImmutable */
- *volatility_p = PROVOLATILE_IMMUTABLE;
- }
- else if (strcasecmp(param->defname, "trusted") == 0)
- {
- /*
- * we don't have untrusted functions any more. The 4.2
- * implementation is lousy anyway so I took it out. -ay 10/94
- */
- elog(ERROR, "untrusted function has been decommissioned.");
- }
- else if (strcasecmp(param->defname, "byte_pct") == 0)
- *byte_pct_p = (int) defGetNumeric(param);
- else if (strcasecmp(param->defname, "perbyte_cpu") == 0)
- *perbyte_cpu_p = (int) defGetNumeric(param);
- else if (strcasecmp(param->defname, "percall_cpu") == 0)
- *percall_cpu_p = (int) defGetNumeric(param);
- else if (strcasecmp(param->defname, "outin_ratio") == 0)
- *outin_ratio_p = (int) defGetNumeric(param);
- else
- elog(WARNING, "Unrecognized function attribute '%s' ignored",
- param->defname);
- }
-}
-
-
-/*
- * For a dynamically linked C language object, the form of the clause is
- *
- * AS <object file name> [, <link symbol name> ]
- *
- * In all other cases
- *
- * AS <object reference, or sql code>
- *
- */
-
-static void
-interpret_AS_clause(Oid languageOid, const char *languageName, const List *as,
- char **prosrc_str_p, char **probin_str_p)
-{
- Assert(as != NIL);
-
- if (languageOid == ClanguageId)
- {
- /*
- * For "C" language, store the file name in probin and, when
- * given, the link symbol name in prosrc.
- */
- *probin_str_p = strVal(lfirst(as));
- if (lnext(as) == NULL)
- *prosrc_str_p = "-";
- else
- *prosrc_str_p = strVal(lsecond(as));
- }
- else
- {
- /* Everything else wants the given string in prosrc. */
- *prosrc_str_p = strVal(lfirst(as));
- *probin_str_p = "-";
-
- if (lnext(as) != NIL)
- elog(ERROR, "CREATE FUNCTION: only one AS item needed for %s language",
- languageName);
- }
-}
-
-
-
-/*
- * CreateFunction
- * Execute a CREATE FUNCTION utility statement.
- */
-void
-CreateFunction(CreateFunctionStmt *stmt)
-{
- char *probin_str;
- char *prosrc_str;
- Oid prorettype;
- bool returnsSet;
- char *language;
- char languageName[NAMEDATALEN];
- Oid languageOid;
- Oid languageValidator;
- char *funcname;
- Oid namespaceId;
- AclResult aclresult;
- int parameterCount;
- Oid parameterTypes[FUNC_MAX_ARGS];
- int32 byte_pct,
- perbyte_cpu,
- percall_cpu,
- outin_ratio;
- bool isImplicit,
- isStrict,
- security;
- char volatility;
- HeapTuple languageTuple;
- Form_pg_language languageStruct;
- List *as_clause;
-
- /* Convert list of names to a name and namespace */
- namespaceId = QualifiedNameGetCreationNamespace(stmt->funcname,
- &funcname);
-
- /* Check we have creation rights in target namespace */
- aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(namespaceId));
-
- /* defaults attributes */
- byte_pct = BYTE_PCT;
- perbyte_cpu = PERBYTE_CPU;
- percall_cpu = PERCALL_CPU;
- outin_ratio = OUTIN_RATIO;
- isImplicit = false;
- isStrict = false;
- security = false;
- volatility = PROVOLATILE_VOLATILE;
-
- /* override attributes from explicit list */
- compute_attributes_sql_style(stmt->options,
- &as_clause, &language, &volatility, &isStrict, &security, &isImplicit);
-
- /* Convert language name to canonical case */
- case_translate_language_name(language, languageName);
-
- /* Look up the language and validate permissions */
- languageTuple = SearchSysCache(LANGNAME,
- PointerGetDatum(languageName),
- 0, 0, 0);
- if (!HeapTupleIsValid(languageTuple))
- elog(ERROR, "language \"%s\" does not exist", languageName);
-
- languageOid = languageTuple->t_data->t_oid;
- languageStruct = (Form_pg_language) GETSTRUCT(languageTuple);
-
- if (languageStruct->lanpltrusted)
- {
- /* if trusted language, need USAGE privilege */
- AclResult aclresult;
-
- aclresult = pg_language_aclcheck(languageOid, GetUserId(), ACL_USAGE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, NameStr(languageStruct->lanname));
- }
- else
- {
- /* if untrusted language, must be superuser */
- if (!superuser())
- aclcheck_error(ACLCHECK_NO_PRIV, NameStr(languageStruct->lanname));
- }
-
- languageValidator = languageStruct->lanvalidator;
-
- ReleaseSysCache(languageTuple);
-
- /*
- * Convert remaining parameters of CREATE to form wanted by
- * ProcedureCreate.
- */
- compute_return_type(stmt->returnType, languageOid,
- &prorettype, &returnsSet);
-
- parameterCount = compute_parameter_types(stmt->argTypes, languageOid,
- parameterTypes);
-
- compute_attributes_with_style(stmt->withClause,
- &byte_pct, &perbyte_cpu, &percall_cpu,
- &outin_ratio, &isImplicit, &isStrict,
- &volatility);
-
- interpret_AS_clause(languageOid, languageName, as_clause,
- &prosrc_str, &probin_str);
-
- if (languageOid == INTERNALlanguageId)
- {
- /*
- * In PostgreSQL versions before 6.5, the SQL name of the
- * created function could not be different from the internal
- * name, and "prosrc" wasn't used. So there is code out there
- * that does CREATE FUNCTION xyz AS '' LANGUAGE 'internal'.
- * To preserve some modicum of backwards compatibility, accept
- * an empty "prosrc" value as meaning the supplied SQL
- * function name.
- */
- if (strlen(prosrc_str) == 0)
- prosrc_str = funcname;
- }
-
- if (languageOid == ClanguageId)
- {
- /* If link symbol is specified as "-", substitute procedure name */
- if (strcmp(prosrc_str, "-") == 0)
- prosrc_str = funcname;
- }
-
- /*
- * And now that we have all the parameters, and know we're permitted
- * to do so, go ahead and create the function.
- */
- ProcedureCreate(funcname,
- namespaceId,
- stmt->replace,
- returnsSet,
- prorettype,
- languageOid,
- languageValidator,
- prosrc_str, /* converted to text later */
- probin_str, /* converted to text later */
- false, /* not an aggregate */
- security,
- isImplicit,
- isStrict,
- volatility,
- byte_pct,
- perbyte_cpu,
- percall_cpu,
- outin_ratio,
- parameterCount,
- parameterTypes);
-}
-
-
-/*
- * RemoveFunction
- * Deletes a function.
- *
- * Exceptions:
- * BadArg if name is invalid.
- * "ERROR" if function nonexistent.
- * ...
- */
-void
-RemoveFunction(List *functionName, /* function name to be removed */
- List *argTypes) /* list of TypeName nodes */
-{
- Oid funcOid;
- Relation relation;
- HeapTuple tup;
-
- funcOid = LookupFuncNameTypeNames(functionName, argTypes,
- true, "RemoveFunction");
-
- relation = heap_openr(ProcedureRelationName, RowExclusiveLock);
-
- tup = SearchSysCache(PROCOID,
- ObjectIdGetDatum(funcOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
- elog(ERROR, "RemoveFunction: couldn't find tuple for function %s",
- NameListToString(functionName));
-
- /* Permission check: must own func or its namespace */
- if (!pg_proc_ownercheck(funcOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(functionName));
-
- if (((Form_pg_proc) GETSTRUCT(tup))->proisagg)
- elog(ERROR, "RemoveFunction: function '%s' is an aggregate"
- "\n\tUse DROP AGGREGATE to remove it",
- NameListToString(functionName));
-
- if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
- {
- /* "Helpful" WARNING when removing a builtin function ... */
- elog(WARNING, "Removing built-in function \"%s\"",
- NameListToString(functionName));
- }
-
- /* Delete any comments associated with this function */
- DeleteComments(funcOid, RelationGetRelid(relation));
-
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- heap_close(relation, RowExclusiveLock);
-}
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
deleted file mode 100644
index 3a1519a5007..00000000000
--- a/src/backend/commands/indexcmds.c
+++ /dev/null
@@ -1,781 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * indexcmds.c
- * POSTGRES define and remove index code.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.75 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/catalog.h"
-#include "catalog/catname.h"
-#include "catalog/index.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_opclass.h"
-#include "catalog/pg_proc.h"
-#include "commands/defrem.h"
-#include "miscadmin.h"
-#include "optimizer/clauses.h"
-#include "optimizer/planmain.h"
-#include "optimizer/prep.h"
-#include "parser/parsetree.h"
-#include "parser/parse_coerce.h"
-#include "parser/parse_func.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-#define IsFuncIndex(ATTR_LIST) (((IndexElem*)lfirst(ATTR_LIST))->funcname != NIL)
-
-/* non-export function prototypes */
-static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid);
-static void FuncIndexArgs(IndexInfo *indexInfo, Oid *classOidP,
- IndexElem *funcIndex,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
-static void NormIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
-static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
-static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
-
-/*
- * DefineIndex
- * Creates a new index.
- *
- * 'attributeList' is a list of IndexElem specifying either a functional
- * index or a list of attributes to index on.
- * 'predicate' is the qual specified in the where clause.
- * 'rangetable' is needed to interpret the predicate.
- */
-void
-DefineIndex(RangeVar *heapRelation,
- char *indexRelationName,
- char *accessMethodName,
- List *attributeList,
- bool unique,
- bool primary,
- Expr *predicate,
- List *rangetable)
-{
- Oid *classObjectId;
- Oid accessMethodId;
- Oid relationId;
- Oid namespaceId;
- Relation rel;
- HeapTuple tuple;
- Form_pg_am accessMethodForm;
- IndexInfo *indexInfo;
- int numberOfAttributes;
- List *cnfPred = NIL;
-
- /*
- * count attributes in index
- */
- numberOfAttributes = length(attributeList);
- if (numberOfAttributes <= 0)
- elog(ERROR, "DefineIndex: must specify at least one attribute");
- if (numberOfAttributes > INDEX_MAX_KEYS)
- elog(ERROR, "Cannot use more than %d attributes in an index",
- INDEX_MAX_KEYS);
-
- /*
- * Open heap relation, acquire a suitable lock on it, remember its OID
- */
- rel = heap_openrv(heapRelation, ShareLock);
-
- /* Note: during bootstrap may see uncataloged relation */
- if (rel->rd_rel->relkind != RELKIND_RELATION &&
- rel->rd_rel->relkind != RELKIND_UNCATALOGED)
- elog(ERROR, "DefineIndex: relation \"%s\" is not a table",
- heapRelation->relname);
-
- relationId = RelationGetRelid(rel);
- namespaceId = RelationGetNamespace(rel);
-
- if (!IsBootstrapProcessingMode() &&
- IsSystemRelation(rel) &&
- !IndexesAreActive(relationId, false))
- elog(ERROR, "Existing indexes are inactive. REINDEX first");
-
- heap_close(rel, NoLock);
-
- /*
- * Verify we (still) have CREATE rights in the rel's namespace.
- * (Presumably we did when the rel was created, but maybe not anymore.)
- * Skip check if bootstrapping, since permissions machinery may not
- * be working yet; also, always allow if it's a temp table.
- */
- if (!IsBootstrapProcessingMode() && !isTempNamespace(namespaceId))
- {
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(namespaceId));
- }
-
- /*
- * look up the access method, verify it can handle the requested
- * features
- */
- tuple = SearchSysCache(AMNAME,
- PointerGetDatum(accessMethodName),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DefineIndex: access method \"%s\" not found",
- accessMethodName);
- accessMethodId = tuple->t_data->t_oid;
- accessMethodForm = (Form_pg_am) GETSTRUCT(tuple);
-
- if (unique && !accessMethodForm->amcanunique)
- elog(ERROR, "DefineIndex: access method \"%s\" does not support UNIQUE indexes",
- accessMethodName);
- if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
- elog(ERROR, "DefineIndex: access method \"%s\" does not support multi-column indexes",
- accessMethodName);
-
- ReleaseSysCache(tuple);
-
- /*
- * Convert the partial-index predicate from parsetree form to an
- * implicit-AND qual expression, for easier evaluation at runtime.
- * While we are at it, we reduce it to a canonical (CNF or DNF) form
- * to simplify the task of proving implications.
- */
- if (predicate != NULL && rangetable != NIL)
- {
- cnfPred = canonicalize_qual((Expr *) copyObject(predicate), true);
- fix_opids((Node *) cnfPred);
- CheckPredicate(cnfPred, rangetable, relationId);
- }
-
- /*
- * Prepare arguments for index_create, primarily an IndexInfo
- * structure
- */
- indexInfo = makeNode(IndexInfo);
- indexInfo->ii_Predicate = cnfPred;
- indexInfo->ii_FuncOid = InvalidOid;
- indexInfo->ii_Unique = unique;
-
- if (IsFuncIndex(attributeList))
- {
- IndexElem *funcIndex = (IndexElem *) lfirst(attributeList);
- int nargs;
-
- /* Parser should have given us only one list item, but check */
- if (numberOfAttributes != 1)
- elog(ERROR, "Functional index can only have one attribute");
-
- nargs = length(funcIndex->args);
- if (nargs > INDEX_MAX_KEYS)
- elog(ERROR, "Index function can take at most %d arguments",
- INDEX_MAX_KEYS);
-
- indexInfo->ii_NumIndexAttrs = 1;
- indexInfo->ii_NumKeyAttrs = nargs;
-
- classObjectId = (Oid *) palloc(sizeof(Oid));
-
- FuncIndexArgs(indexInfo, classObjectId, funcIndex,
- relationId, accessMethodName, accessMethodId);
- }
- else
- {
- indexInfo->ii_NumIndexAttrs = numberOfAttributes;
- indexInfo->ii_NumKeyAttrs = numberOfAttributes;
-
- classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));
-
- NormIndexAttrs(indexInfo, classObjectId, attributeList,
- relationId, accessMethodName, accessMethodId);
- }
-
- index_create(relationId, indexRelationName,
- indexInfo, accessMethodId, classObjectId,
- primary, allowSystemTableMods);
-
- /*
- * We update the relation's pg_class tuple even if it already has
- * relhasindex = true. This is needed to cause a shared-cache-inval
- * message to be sent for the pg_class tuple, which will cause other
- * backends to flush their relcache entries and in particular their
- * cached lists of the indexes for this relation.
- */
- setRelhasindex(relationId, true, primary, InvalidOid);
-}
-
-
-/*
- * CheckPredicate
- * Checks that the given list of partial-index predicates refer
- * (via the given range table) only to the given base relation oid.
- *
- * This used to also constrain the form of the predicate to forms that
- * indxpath.c could do something with. However, that seems overly
- * restrictive. One useful application of partial indexes is to apply
- * a UNIQUE constraint across a subset of a table, and in that scenario
- * any evaluatable predicate will work. So accept any predicate here
- * (except ones requiring a plan), and let indxpath.c fend for itself.
- */
-
-static void
-CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid)
-{
- if (length(rangeTable) != 1 || getrelid(1, rangeTable) != baseRelOid)
- elog(ERROR,
- "Partial-index predicates may refer only to the base relation");
-
- /*
- * We don't currently support generation of an actual query plan for a
- * predicate, only simple scalar expressions; hence these
- * restrictions.
- */
- if (contain_subplans((Node *) predList))
- elog(ERROR, "Cannot use subselect in index predicate");
- if (contain_agg_clause((Node *) predList))
- elog(ERROR, "Cannot use aggregate in index predicate");
-
- /*
- * A predicate using mutable functions is probably wrong, for the
- * same reasons that we don't allow a functional index to use one.
- */
- if (contain_mutable_functions((Node *) predList))
- elog(ERROR, "Functions in index predicate must be marked isImmutable");
-}
-
-
-static void
-FuncIndexArgs(IndexInfo *indexInfo,
- Oid *classOidP,
- IndexElem *funcIndex,
- Oid relId,
- char *accessMethodName,
- Oid accessMethodId)
-{
- Oid argTypes[FUNC_MAX_ARGS];
- List *arglist;
- int nargs = 0;
- int i;
- FuncDetailCode fdresult;
- Oid funcid;
- Oid rettype;
- bool retset;
- Oid *true_typeids;
-
- /*
- * process the function arguments, which are a list of T_String
- * (someday ought to allow more general expressions?)
- *
- * Note caller already checked that list is not too long.
- */
- MemSet(argTypes, 0, sizeof(argTypes));
-
- foreach(arglist, funcIndex->args)
- {
- char *arg = strVal(lfirst(arglist));
- HeapTuple tuple;
- Form_pg_attribute att;
-
- tuple = SearchSysCache(ATTNAME,
- ObjectIdGetDatum(relId),
- PointerGetDatum(arg),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DefineIndex: attribute \"%s\" not found", arg);
- att = (Form_pg_attribute) GETSTRUCT(tuple);
- indexInfo->ii_KeyAttrNumbers[nargs] = att->attnum;
- argTypes[nargs] = att->atttypid;
- ReleaseSysCache(tuple);
- nargs++;
- }
-
- /*
- * Lookup the function procedure to get its OID and result type.
- *
- * We rely on parse_func.c to find the correct function in the possible
- * presence of binary-compatible types. However, parse_func may do
- * too much: it will accept a function that requires run-time coercion
- * of input types, and the executor is not currently set up to support
- * that. So, check to make sure that the selected function has
- * exact-match or binary-compatible input types.
- */
- fdresult = func_get_detail(funcIndex->funcname, funcIndex->args,
- nargs, argTypes,
- &funcid, &rettype, &retset,
- &true_typeids);
- if (fdresult != FUNCDETAIL_NORMAL)
- {
- if (fdresult == FUNCDETAIL_AGGREGATE)
- elog(ERROR, "DefineIndex: functional index may not use an aggregate function");
- else if (fdresult == FUNCDETAIL_COERCION)
- elog(ERROR, "DefineIndex: functional index must use a real function, not a type coercion"
- "\n\tTry specifying the index opclass you want to use, instead");
- else
- func_error("DefineIndex", funcIndex->funcname, nargs, argTypes,
- NULL);
- }
-
- if (retset)
- elog(ERROR, "DefineIndex: cannot index on a function returning a set");
-
- for (i = 0; i < nargs; i++)
- {
- if (!IsBinaryCompatible(argTypes[i], true_typeids[i]))
- func_error("DefineIndex", funcIndex->funcname, nargs, argTypes,
- "Index function must be binary-compatible with table datatype");
- }
-
- /*
- * Require that the function be marked immutable. Using a mutable
- * function for a functional index is highly questionable, since if
- * you aren't going to get the same result for the same data every
- * time, it's not clear what the index entries mean at all.
- */
- if (func_volatile(funcid) != PROVOLATILE_IMMUTABLE)
- elog(ERROR, "DefineIndex: index function must be marked isImmutable");
-
- /* Process opclass, using func return type as default type */
-
- classOidP[0] = GetAttrOpClass(funcIndex, rettype,
- accessMethodName, accessMethodId);
-
- /* OK, return results */
-
- indexInfo->ii_FuncOid = funcid;
- /* Need to do the fmgr function lookup now, too */
- fmgr_info(funcid, &indexInfo->ii_FuncInfo);
-}
-
-static void
-NormIndexAttrs(IndexInfo *indexInfo,
- Oid *classOidP,
- List *attList, /* list of IndexElem's */
- Oid relId,
- char *accessMethodName,
- Oid accessMethodId)
-{
- List *rest;
- int attn = 0;
-
- /*
- * process attributeList
- */
- foreach(rest, attList)
- {
- IndexElem *attribute = (IndexElem *) lfirst(rest);
- HeapTuple atttuple;
- Form_pg_attribute attform;
-
- if (attribute->name == NULL)
- elog(ERROR, "missing attribute for define index");
-
- atttuple = SearchSysCache(ATTNAME,
- ObjectIdGetDatum(relId),
- PointerGetDatum(attribute->name),
- 0, 0);
- if (!HeapTupleIsValid(atttuple))
- elog(ERROR, "DefineIndex: attribute \"%s\" not found",
- attribute->name);
- attform = (Form_pg_attribute) GETSTRUCT(atttuple);
-
- indexInfo->ii_KeyAttrNumbers[attn] = attform->attnum;
-
- classOidP[attn] = GetAttrOpClass(attribute, attform->atttypid,
- accessMethodName, accessMethodId);
-
- ReleaseSysCache(atttuple);
- attn++;
- }
-}
-
-static Oid
-GetAttrOpClass(IndexElem *attribute, Oid attrType,
- char *accessMethodName, Oid accessMethodId)
-{
- char *catalogname;
- char *schemaname = NULL;
- char *opcname = NULL;
- HeapTuple tuple;
- Oid opClassId,
- opInputType;
-
- if (attribute->opclass == NIL)
- {
- /* no operator class specified, so find the default */
- opClassId = GetDefaultOpClass(attrType, accessMethodId);
- if (!OidIsValid(opClassId))
- elog(ERROR, "data type %s has no default operator class for access method \"%s\""
- "\n\tYou must specify an operator class for the index or define a"
- "\n\tdefault operator class for the data type",
- format_type_be(attrType), accessMethodName);
- return opClassId;
- }
-
- /*
- * Specific opclass name given, so look up the opclass.
- */
-
- /* deconstruct the name list */
- switch (length(attribute->opclass))
- {
- case 1:
- opcname = strVal(lfirst(attribute->opclass));
- break;
- case 2:
- schemaname = strVal(lfirst(attribute->opclass));
- opcname = strVal(lsecond(attribute->opclass));
- break;
- case 3:
- catalogname = strVal(lfirst(attribute->opclass));
- schemaname = strVal(lsecond(attribute->opclass));
- opcname = strVal(lfirst(lnext(lnext(attribute->opclass))));
- /*
- * We check the catalog name and then ignore it.
- */
- if (strcmp(catalogname, DatabaseName) != 0)
- elog(ERROR, "Cross-database references are not implemented");
- break;
- default:
- elog(ERROR, "Improper opclass name (too many dotted names): %s",
- NameListToString(attribute->opclass));
- break;
- }
-
- if (schemaname)
- {
- /* Look in specific schema only */
- Oid namespaceId;
-
- namespaceId = GetSysCacheOid(NAMESPACENAME,
- CStringGetDatum(schemaname),
- 0, 0, 0);
- if (!OidIsValid(namespaceId))
- elog(ERROR, "Namespace \"%s\" does not exist",
- schemaname);
- tuple = SearchSysCache(CLAAMNAMENSP,
- ObjectIdGetDatum(accessMethodId),
- PointerGetDatum(opcname),
- ObjectIdGetDatum(namespaceId),
- 0);
- }
- else
- {
- /* Unqualified opclass name, so search the search path */
- opClassId = OpclassnameGetOpcid(accessMethodId, opcname);
- if (!OidIsValid(opClassId))
- elog(ERROR, "DefineIndex: operator class \"%s\" not supported by access method \"%s\"",
- opcname, accessMethodName);
- tuple = SearchSysCache(CLAOID,
- ObjectIdGetDatum(opClassId),
- 0, 0, 0);
- }
-
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DefineIndex: operator class \"%s\" not supported by access method \"%s\"",
- NameListToString(attribute->opclass), accessMethodName);
-
- /*
- * Verify that the index operator class accepts this
- * datatype. Note we will accept binary compatibility.
- */
- opClassId = tuple->t_data->t_oid;
- opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype;
-
- if (!IsBinaryCompatible(attrType, opInputType))
- elog(ERROR, "operator class \"%s\" does not accept data type %s",
- NameListToString(attribute->opclass), format_type_be(attrType));
-
- ReleaseSysCache(tuple);
-
- return opClassId;
-}
-
-static Oid
-GetDefaultOpClass(Oid attrType, Oid accessMethodId)
-{
- OpclassCandidateList opclass;
- int nexact = 0;
- int ncompatible = 0;
- Oid exactOid = InvalidOid;
- Oid compatibleOid = InvalidOid;
-
- /*
- * We scan through all the opclasses available for the access method,
- * looking for one that is marked default and matches the target type
- * (either exactly or binary-compatibly, but prefer an exact match).
- *
- * We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
- *
- * The initial search is done by namespace.c so that we only consider
- * opclasses visible in the current namespace search path.
- */
- for (opclass = OpclassGetCandidates(accessMethodId);
- opclass != NULL;
- opclass = opclass->next)
- {
- if (opclass->opcdefault)
- {
- if (opclass->opcintype == attrType)
- {
- nexact++;
- exactOid = opclass->oid;
- }
- else if (IsBinaryCompatible(opclass->opcintype, attrType))
- {
- ncompatible++;
- compatibleOid = opclass->oid;
- }
- }
- }
-
- if (nexact == 1)
- return exactOid;
- if (nexact != 0)
- elog(ERROR, "pg_opclass contains multiple default opclasses for data type %s",
- format_type_be(attrType));
- if (ncompatible == 1)
- return compatibleOid;
-
- return InvalidOid;
-}
-
-/*
- * RemoveIndex
- * Deletes an index.
- *
- * Exceptions:
- * BadArg if name is invalid.
- * "ERROR" if index nonexistent.
- * ...
- */
-void
-RemoveIndex(RangeVar *relation)
-{
- Oid indOid;
- HeapTuple tuple;
-
- indOid = RangeVarGetRelid(relation, false);
- tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(indOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "index \"%s\" does not exist", relation->relname);
-
- if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
- elog(ERROR, "relation \"%s\" is of type \"%c\"",
- relation->relname, ((Form_pg_class) GETSTRUCT(tuple))->relkind);
-
- ReleaseSysCache(tuple);
-
- index_drop(indOid);
-}
-
-/*
- * Reindex
- * Recreate an index.
- *
- * Exceptions:
- * "ERROR" if index nonexistent.
- * ...
- */
-void
-ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
-{
- Oid indOid;
- HeapTuple tuple;
- bool overwrite = false;
-
- /*
- * REINDEX within a transaction block is dangerous, because if the
- * transaction is later rolled back we have no way to undo truncation
- * of the index's physical file. Disallow it.
- */
- if (IsTransactionBlock())
- elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
-
- indOid = RangeVarGetRelid(indexRelation, false);
- tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(indOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "index \"%s\" does not exist", indexRelation->relname);
-
- if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
- elog(ERROR, "relation \"%s\" is of type \"%c\"",
- indexRelation->relname,
- ((Form_pg_class) GETSTRUCT(tuple))->relkind);
-
- if (IsSystemClass((Form_pg_class) GETSTRUCT(tuple)))
- {
- if (!allowSystemTableMods)
- elog(ERROR, "\"%s\" is a system index. call REINDEX under standalone postgres with -O -P options",
- indexRelation->relname);
- if (!IsIgnoringSystemIndexes())
- elog(ERROR, "\"%s\" is a system index. call REINDEX under standalone postgres with -P -O options",
- indexRelation->relname);
- }
-
- ReleaseSysCache(tuple);
-
- if (IsIgnoringSystemIndexes())
- overwrite = true;
- if (!reindex_index(indOid, force, overwrite))
- elog(WARNING, "index \"%s\" wasn't reindexed", indexRelation->relname);
-}
-
-/*
- * ReindexTable
- * Recreate indexes of a table.
- *
- * Exceptions:
- * "ERROR" if table nonexistent.
- * ...
- */
-void
-ReindexTable(RangeVar *relation, bool force)
-{
- Oid heapOid;
- HeapTuple tuple;
-
- /*
- * REINDEX within a transaction block is dangerous, because if the
- * transaction is later rolled back we have no way to undo truncation
- * of the index's physical file. Disallow it.
- */
- if (IsTransactionBlock())
- elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
-
- heapOid = RangeVarGetRelid(relation, false);
- tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(heapOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "table \"%s\" does not exist", relation->relname);
-
- if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_RELATION)
- elog(ERROR, "relation \"%s\" is of type \"%c\"",
- relation->relname,
- ((Form_pg_class) GETSTRUCT(tuple))->relkind);
-
- ReleaseSysCache(tuple);
-
- if (!reindex_relation(heapOid, force))
- elog(WARNING, "table \"%s\" wasn't reindexed", relation->relname);
-}
-
-/*
- * ReindexDatabase
- * Recreate indexes of a database.
- */
-void
-ReindexDatabase(const char *dbname, bool force, bool all)
-{
- Relation relationRelation;
- HeapScanDesc scan;
- HeapTuple tuple;
- MemoryContext private_context;
- MemoryContext old;
- int relcnt,
- relalc,
- i,
- oncealc = 200;
- Oid *relids = (Oid *) NULL;
-
- AssertArg(dbname);
-
- if (strcmp(dbname, DatabaseName) != 0)
- elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
-
- if (!(superuser() || is_dbadmin(MyDatabaseId)))
- elog(ERROR, "REINDEX DATABASE: Permission denied.");
-
- if (!allowSystemTableMods)
- elog(ERROR, "must be called under standalone postgres with -O -P options");
- if (!IsIgnoringSystemIndexes())
- elog(ERROR, "must be called under standalone postgres with -P -O options");
-
- /*
- * We cannot run inside a user transaction block; if we were inside a
- * transaction, then our commit- and start-transaction-command calls
- * would not have the intended effect!
- */
- if (IsTransactionBlock())
- elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
-
- /*
- * Create a memory context that will survive forced transaction
- * commits we do below. Since it is a child of QueryContext, it will
- * go away eventually even if we suffer an error; there's no need for
- * special abort cleanup logic.
- */
- private_context = AllocSetContextCreate(QueryContext,
- "ReindexDatabase",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-
- /*
- * Scan pg_class to build a list of the relations we need to reindex.
- */
- relationRelation = heap_openr(RelationRelationName, AccessShareLock);
- scan = heap_beginscan(relationRelation, SnapshotNow, 0, NULL);
- relcnt = relalc = 0;
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- if (!all)
- {
- if (!IsSystemClass((Form_pg_class) GETSTRUCT(tuple)))
- continue;
- }
- if (((Form_pg_class) GETSTRUCT(tuple))->relkind == RELKIND_RELATION)
- {
- old = MemoryContextSwitchTo(private_context);
- if (relcnt == 0)
- {
- relalc = oncealc;
- relids = palloc(sizeof(Oid) * relalc);
- }
- else if (relcnt >= relalc)
- {
- relalc *= 2;
- relids = repalloc(relids, sizeof(Oid) * relalc);
- }
- MemoryContextSwitchTo(old);
- relids[relcnt] = tuple->t_data->t_oid;
- relcnt++;
- }
- }
- heap_endscan(scan);
- heap_close(relationRelation, AccessShareLock);
-
- /* Now reindex each rel in a separate transaction */
- CommitTransactionCommand();
- for (i = 0; i < relcnt; i++)
- {
- StartTransactionCommand();
- if (reindex_relation(relids[i], force))
- elog(WARNING, "relation %u was reindexed", relids[i]);
- CommitTransactionCommand();
- }
- StartTransactionCommand();
-
- MemoryContextDelete(private_context);
-}
diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c
deleted file mode 100644
index fe2295cef0f..00000000000
--- a/src/backend/commands/lockcmds.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * lockcmds.c
- * Lock command support code
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/lockcmds.c,v 1.3 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/namespace.h"
-#include "commands/lockcmds.h"
-#include "miscadmin.h"
-#include "utils/acl.h"
-#include "utils/lsyscache.h"
-
-
-/*
- * LOCK TABLE
- */
-void
-LockTableCommand(LockStmt *lockstmt)
-{
- List *p;
-
- /*
- * Iterate over the list and open, lock, and close the relations one
- * at a time
- */
-
- foreach(p, lockstmt->relations)
- {
- RangeVar *relation = lfirst(p);
- Oid reloid;
- AclResult aclresult;
- Relation rel;
-
- /*
- * We don't want to open the relation until we've checked privilege.
- * So, manually get the relation OID.
- */
- reloid = RangeVarGetRelid(relation, false);
-
- if (lockstmt->mode == AccessShareLock)
- aclresult = pg_class_aclcheck(reloid, GetUserId(),
- ACL_SELECT);
- else
- aclresult = pg_class_aclcheck(reloid, GetUserId(),
- ACL_UPDATE | ACL_DELETE);
-
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_rel_name(reloid));
-
- rel = relation_open(reloid, lockstmt->mode);
-
- /* Currently, we only allow plain tables to be locked */
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "LOCK TABLE: %s is not a table",
- relation->relname);
-
- relation_close(rel, NoLock); /* close rel, keep lock */
- }
-}
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
deleted file mode 100644
index de8ec06acbf..00000000000
--- a/src/backend/commands/operatorcmds.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * operatorcmds.c
- *
- * Routines for operator manipulation commands
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.3 2002/04/27 03:45:01 tgl Exp $
- *
- * DESCRIPTION
- * The "DefineFoo" routines take the parse tree and pick out the
- * appropriate arguments/flags, passing the results to the
- * corresponding "FooDefine" routines (in src/catalog) that do
- * the actual catalog-munging. These routines also verify permission
- * of the user to execute the command.
- *
- * NOTES
- * These things must be defined and committed in the following order:
- * "create function":
- * input/output, recv/send procedures
- * "create type":
- * type
- * "create operator":
- * operators
- *
- * Most of the parse-tree manipulation routines are defined in
- * commands/manip.c.
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_operator.h"
-#include "commands/comment.h"
-#include "commands/defrem.h"
-#include "miscadmin.h"
-#include "parser/parse_oper.h"
-#include "parser/parse_type.h"
-#include "utils/acl.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-/*
- * DefineOperator
- * this function extracts all the information from the
- * parameter list generated by the parser and then has
- * OperatorCreate() do all the actual work.
- *
- * 'parameters' is a list of DefElem
- */
-void
-DefineOperator(List *names, List *parameters)
-{
- char *oprName;
- Oid oprNamespace;
- AclResult aclresult;
- uint16 precedence = 0; /* operator precedence */
- bool canHash = false; /* operator hashes */
- bool canMerge = false; /* operator merges */
- bool isLeftAssociative = true; /* operator is left
- * associative */
- List *functionName = NIL; /* function for operator */
- TypeName *typeName1 = NULL; /* first type name */
- TypeName *typeName2 = NULL; /* second type name */
- Oid typeId1 = InvalidOid; /* types converted to OID */
- Oid typeId2 = InvalidOid;
- List *commutatorName = NIL; /* optional commutator operator
- * name */
- List *negatorName = NIL; /* optional negator operator name */
- List *restrictionName = NIL; /* optional restrict. sel.
- * procedure */
- List *joinName = NIL; /* optional join sel. procedure */
- List *leftSortName = NIL; /* optional left sort operator */
- List *rightSortName = NIL; /* optional right sort operator */
- List *ltCompareName = NIL; /* optional < compare operator */
- List *gtCompareName = NIL; /* optional > compare operator */
- List *pl;
-
- /* Convert list of names to a name and namespace */
- oprNamespace = QualifiedNameGetCreationNamespace(names, &oprName);
-
- /* Check we have creation rights in target namespace */
- aclresult = pg_namespace_aclcheck(oprNamespace, GetUserId(), ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(oprNamespace));
-
- /*
- * loop over the definition list and extract the information we need.
- */
- foreach(pl, parameters)
- {
- DefElem *defel = (DefElem *) lfirst(pl);
-
- if (strcasecmp(defel->defname, "leftarg") == 0)
- {
- typeName1 = defGetTypeName(defel);
- if (typeName1->setof)
- elog(ERROR, "setof type not implemented for leftarg");
- }
- else if (strcasecmp(defel->defname, "rightarg") == 0)
- {
- typeName2 = defGetTypeName(defel);
- if (typeName2->setof)
- elog(ERROR, "setof type not implemented for rightarg");
- }
- else if (strcasecmp(defel->defname, "procedure") == 0)
- functionName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "precedence") == 0)
- {
- /* NOT IMPLEMENTED (never worked in v4.2) */
- elog(NOTICE, "CREATE OPERATOR: precedence not implemented");
- }
- else if (strcasecmp(defel->defname, "associativity") == 0)
- {
- /* NOT IMPLEMENTED (never worked in v4.2) */
- elog(NOTICE, "CREATE OPERATOR: associativity not implemented");
- }
- else if (strcasecmp(defel->defname, "commutator") == 0)
- commutatorName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "negator") == 0)
- negatorName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "restrict") == 0)
- restrictionName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "join") == 0)
- joinName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "hashes") == 0)
- canHash = TRUE;
- else if (strcasecmp(defel->defname, "merges") == 0)
- canMerge = TRUE;
- else if (strcasecmp(defel->defname, "sort1") == 0)
- leftSortName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "sort2") == 0)
- rightSortName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "ltcmp") == 0)
- ltCompareName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "gtcmp") == 0)
- gtCompareName = defGetQualifiedName(defel);
- else
- {
- elog(WARNING, "DefineOperator: attribute \"%s\" not recognized",
- defel->defname);
- }
- }
-
- /*
- * make sure we have our required definitions
- */
- if (functionName == NIL)
- elog(ERROR, "Define: \"procedure\" unspecified");
-
- /* Transform type names to type OIDs */
- if (typeName1)
- typeId1 = typenameTypeId(typeName1);
- if (typeName2)
- typeId2 = typenameTypeId(typeName2);
-
- /*
- * If any of the mergejoin support operators were given, then canMerge
- * is implicit. If canMerge is specified or implicit, fill in default
- * operator names for any missing mergejoin support operators.
- */
- if (leftSortName || rightSortName || ltCompareName || gtCompareName)
- canMerge = true;
-
- if (canMerge)
- {
- if (!leftSortName)
- leftSortName = makeList1(makeString("<"));
- if (!rightSortName)
- rightSortName = makeList1(makeString("<"));
- if (!ltCompareName)
- ltCompareName = makeList1(makeString("<"));
- if (!gtCompareName)
- gtCompareName = makeList1(makeString(">"));
- }
-
- /*
- * now have OperatorCreate do all the work..
- */
- OperatorCreate(oprName, /* operator name */
- oprNamespace, /* namespace */
- typeId1, /* left type id */
- typeId2, /* right type id */
- functionName, /* function for operator */
- precedence, /* operator precedence */
- isLeftAssociative, /* operator is left associative */
- commutatorName, /* optional commutator operator
- * name */
- negatorName, /* optional negator operator name */
- restrictionName, /* optional restrict. sel.
- * procedure */
- joinName, /* optional join sel. procedure name */
- canHash, /* operator hashes */
- leftSortName, /* optional left sort operator */
- rightSortName, /* optional right sort operator */
- ltCompareName, /* optional < comparison op */
- gtCompareName); /* optional < comparison op */
-}
-
-
-/*
- * RemoveOperator
- * Deletes an operator.
- *
- * Exceptions:
- * BadArg if name is invalid.
- * BadArg if type1 is invalid.
- * "ERROR" if operator nonexistent.
- * ...
- */
-void
-RemoveOperator(List *operatorName, /* operator name */
- TypeName *typeName1, /* left argument type name */
- TypeName *typeName2) /* right argument type name */
-{
- Oid operOid;
- Relation relation;
- HeapTuple tup;
-
- operOid = LookupOperNameTypeNames(operatorName, typeName1, typeName2,
- "RemoveOperator");
-
- relation = heap_openr(OperatorRelationName, RowExclusiveLock);
-
- tup = SearchSysCacheCopy(OPEROID,
- ObjectIdGetDatum(operOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
- elog(ERROR, "RemoveOperator: failed to find tuple for operator '%s'",
- NameListToString(operatorName));
-
- /* Permission check: must own operator or its namespace */
- if (!pg_oper_ownercheck(operOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_operator) GETSTRUCT(tup))->oprnamespace,
- GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, NameListToString(operatorName));
-
- /* Delete any comments associated with this operator */
- DeleteComments(operOid, RelationGetRelid(relation));
-
- simple_heap_delete(relation, &tup->t_self);
-
- heap_freetuple(tup);
- heap_close(relation, RowExclusiveLock);
-}
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
deleted file mode 100644
index 754ea46246c..00000000000
--- a/src/backend/commands/portalcmds.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * portalcmds.c
- * portal support code
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.2 2002/05/21 22:05:54 tgl Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "commands/portalcmds.h"
-#include "executor/executor.h"
-
-
-/*
- * PortalCleanup
- */
-void
-PortalCleanup(Portal portal)
-{
- MemoryContext oldcontext;
-
- /*
- * sanity checks
- */
- AssertArg(PortalIsValid(portal));
- AssertArg(portal->cleanup == PortalCleanup);
-
- /*
- * set proper portal-executor context before calling ExecMain.
- */
- oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
-
- /*
- * tell the executor to shutdown the query
- */
- ExecutorEnd(PortalGetQueryDesc(portal), PortalGetState(portal));
-
- /*
- * switch back to previous context
- */
- MemoryContextSwitchTo(oldcontext);
-}
-
-
-/*
- * PerformPortalFetch
- *
- * name: name of portal
- * forward: forward or backward fetch?
- * count: # of tuples to fetch (0 implies all)
- * dest: where to send results
- * completionTag: points to a buffer of size COMPLETION_TAG_BUFSIZE
- * in which to store a command completion status string.
- *
- * completionTag may be NULL if caller doesn't want a status string.
- */
-void
-PerformPortalFetch(char *name,
- bool forward,
- int count,
- CommandDest dest,
- char *completionTag)
-{
- Portal portal;
- QueryDesc *queryDesc;
- EState *estate;
- MemoryContext oldcontext;
- ScanDirection direction;
- bool temp_desc = false;
-
- /* initialize completion status in case of early exit */
- if (completionTag)
- strcpy(completionTag, (dest == None) ? "MOVE 0" : "FETCH 0");
-
- /*
- * sanity checks
- */
- if (name == NULL)
- {
- elog(WARNING, "PerformPortalFetch: missing portal name");
- return;
- }
-
- /*
- * get the portal from the portal name
- */
- portal = GetPortalByName(name);
- if (!PortalIsValid(portal))
- {
- elog(WARNING, "PerformPortalFetch: portal \"%s\" not found",
- name);
- return;
- }
-
- /*
- * switch into the portal context
- */
- oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
-
- queryDesc = PortalGetQueryDesc(portal);
- estate = PortalGetState(portal);
-
- /*
- * If the requested destination is not the same as the query's
- * original destination, make a temporary QueryDesc with the proper
- * destination. This supports MOVE, for example, which will pass in
- * dest = None.
- *
- * EXCEPTION: if the query's original dest is RemoteInternal (ie, it's a
- * binary cursor) and the request is Remote, we do NOT override the
- * original dest. This is necessary since a FETCH command will pass
- * dest = Remote, not knowing whether the cursor is binary or not.
- */
- if (dest != queryDesc->dest &&
- !(queryDesc->dest == RemoteInternal && dest == Remote))
- {
- QueryDesc *qdesc = (QueryDesc *) palloc(sizeof(QueryDesc));
-
- memcpy(qdesc, queryDesc, sizeof(QueryDesc));
- qdesc->dest = dest;
- queryDesc = qdesc;
- temp_desc = true;
- }
-
- /*
- * Determine which direction to go in, and check to see if we're
- * already at the end of the available tuples in that direction. If
- * so, set the direction to NoMovement to avoid trying to fetch any
- * tuples. (This check exists because not all plan node types
- * are robust about being called again if they've already returned
- * NULL once.) Then call the executor (we must not skip this, because
- * the destination needs to see a setup and shutdown even if no tuples
- * are available). Finally, update the atStart/atEnd state depending
- * on the number of tuples that were retrieved.
- */
- if (forward)
- {
- if (portal->atEnd)
- direction = NoMovementScanDirection;
- else
- direction = ForwardScanDirection;
-
- ExecutorRun(queryDesc, estate, direction, (long) count);
-
- if (estate->es_processed > 0)
- portal->atStart = false; /* OK to back up now */
- if (count <= 0 || (int) estate->es_processed < count)
- portal->atEnd = true; /* we retrieved 'em all */
- }
- else
- {
- if (portal->atStart)
- direction = NoMovementScanDirection;
- else
- direction = BackwardScanDirection;
-
- ExecutorRun(queryDesc, estate, direction, (long) count);
-
- if (estate->es_processed > 0)
- portal->atEnd = false; /* OK to go forward now */
- if (count <= 0 || (int) estate->es_processed < count)
- portal->atStart = true; /* we retrieved 'em all */
- }
-
- /* Return command status if wanted */
- if (completionTag)
- snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "%s %u",
- (dest == None) ? "MOVE" : "FETCH",
- estate->es_processed);
-
- /*
- * Clean up and switch back to old context.
- */
- if (temp_desc)
- pfree(queryDesc);
-
- MemoryContextSwitchTo(oldcontext);
-}
-
-/*
- * PerformPortalClose
- */
-void
-PerformPortalClose(char *name, CommandDest dest)
-{
- Portal portal;
-
- /*
- * sanity checks
- */
- if (name == NULL)
- {
- elog(WARNING, "PerformPortalClose: missing portal name");
- return;
- }
-
- /*
- * get the portal from the portal name
- */
- portal = GetPortalByName(name);
- if (!PortalIsValid(portal))
- {
- elog(WARNING, "PerformPortalClose: portal \"%s\" not found",
- name);
- return;
- }
-
- /*
- * Note: PortalCleanup is called as a side-effect
- */
- PortalDrop(portal);
-}
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
deleted file mode 100644
index 158927067f1..00000000000
--- a/src/backend/commands/proclang.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * proclang.c
- * PostgreSQL PROCEDURAL LANGUAGE support code.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.34 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <ctype.h>
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/indexing.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_language.h"
-#include "catalog/pg_proc.h"
-#include "catalog/pg_type.h"
-#include "commands/proclang.h"
-#include "commands/defrem.h"
-#include "fmgr.h"
-#include "miscadmin.h"
-#include "parser/parse_func.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-/* ---------------------------------------------------------------------
- * CREATE PROCEDURAL LANGUAGE
- * ---------------------------------------------------------------------
- */
-void
-CreateProceduralLanguage(CreatePLangStmt *stmt)
-{
- char languageName[NAMEDATALEN];
- Oid procOid, valProcOid;
- Oid typev[FUNC_MAX_ARGS];
- char nulls[Natts_pg_language];
- Datum values[Natts_pg_language];
- Relation rel;
- HeapTuple tup;
- TupleDesc tupDesc;
- int i;
-
- /*
- * Check permission
- */
- if (!superuser())
- elog(ERROR, "Only users with Postgres superuser privilege are "
- "permitted to create procedural languages");
-
- /*
- * Translate the language name and check that this language doesn't
- * already exist
- */
- case_translate_language_name(stmt->plname, languageName);
-
- if (SearchSysCacheExists(LANGNAME,
- PointerGetDatum(languageName),
- 0, 0, 0))
- elog(ERROR, "Language %s already exists", languageName);
-
- /*
- * Lookup the PL handler function and check that it is of return type
- * Opaque
- */
- MemSet(typev, 0, sizeof(typev));
- procOid = LookupFuncName(stmt->plhandler, 0, typev);
- if (!OidIsValid(procOid))
- elog(ERROR, "PL handler function %s() doesn't exist",
- NameListToString(stmt->plhandler));
- if (get_func_rettype(procOid) != InvalidOid)
- elog(ERROR, "PL handler function %s() does not return type \"opaque\"",
- NameListToString(stmt->plhandler));
-
- /* validate the validator function */
- if (stmt->plvalidator)
- {
- typev[0] = OIDOID;
- valProcOid = LookupFuncName(stmt->plvalidator, 1, typev);
- if (!OidIsValid(valProcOid))
- elog(ERROR, "PL validator function %s(oid) doesn't exist",
- NameListToString(stmt->plvalidator));
- }
- else
- valProcOid = 0;
-
- /*
- * Insert the new language into pg_language
- */
- for (i = 0; i < Natts_pg_language; i++)
- {
- nulls[i] = ' ';
- values[i] = (Datum) NULL;
- }
-
- i = 0;
- values[i++] = PointerGetDatum(languageName);
- values[i++] = BoolGetDatum(true); /* lanispl */
- values[i++] = BoolGetDatum(stmt->pltrusted);
- values[i++] = ObjectIdGetDatum(procOid);
- values[i++] = ObjectIdGetDatum(valProcOid);
- values[i++] = DirectFunctionCall1(textin,
- CStringGetDatum(stmt->plcompiler));
- nulls[i] = 'n'; /* lanacl */
-
- rel = heap_openr(LanguageRelationName, RowExclusiveLock);
-
- tupDesc = rel->rd_att;
- tup = heap_formtuple(tupDesc, values, nulls);
-
- simple_heap_insert(rel, tup);
-
- if (RelationGetForm(rel)->relhasindex)
- {
- Relation idescs[Num_pg_language_indices];
-
- CatalogOpenIndices(Num_pg_language_indices, Name_pg_language_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_language_indices, rel, tup);
- CatalogCloseIndices(Num_pg_language_indices, idescs);
- }
-
- heap_close(rel, RowExclusiveLock);
-}
-
-
-/* ---------------------------------------------------------------------
- * DROP PROCEDURAL LANGUAGE
- * ---------------------------------------------------------------------
- */
-void
-DropProceduralLanguage(DropPLangStmt *stmt)
-{
- char languageName[NAMEDATALEN];
- HeapTuple langTup;
- Relation rel;
-
- /*
- * Check permission
- */
- if (!superuser())
- elog(ERROR, "Only users with Postgres superuser privilege are "
- "permitted to drop procedural languages");
-
- /*
- * Translate the language name, check that this language exist and is
- * a PL
- */
- case_translate_language_name(stmt->plname, languageName);
-
- rel = heap_openr(LanguageRelationName, RowExclusiveLock);
-
- langTup = SearchSysCacheCopy(LANGNAME,
- PointerGetDatum(languageName),
- 0, 0, 0);
- if (!HeapTupleIsValid(langTup))
- elog(ERROR, "Language %s doesn't exist", languageName);
-
- if (!((Form_pg_language) GETSTRUCT(langTup))->lanispl)
- elog(ERROR, "Language %s isn't a created procedural language",
- languageName);
-
- simple_heap_delete(rel, &langTup->t_self);
-
- heap_freetuple(langTup);
- heap_close(rel, RowExclusiveLock);
-}
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
deleted file mode 100644
index a790a28bccd..00000000000
--- a/src/backend/commands/schemacmds.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * schemacmds.c
- * schema creation command support code
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.4 2002/06/11 13:40:50 wieck Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "catalog/catalog.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_namespace.h"
-#include "commands/schemacmds.h"
-#include "miscadmin.h"
-#include "parser/analyze.h"
-#include "tcop/utility.h"
-#include "utils/acl.h"
-#include "utils/lsyscache.h"
-
-
-/*
- * CREATE SCHEMA
- */
-void
-CreateSchemaCommand(CreateSchemaStmt *stmt)
-{
- const char *schemaName = stmt->schemaname;
- const char *authId = stmt->authid;
- Oid namespaceId;
- List *parsetree_list;
- List *parsetree_item;
- const char *owner_name;
- Oid owner_userid;
- Oid saved_userid;
- AclResult aclresult;
-
- saved_userid = GetUserId();
-
- /*
- * Figure out user identities.
- */
-
- if (!authId)
- {
- owner_userid = saved_userid;
- owner_name = GetUserNameFromId(owner_userid);
- }
- else if (superuser())
- {
- owner_name = authId;
- /* The following will error out if user does not exist */
- owner_userid = get_usesysid(owner_name);
- /*
- * Set the current user to the requested authorization so
- * that objects created in the statement have the requested
- * owner. (This will revert to session user on error or at
- * the end of this routine.)
- */
- SetUserId(owner_userid);
- }
- else /* not superuser */
- {
- owner_userid = saved_userid;
- owner_name = GetUserNameFromId(owner_userid);
- if (strcmp(authId, owner_name) != 0)
- elog(ERROR, "CREATE SCHEMA: permission denied"
- "\n\t\"%s\" is not a superuser, so cannot create a schema for \"%s\"",
- owner_name, authId);
- }
-
- /*
- * Permissions checks.
- */
- aclresult = pg_database_aclcheck(MyDatabaseId, saved_userid, ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, DatabaseName);
-
- if (!allowSystemTableMods && IsReservedName(schemaName))
- elog(ERROR, "CREATE SCHEMA: Illegal schema name: \"%s\" -- pg_ is reserved for system schemas",
- schemaName);
-
- /* Create the schema's namespace */
- namespaceId = NamespaceCreate(schemaName, owner_userid);
-
- /* Advance cmd counter to make the namespace visible */
- CommandCounterIncrement();
-
- /*
- * Temporarily make the new namespace be the front of the search path,
- * as well as the default creation target namespace. This will be undone
- * at the end of this routine, or upon error.
- */
- PushSpecialNamespace(namespaceId);
-
- /*
- * Examine the list of commands embedded in the CREATE SCHEMA command,
- * and reorganize them into a sequentially executable order with no
- * forward references. Note that the result is still a list of raw
- * parsetrees in need of parse analysis --- we cannot, in general,
- * run analyze.c on one statement until we have actually executed the
- * prior ones.
- */
- parsetree_list = analyzeCreateSchemaStmt(stmt);
-
- /*
- * Analyze and execute each command contained in the CREATE SCHEMA
- */
- foreach(parsetree_item, parsetree_list)
- {
- Node *parsetree = (Node *) lfirst(parsetree_item);
- List *querytree_list,
- *querytree_item;
-
- querytree_list = parse_analyze(parsetree, NULL);
-
- foreach(querytree_item, querytree_list)
- {
- Query *querytree = (Query *) lfirst(querytree_item);
-
- /* schemas should contain only utility stmts */
- Assert(querytree->commandType == CMD_UTILITY);
- /* do this step */
- ProcessUtility(querytree->utilityStmt, None, NULL);
- /* make sure later steps can see the object created here */
- CommandCounterIncrement();
- }
- }
-
- /* Reset search path to normal state */
- PopSpecialNamespace(namespaceId);
-
- /* Reset current user */
- SetUserId(saved_userid);
-}
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
deleted file mode 100644
index 1d013612da4..00000000000
--- a/src/backend/commands/sequence.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * sequence.c
- * PostgreSQL sequences support code.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.82 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_type.h"
-#include "commands/defrem.h"
-#include "commands/tablecmds.h"
-#include "commands/sequence.h"
-#include "miscadmin.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-
-
-#ifndef INT64_IS_BUSTED
-#ifdef HAVE_LL_CONSTANTS
-#define SEQ_MAXVALUE ((int64) 0x7FFFFFFFFFFFFFFFLL)
-#else
-#define SEQ_MAXVALUE ((int64) 0x7FFFFFFFFFFFFFFF)
-#endif
-#else /* INT64_IS_BUSTED */
-#define SEQ_MAXVALUE ((int64) 0x7FFFFFFF)
-#endif /* INT64_IS_BUSTED */
-
-#define SEQ_MINVALUE (-SEQ_MAXVALUE)
-
-/*
- * We don't want to log each fetching of a value from a sequence,
- * so we pre-log a few fetches in advance. In the event of
- * crash we can lose as much as we pre-logged.
- */
-#define SEQ_LOG_VALS 32
-
-/*
- * The "special area" of a sequence's buffer page looks like this.
- */
-#define SEQ_MAGIC 0x1717
-
-typedef struct sequence_magic
-{
- uint32 magic;
-} sequence_magic;
-
-/*
- * We store a SeqTable item for every sequence we have touched in the current
- * session. This is needed to hold onto nextval/currval state. (We can't
- * rely on the relcache, since it's only, well, a cache, and may decide to
- * discard entries.)
- *
- * XXX We use linear search to find pre-existing SeqTable entries. This is
- * good when only a small number of sequences are touched in a session, but
- * would suck with many different sequences. Perhaps use a hashtable someday.
- */
-typedef struct SeqTableData
-{
- struct SeqTableData *next; /* link to next SeqTable object */
- Oid relid; /* pg_class OID of this sequence */
- TransactionId xid; /* xact in which we last did a seq op */
- int64 last; /* value last returned by nextval */
- int64 cached; /* last value already cached for nextval */
- /* if last != cached, we have not used up all the cached values */
- int64 increment; /* copy of sequence's increment field */
-} SeqTableData;
-
-typedef SeqTableData *SeqTable;
-
-static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
-
-
-static void init_sequence(const char *caller, RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel);
-static Form_pg_sequence read_info(const char *caller, SeqTable elm,
- Relation rel, Buffer *buf);
-static void init_params(CreateSeqStmt *seq, Form_pg_sequence new);
-static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
-
-/*
- * DefineSequence
- * Creates a new sequence relation
- */
-void
-DefineSequence(CreateSeqStmt *seq)
-{
- FormData_pg_sequence new;
- CreateStmt *stmt = makeNode(CreateStmt);
- Oid seqoid;
- Relation rel;
- Buffer buf;
- PageHeader page;
- sequence_magic *sm;
- HeapTuple tuple;
- TupleDesc tupDesc;
- Datum value[SEQ_COL_LASTCOL];
- char null[SEQ_COL_LASTCOL];
- int i;
- NameData name;
-
- /* Check and set values */
- init_params(seq, &new);
-
- /*
- * Create relation (and fill *null & *value)
- */
- stmt->tableElts = NIL;
- for (i = SEQ_COL_FIRSTCOL; i <= SEQ_COL_LASTCOL; i++)
- {
- ColumnDef *coldef;
- TypeName *typnam;
-
- typnam = makeNode(TypeName);
- typnam->setof = FALSE;
- typnam->arrayBounds = NIL;
- typnam->typmod = -1;
- coldef = makeNode(ColumnDef);
- coldef->typename = typnam;
- coldef->raw_default = NULL;
- coldef->cooked_default = NULL;
- coldef->is_not_null = false;
- null[i - 1] = ' ';
-
- switch (i)
- {
- case SEQ_COL_NAME:
- typnam->typeid = NAMEOID;
- coldef->colname = "sequence_name";
- namestrcpy(&name, seq->sequence->relname);
- value[i - 1] = NameGetDatum(&name);
- break;
- case SEQ_COL_LASTVAL:
- typnam->typeid = INT8OID;
- coldef->colname = "last_value";
- value[i - 1] = Int64GetDatumFast(new.last_value);
- break;
- case SEQ_COL_INCBY:
- typnam->typeid = INT8OID;
- coldef->colname = "increment_by";
- value[i - 1] = Int64GetDatumFast(new.increment_by);
- break;
- case SEQ_COL_MAXVALUE:
- typnam->typeid = INT8OID;
- coldef->colname = "max_value";
- value[i - 1] = Int64GetDatumFast(new.max_value);
- break;
- case SEQ_COL_MINVALUE:
- typnam->typeid = INT8OID;
- coldef->colname = "min_value";
- value[i - 1] = Int64GetDatumFast(new.min_value);
- break;
- case SEQ_COL_CACHE:
- typnam->typeid = INT8OID;
- coldef->colname = "cache_value";
- value[i - 1] = Int64GetDatumFast(new.cache_value);
- break;
- case SEQ_COL_LOG:
- typnam->typeid = INT8OID;
- coldef->colname = "log_cnt";
- value[i - 1] = Int64GetDatum((int64) 1);
- break;
- case SEQ_COL_CYCLE:
- typnam->typeid = BOOLOID;
- coldef->colname = "is_cycled";
- value[i - 1] = BoolGetDatum(new.is_cycled);
- break;
- case SEQ_COL_CALLED:
- typnam->typeid = BOOLOID;
- coldef->colname = "is_called";
- value[i - 1] = BoolGetDatum(false);
- break;
- }
- stmt->tableElts = lappend(stmt->tableElts, coldef);
- }
-
- stmt->relation = seq->sequence;
- stmt->inhRelations = NIL;
- stmt->constraints = NIL;
- stmt->hasoids = false;
-
- seqoid = DefineRelation(stmt, RELKIND_SEQUENCE);
-
- rel = heap_open(seqoid, AccessExclusiveLock);
- tupDesc = RelationGetDescr(rel);
-
- /* Initialize first page of relation with special magic number */
-
- buf = ReadBuffer(rel, P_NEW);
-
- if (!BufferIsValid(buf))
- elog(ERROR, "DefineSequence: ReadBuffer failed");
-
- Assert(BufferGetBlockNumber(buf) == 0);
-
- page = (PageHeader) BufferGetPage(buf);
-
- PageInit((Page) page, BufferGetPageSize(buf), sizeof(sequence_magic));
- sm = (sequence_magic *) PageGetSpecialPointer(page);
- sm->magic = SEQ_MAGIC;
-
- /* hack: ensure heap_insert will insert on the just-created page */
- rel->rd_targblock = 0;
-
- /* Now form & insert sequence tuple */
- tuple = heap_formtuple(tupDesc, value, null);
- simple_heap_insert(rel, tuple);
-
- Assert(ItemPointerGetOffsetNumber(&(tuple->t_self)) == FirstOffsetNumber);
-
- /*
- * Two special hacks here:
- *
- * 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
- * invisible to SELECTs after 2G transactions. It is okay to do this
- * because if the current transaction aborts, no other xact will ever
- * examine the sequence tuple anyway.
- *
- * 2. Even though heap_insert emitted a WAL log record, we have to emit
- * an XLOG_SEQ_LOG record too, since (a) the heap_insert record will
- * not have the right xmin, and (b) REDO of the heap_insert record
- * would re-init page and sequence magic number would be lost. This
- * means two log records instead of one :-(
- */
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- START_CRIT_SECTION();
-
- {
- /*
- * Note that the "tuple" structure is still just a local tuple record
- * created by heap_formtuple; its t_data pointer doesn't point at the
- * disk buffer. To scribble on the disk buffer we need to fetch the
- * item pointer. But do the same to the local tuple, since that will
- * be the source for the WAL log record, below.
- */
- ItemId itemId;
- Item item;
-
- itemId = PageGetItemId((Page) page, FirstOffsetNumber);
- item = PageGetItem((Page) page, itemId);
-
- HeapTupleHeaderSetXmin((HeapTupleHeader) item, FrozenTransactionId);
- ((HeapTupleHeader) item)->t_infomask |= HEAP_XMIN_COMMITTED;
-
- HeapTupleHeaderSetXmin(tuple->t_data, FrozenTransactionId);
- tuple->t_data->t_infomask |= HEAP_XMIN_COMMITTED;
- }
-
- {
- xl_seq_rec xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
- Form_pg_sequence newseq = (Form_pg_sequence) GETSTRUCT(tuple);
-
- /* We do not log first nextval call, so "advance" sequence here */
- /* Note we are scribbling on local tuple, not the disk buffer */
- newseq->is_called = true;
- newseq->log_cnt = 0;
-
- xlrec.node = rel->rd_node;
- rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char *) &xlrec;
- rdata[0].len = sizeof(xl_seq_rec);
- rdata[0].next = &(rdata[1]);
-
- rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char *) tuple->t_data;
- rdata[1].len = tuple->t_len;
- rdata[1].next = NULL;
-
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
-
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
- END_CRIT_SECTION();
-
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- heap_close(rel, NoLock);
-}
-
-
-Datum
-nextval(PG_FUNCTION_ARGS)
-{
- text *seqin = PG_GETARG_TEXT_P(0);
- RangeVar *sequence;
- SeqTable elm;
- Relation seqrel;
- Buffer buf;
- Page page;
- Form_pg_sequence seq;
- int64 incby,
- maxv,
- minv,
- cache,
- log,
- fetch,
- last;
- int64 result,
- next,
- rescnt = 0;
- bool logit = false;
-
- sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
- "nextval"));
-
- /* open and AccessShareLock sequence */
- init_sequence("nextval", sequence, &elm, &seqrel);
-
- if (pg_class_aclcheck(elm->relid, GetUserId(), ACL_UPDATE) != ACLCHECK_OK)
- elog(ERROR, "%s.nextval: you don't have permissions to set sequence %s",
- sequence->relname, sequence->relname);
-
- if (elm->last != elm->cached) /* some numbers were cached */
- {
- elm->last += elm->increment;
- relation_close(seqrel, NoLock);
- PG_RETURN_INT64(elm->last);
- }
-
- /* lock page' buffer and read tuple */
- seq = read_info("nextval", elm, seqrel, &buf);
- page = BufferGetPage(buf);
-
- last = next = result = seq->last_value;
- incby = seq->increment_by;
- maxv = seq->max_value;
- minv = seq->min_value;
- fetch = cache = seq->cache_value;
- log = seq->log_cnt;
-
- if (!seq->is_called)
- {
- rescnt++; /* last_value if not called */
- fetch--;
- log--;
- }
-
- /*
- * Decide whether we should emit a WAL log record. If so, force up
- * the fetch count to grab SEQ_LOG_VALS more values than we actually
- * need to cache. (These will then be usable without logging.)
- *
- * If this is the first nextval after a checkpoint, we must force
- * a new WAL record to be written anyway, else replay starting from the
- * checkpoint would fail to advance the sequence past the logged
- * values. In this case we may as well fetch extra values.
- */
- if (log < fetch)
- {
- /* forced log to satisfy local demand for values */
- fetch = log = fetch + SEQ_LOG_VALS;
- logit = true;
- }
- else
- {
- XLogRecPtr redoptr = GetRedoRecPtr();
-
- if (XLByteLE(PageGetLSN(page), redoptr))
- {
- /* last update of seq was before checkpoint */
- fetch = log = fetch + SEQ_LOG_VALS;
- logit = true;
- }
- }
-
- while (fetch) /* try to fetch cache [+ log ] numbers */
- {
- /*
- * Check MAXVALUE for ascending sequences and MINVALUE for
- * descending sequences
- */
- if (incby > 0)
- {
- /* ascending sequence */
- if ((maxv >= 0 && next > maxv - incby) ||
- (maxv < 0 && next + incby > maxv))
- {
- if (rescnt > 0)
- break; /* stop fetching */
- if (!seq->is_cycled)
- elog(ERROR, "%s.nextval: reached MAXVALUE (" INT64_FORMAT ")",
- sequence->relname, maxv);
- next = minv;
- }
- else
- next += incby;
- }
- else
- {
- /* descending sequence */
- if ((minv < 0 && next < minv - incby) ||
- (minv >= 0 && next + incby < minv))
- {
- if (rescnt > 0)
- break; /* stop fetching */
- if (!seq->is_cycled)
- elog(ERROR, "%s.nextval: reached MINVALUE (" INT64_FORMAT ")",
- sequence->relname, minv);
- next = maxv;
- }
- else
- next += incby;
- }
- fetch--;
- if (rescnt < cache)
- {
- log--;
- rescnt++;
- last = next;
- if (rescnt == 1) /* if it's first result - */
- result = next; /* it's what to return */
- }
- }
-
- log -= fetch; /* adjust for any unfetched numbers */
- Assert(log >= 0);
-
- /* save info in local cache */
- elm->last = result; /* last returned number */
- elm->cached = last; /* last fetched number */
-
- START_CRIT_SECTION();
- if (logit)
- {
- xl_seq_rec xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
-
- xlrec.node = seqrel->rd_node;
- rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char *) &xlrec;
- rdata[0].len = sizeof(xl_seq_rec);
- rdata[0].next = &(rdata[1]);
-
- seq->last_value = next;
- seq->is_called = true;
- seq->log_cnt = 0;
- rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader) page)->pd_special -
- ((PageHeader) page)->pd_upper;
- rdata[1].next = NULL;
-
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
-
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
-
- /* update on-disk data */
- seq->last_value = last; /* last fetched number */
- seq->is_called = true;
- seq->log_cnt = log; /* how much is logged */
- END_CRIT_SECTION();
-
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- WriteBuffer(buf);
-
- relation_close(seqrel, NoLock);
-
- PG_RETURN_INT64(result);
-}
-
-Datum
-currval(PG_FUNCTION_ARGS)
-{
- text *seqin = PG_GETARG_TEXT_P(0);
- RangeVar *sequence;
- SeqTable elm;
- Relation seqrel;
- int64 result;
-
- sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
- "currval"));
-
- /* open and AccessShareLock sequence */
- init_sequence("currval", sequence, &elm, &seqrel);
-
- if (pg_class_aclcheck(elm->relid, GetUserId(), ACL_SELECT) != ACLCHECK_OK)
- elog(ERROR, "%s.currval: you don't have permissions to read sequence %s",
- sequence->relname, sequence->relname);
-
- if (elm->increment == 0) /* nextval/read_info were not called */
- elog(ERROR, "%s.currval is not yet defined in this session",
- sequence->relname);
-
- result = elm->last;
-
- relation_close(seqrel, NoLock);
-
- PG_RETURN_INT64(result);
-}
-
-/*
- * Main internal procedure that handles 2 & 3 arg forms of SETVAL.
- *
- * Note that the 3 arg version (which sets the is_called flag) is
- * only for use in pg_dump, and setting the is_called flag may not
- * work if multiple users are attached to the database and referencing
- * the sequence (unlikely if pg_dump is restoring it).
- *
- * It is necessary to have the 3 arg version so that pg_dump can
- * restore the state of a sequence exactly during data-only restores -
- * it is the only way to clear the is_called flag in an existing
- * sequence.
- */
-static void
-do_setval(RangeVar *sequence, int64 next, bool iscalled)
-{
- SeqTable elm;
- Relation seqrel;
- Buffer buf;
- Form_pg_sequence seq;
-
- /* open and AccessShareLock sequence */
- init_sequence("setval", sequence, &elm, &seqrel);
-
- if (pg_class_aclcheck(elm->relid, GetUserId(), ACL_UPDATE) != ACLCHECK_OK)
- elog(ERROR, "%s.setval: you don't have permissions to set sequence %s",
- sequence->relname, sequence->relname);
-
- /* lock page' buffer and read tuple */
- seq = read_info("setval", elm, seqrel, &buf);
-
- if ((next < seq->min_value) || (next > seq->max_value))
- elog(ERROR, "%s.setval: value " INT64_FORMAT " is out of bounds (" INT64_FORMAT "," INT64_FORMAT ")",
- sequence->relname, next, seq->min_value, seq->max_value);
-
- /* save info in local cache */
- elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached
- * values) */
-
- START_CRIT_SECTION();
- {
- xl_seq_rec xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
- Page page = BufferGetPage(buf);
-
- xlrec.node = seqrel->rd_node;
- rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char *) &xlrec;
- rdata[0].len = sizeof(xl_seq_rec);
- rdata[0].next = &(rdata[1]);
-
- seq->last_value = next;
- seq->is_called = true;
- seq->log_cnt = 0;
- rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader) page)->pd_special -
- ((PageHeader) page)->pd_upper;
- rdata[1].next = NULL;
-
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
-
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
- /* save info in sequence relation */
- seq->last_value = next; /* last fetched number */
- seq->is_called = iscalled;
- seq->log_cnt = (iscalled) ? 0 : 1;
- END_CRIT_SECTION();
-
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- WriteBuffer(buf);
-
- relation_close(seqrel, NoLock);
-}
-
-/*
- * Implement the 2 arg setval procedure.
- * See do_setval for discussion.
- */
-Datum
-setval(PG_FUNCTION_ARGS)
-{
- text *seqin = PG_GETARG_TEXT_P(0);
- int64 next = PG_GETARG_INT64(1);
- RangeVar *sequence;
-
- sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
- "setval"));
-
- do_setval(sequence, next, true);
-
- PG_RETURN_INT64(next);
-}
-
-/*
- * Implement the 3 arg setval procedure.
- * See do_setval for discussion.
- */
-Datum
-setval_and_iscalled(PG_FUNCTION_ARGS)
-{
- text *seqin = PG_GETARG_TEXT_P(0);
- int64 next = PG_GETARG_INT64(1);
- bool iscalled = PG_GETARG_BOOL(2);
- RangeVar *sequence;
-
- sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin,
- "setval"));
-
- do_setval(sequence, next, iscalled);
-
- PG_RETURN_INT64(next);
-}
-
-
-/*
- * Given a relation name, open and lock the sequence. p_elm and p_rel are
- * output parameters.
- */
-static void
-init_sequence(const char *caller, RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel)
-{
- Oid relid = RangeVarGetRelid(relation, false);
- TransactionId thisxid = GetCurrentTransactionId();
- SeqTable elm;
- Relation seqrel;
-
- /* Look to see if we already have a seqtable entry for relation */
- for (elm = seqtab; elm != NULL; elm = elm->next)
- {
- if (elm->relid == relid)
- break;
- }
-
- /*
- * Open the sequence relation, acquiring AccessShareLock if we don't
- * already have a lock in the current xact.
- */
- if (elm == NULL || elm->xid != thisxid)
- seqrel = relation_open(relid, AccessShareLock);
- else
- seqrel = relation_open(relid, NoLock);
-
- if (seqrel->rd_rel->relkind != RELKIND_SEQUENCE)
- elog(ERROR, "%s.%s: %s is not a sequence",
- relation->relname, caller, relation->relname);
-
- /*
- * Allocate new seqtable entry if we didn't find one.
- *
- * NOTE: seqtable entries remain in the list for the life of a backend.
- * If the sequence itself is deleted then the entry becomes wasted memory,
- * but it's small enough that this should not matter.
- */
- if (elm == NULL)
- {
- /*
- * Time to make a new seqtable entry. These entries live as long
- * as the backend does, so we use plain malloc for them.
- */
- elm = (SeqTable) malloc(sizeof(SeqTableData));
- if (elm == NULL)
- elog(ERROR, "Memory exhausted in init_sequence");
- elm->relid = relid;
- /* increment is set to 0 until we do read_info (see currval) */
- elm->last = elm->cached = elm->increment = 0;
- elm->next = seqtab;
- seqtab = elm;
- }
-
- /* Flag that we have a lock in the current xact. */
- elm->xid = thisxid;
-
- *p_elm = elm;
- *p_rel = seqrel;
-}
-
-
-/* Given an opened relation, lock the page buffer and find the tuple */
-static Form_pg_sequence
-read_info(const char *caller, SeqTable elm,
- Relation rel, Buffer *buf)
-{
- PageHeader page;
- ItemId lp;
- HeapTupleData tuple;
- sequence_magic *sm;
- Form_pg_sequence seq;
-
- if (rel->rd_nblocks > 1)
- elog(ERROR, "%s.%s: invalid number of blocks in sequence",
- RelationGetRelationName(rel), caller);
-
- *buf = ReadBuffer(rel, 0);
- if (!BufferIsValid(*buf))
- elog(ERROR, "%s.%s: ReadBuffer failed",
- RelationGetRelationName(rel), caller);
-
- LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
-
- page = (PageHeader) BufferGetPage(*buf);
- sm = (sequence_magic *) PageGetSpecialPointer(page);
-
- if (sm->magic != SEQ_MAGIC)
- elog(ERROR, "%s.%s: bad magic (%08X)",
- RelationGetRelationName(rel), caller, sm->magic);
-
- lp = PageGetItemId(page, FirstOffsetNumber);
- Assert(ItemIdIsUsed(lp));
- tuple.t_data = (HeapTupleHeader) PageGetItem((Page) page, lp);
-
- seq = (Form_pg_sequence) GETSTRUCT(&tuple);
-
- elm->increment = seq->increment_by;
-
- return seq;
-}
-
-
-static void
-init_params(CreateSeqStmt *seq, Form_pg_sequence new)
-{
- DefElem *last_value = NULL;
- DefElem *increment_by = NULL;
- DefElem *max_value = NULL;
- DefElem *min_value = NULL;
- DefElem *cache_value = NULL;
- List *option;
-
- new->is_cycled = false;
- foreach(option, seq->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "increment") == 0)
- increment_by = defel;
- else if (strcmp(defel->defname, "start") == 0)
- last_value = defel;
- else if (strcmp(defel->defname, "maxvalue") == 0)
- max_value = defel;
- else if (strcmp(defel->defname, "minvalue") == 0)
- min_value = defel;
- else if (strcmp(defel->defname, "cache") == 0)
- cache_value = defel;
- else if (strcmp(defel->defname, "cycle") == 0)
- {
- if (defel->arg != (Node *) NULL)
- elog(ERROR, "DefineSequence: CYCLE ??");
- new->is_cycled = true;
- }
- else
- elog(ERROR, "DefineSequence: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (increment_by == (DefElem *) NULL) /* INCREMENT BY */
- new->increment_by = 1;
- else if ((new->increment_by = defGetInt64(increment_by)) == 0)
- elog(ERROR, "DefineSequence: can't INCREMENT by 0");
-
- if (max_value == (DefElem *) NULL) /* MAXVALUE */
- {
- if (new->increment_by > 0)
- new->max_value = SEQ_MAXVALUE; /* ascending seq */
- else
- new->max_value = -1; /* descending seq */
- }
- else
- new->max_value = defGetInt64(max_value);
-
- if (min_value == (DefElem *) NULL) /* MINVALUE */
- {
- if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
- else
- new->min_value = SEQ_MINVALUE; /* descending seq */
- }
- else
- new->min_value = defGetInt64(min_value);
-
- if (new->min_value >= new->max_value)
- elog(ERROR, "DefineSequence: MINVALUE (" INT64_FORMAT ") can't be >= MAXVALUE (" INT64_FORMAT ")",
- new->min_value, new->max_value);
-
- if (last_value == (DefElem *) NULL) /* START WITH */
- {
- if (new->increment_by > 0)
- new->last_value = new->min_value; /* ascending seq */
- else
- new->last_value = new->max_value; /* descending seq */
- }
- else
- new->last_value = defGetInt64(last_value);
-
- if (new->last_value < new->min_value)
- elog(ERROR, "DefineSequence: START value (" INT64_FORMAT ") can't be < MINVALUE (" INT64_FORMAT ")",
- new->last_value, new->min_value);
- if (new->last_value > new->max_value)
- elog(ERROR, "DefineSequence: START value (" INT64_FORMAT ") can't be > MAXVALUE (" INT64_FORMAT ")",
- new->last_value, new->max_value);
-
- if (cache_value == (DefElem *) NULL) /* CACHE */
- new->cache_value = 1;
- else if ((new->cache_value = defGetInt64(cache_value)) <= 0)
- elog(ERROR, "DefineSequence: CACHE (" INT64_FORMAT ") can't be <= 0",
- new->cache_value);
-
-}
-
-
-void
-seq_redo(XLogRecPtr lsn, XLogRecord *record)
-{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
- Relation reln;
- Buffer buffer;
- Page page;
- char *item;
- Size itemsz;
- xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
- sequence_magic *sm;
-
- if (info != XLOG_SEQ_LOG)
- elog(PANIC, "seq_redo: unknown op code %u", info);
-
- reln = XLogOpenRelation(true, RM_SEQ_ID, xlrec->node);
- if (!RelationIsValid(reln))
- return;
-
- buffer = XLogReadBuffer(true, reln, 0);
- if (!BufferIsValid(buffer))
- elog(PANIC, "seq_redo: can't read block of %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
-
- page = (Page) BufferGetPage(buffer);
-
- /* Always reinit the page and reinstall the magic number */
- /* See comments in DefineSequence */
- PageInit((Page) page, BufferGetPageSize(buffer), sizeof(sequence_magic));
- sm = (sequence_magic *) PageGetSpecialPointer(page);
- sm->magic = SEQ_MAGIC;
-
- item = (char *) xlrec + sizeof(xl_seq_rec);
- itemsz = record->xl_len - sizeof(xl_seq_rec);
- itemsz = MAXALIGN(itemsz);
- if (PageAddItem(page, (Item) item, itemsz,
- FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "seq_redo: failed to add item to page");
-
- PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID);
- UnlockAndWriteBuffer(buffer);
-}
-
-void
-seq_undo(XLogRecPtr lsn, XLogRecord *record)
-{
-}
-
-void
-seq_desc(char *buf, uint8 xl_info, char *rec)
-{
- uint8 info = xl_info & ~XLR_INFO_MASK;
- xl_seq_rec *xlrec = (xl_seq_rec *) rec;
-
- if (info == XLOG_SEQ_LOG)
- strcat(buf, "log: ");
- else
- {
- strcat(buf, "UNKNOWN");
- return;
- }
-
- sprintf(buf + strlen(buf), "node %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
-}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
deleted file mode 100644
index 3bcf774ba29..00000000000
--- a/src/backend/commands/tablecmds.c
+++ /dev/null
@@ -1,3094 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * tablecmds.c
- * Commands for creating and altering table structures and settings
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.17 2002/06/17 14:31:32 tgl Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/tuptoaster.h"
-#include "catalog/catalog.h"
-#include "catalog/catname.h"
-#include "catalog/heap.h"
-#include "catalog/index.h"
-#include "catalog/indexing.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_attrdef.h"
-#include "catalog/pg_inherits.h"
-#include "catalog/pg_namespace.h"
-#include "catalog/pg_opclass.h"
-#include "catalog/pg_trigger.h"
-#include "catalog/pg_type.h"
-#include "commands/tablecmds.h"
-#include "commands/trigger.h"
-#include "executor/executor.h"
-#include "miscadmin.h"
-#include "nodes/makefuncs.h"
-#include "optimizer/clauses.h"
-#include "optimizer/planmain.h"
-#include "optimizer/prep.h"
-#include "parser/parse.h"
-#include "parser/parse_coerce.h"
-#include "parser/parse_expr.h"
-#include "parser/parse_relation.h"
-#include "parser/parse_type.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-#include "utils/relcache.h"
-
-
-static List *MergeDomainAttributes(List *schema);
-static List *MergeAttributes(List *schema, List *supers, bool istemp,
- List **supOids, List **supconstr, bool *supHasOids);
-static bool change_varattnos_of_a_node(Node *node, const AttrNumber *newattno);
-static void StoreCatalogInheritance(Oid relationId, List *supers);
-static int findAttrByName(const char *attributeName, List *schema);
-static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
-static void drop_default(Oid relid, int16 attnum);
-static void CheckTupleType(Form_pg_class tuple_class);
-static bool needs_toast_table(Relation rel);
-
-/* Used by attribute and relation renaming routines: */
-
-#define RI_TRIGGER_PK 1 /* is a trigger on the PK relation */
-#define RI_TRIGGER_FK 2 /* is a trigger on the FK relation */
-#define RI_TRIGGER_NONE 0 /* is not an RI trigger function */
-
-static int ri_trigger_type(Oid tgfoid);
-static void update_ri_trigger_args(Oid relid,
- const char *oldname,
- const char *newname,
- bool fk_scan,
- bool update_relname);
-
-
-/* ----------------------------------------------------------------
- * DefineRelation
- * Creates a new relation.
- *
- * If successful, returns the OID of the new relation.
- * ----------------------------------------------------------------
- */
-Oid
-DefineRelation(CreateStmt *stmt, char relkind)
-{
- char relname[NAMEDATALEN];
- Oid namespaceId;
- List *schema = stmt->tableElts;
- int numberOfAttributes;
- Oid relationId;
- Relation rel;
- TupleDesc descriptor;
- List *inheritOids;
- List *old_constraints;
- bool parentHasOids;
- List *rawDefaults;
- List *listptr;
- int i;
- AttrNumber attnum;
-
- /*
- * Truncate relname to appropriate length (probably a waste of time,
- * as parser should have done this already).
- */
- StrNCpy(relname, stmt->relation->relname, NAMEDATALEN);
-
- /*
- * Look up the namespace in which we are supposed to create the
- * relation. Check we have permission to create there.
- * Skip check if bootstrapping, since permissions machinery may not
- * be working yet; also, always allow if it's a temp table.
- */
- namespaceId = RangeVarGetCreationNamespace(stmt->relation);
-
- if (!IsBootstrapProcessingMode() && !isTempNamespace(namespaceId))
- {
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(namespaceId));
- }
-
- /*
- * Merge domain attributes into the known columns before processing table
- * inheritance. Otherwise we risk adding double constraints to a
- * domain-type column that's inherited.
- */
- schema = MergeDomainAttributes(schema);
-
- /*
- * Look up inheritance ancestors and generate relation schema,
- * including inherited attributes.
- */
- schema = MergeAttributes(schema, stmt->inhRelations,
- stmt->relation->istemp,
- &inheritOids, &old_constraints, &parentHasOids);
-
- numberOfAttributes = length(schema);
- if (numberOfAttributes <= 0)
- elog(ERROR, "DefineRelation: please inherit from a relation or define an attribute");
-
- /*
- * Create a relation descriptor from the relation schema and create
- * the relation. Note that in this stage only inherited (pre-cooked)
- * defaults and constraints will be included into the new relation.
- * (BuildDescForRelation takes care of the inherited defaults, but we
- * have to copy inherited constraints here.)
- */
- descriptor = BuildDescForRelation(schema);
-
- if (old_constraints != NIL)
- {
- ConstrCheck *check = (ConstrCheck *) palloc(length(old_constraints) *
- sizeof(ConstrCheck));
- int ncheck = 0;
-
- foreach(listptr, old_constraints)
- {
- Constraint *cdef = (Constraint *) lfirst(listptr);
-
- if (cdef->contype != CONSTR_CHECK)
- continue;
-
- if (cdef->name != NULL)
- {
- for (i = 0; i < ncheck; i++)
- {
- if (strcmp(check[i].ccname, cdef->name) == 0)
- elog(ERROR, "Duplicate CHECK constraint name: '%s'",
- cdef->name);
- }
- check[ncheck].ccname = cdef->name;
- }
- else
- {
- check[ncheck].ccname = (char *) palloc(NAMEDATALEN);
- snprintf(check[ncheck].ccname, NAMEDATALEN, "$%d", ncheck + 1);
- }
- Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL);
- check[ncheck].ccbin = pstrdup(cdef->cooked_expr);
- ncheck++;
- }
- if (ncheck > 0)
- {
- if (descriptor->constr == NULL)
- {
- descriptor->constr = (TupleConstr *) palloc(sizeof(TupleConstr));
- descriptor->constr->defval = NULL;
- descriptor->constr->num_defval = 0;
- descriptor->constr->has_not_null = false;
- }
- descriptor->constr->num_check = ncheck;
- descriptor->constr->check = check;
- }
- }
-
- relationId = heap_create_with_catalog(relname,
- namespaceId,
- descriptor,
- relkind,
- false,
- stmt->hasoids || parentHasOids,
- allowSystemTableMods);
-
- StoreCatalogInheritance(relationId, inheritOids);
-
- /*
- * We must bump the command counter to make the newly-created relation
- * tuple visible for opening.
- */
- CommandCounterIncrement();
-
- /*
- * Open the new relation and acquire exclusive lock on it. This isn't
- * really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock
- * manager from complaining about deadlock risks.
- */
- rel = heap_open(relationId, AccessExclusiveLock);
-
- /*
- * Now add any newly specified column default values and CHECK
- * constraints to the new relation. These are passed to us in the
- * form of raw parsetrees; we need to transform them to executable
- * expression trees before they can be added. The most convenient way
- * to do that is to apply the parser's transformExpr routine, but
- * transformExpr doesn't work unless we have a pre-existing relation.
- * So, the transformation has to be postponed to this final step of
- * CREATE TABLE.
- *
- * First, scan schema to find new column defaults.
- */
- rawDefaults = NIL;
- attnum = 0;
-
- foreach(listptr, schema)
- {
- ColumnDef *colDef = lfirst(listptr);
- RawColumnDefault *rawEnt;
-
- attnum++;
-
- if (colDef->raw_default == NULL)
- continue;
- Assert(colDef->cooked_default == NULL);
-
- rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault));
- rawEnt->attnum = attnum;
- rawEnt->raw_default = colDef->raw_default;
- rawDefaults = lappend(rawDefaults, rawEnt);
- }
-
- /*
- * Parse and add the defaults/constraints, if any.
- */
- if (rawDefaults || stmt->constraints)
- AddRelationRawConstraints(rel, rawDefaults, stmt->constraints);
-
- /*
- * Clean up. We keep lock on new relation (although it shouldn't be
- * visible to anyone else anyway, until commit).
- */
- heap_close(rel, NoLock);
-
- return relationId;
-}
-
-/*
- * RemoveRelation
- * Deletes a relation.
- *
- * Exceptions:
- * BadArg if name is invalid.
- *
- * Note:
- * If the relation has indices defined on it, then the index relations
- * themselves will be destroyed, too.
- */
-void
-RemoveRelation(const RangeVar *relation)
-{
- Oid relOid;
-
- relOid = RangeVarGetRelid(relation, false);
- heap_drop_with_catalog(relOid, allowSystemTableMods);
-}
-
-/*
- * TruncateRelation
- * Removes all the rows from a relation
- *
- * Exceptions:
- * BadArg if name is invalid
- *
- * Note:
- * Rows are removed, indices are truncated and reconstructed.
- */
-void
-TruncateRelation(const RangeVar *relation)
-{
- Relation rel;
- Oid relid;
-
- /* Grab exclusive lock in preparation for truncate */
- rel = heap_openrv(relation, AccessExclusiveLock);
- relid = RelationGetRelid(rel);
-
- if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
- elog(ERROR, "TRUNCATE cannot be used on sequences. '%s' is a sequence",
- RelationGetRelationName(rel));
-
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "TRUNCATE cannot be used on views. '%s' is a view",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods && IsSystemRelation(rel))
- elog(ERROR, "TRUNCATE cannot be used on system tables. '%s' is a system table",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(relid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /* Keep the lock until transaction commit */
- heap_close(rel, NoLock);
-
- heap_truncate(relid);
-}
-
-
-/*
- * MergeDomainAttributes
- * Returns a new table schema with the constraints, types, and other
- * attributes of domains resolved for fields using a domain as
- * their type.
- */
-static List *
-MergeDomainAttributes(List *schema)
-{
- List *entry;
-
- /*
- * Loop through the table elements supplied. These should
- * never include inherited domains else they'll be
- * double (or more) processed.
- */
- foreach(entry, schema)
- {
- ColumnDef *coldef = lfirst(entry);
- HeapTuple tuple;
- Form_pg_type typeTup;
-
- tuple = typenameType(coldef->typename);
- typeTup = (Form_pg_type) GETSTRUCT(tuple);
-
- if (typeTup->typtype == 'd')
- {
- /* Force the column to have the correct typmod. */
- coldef->typename->typmod = typeTup->typtypmod;
- /* XXX more to do here? */
- }
-
- /* Enforce type NOT NULL || column definition NOT NULL -> NOT NULL */
- /* Currently only used for domains, but could be valid for all */
- coldef->is_not_null |= typeTup->typnotnull;
-
- ReleaseSysCache(tuple);
- }
-
- return schema;
-}
-
-/*----------
- * MergeAttributes
- * Returns new schema given initial schema and superclasses.
- *
- * Input arguments:
- * 'schema' is the column/attribute definition for the table. (It's a list
- * of ColumnDef's.) It is destructively changed.
- * 'supers' is a list of names (as RangeVar nodes) of parent relations.
- * 'istemp' is TRUE if we are creating a temp relation.
- *
- * Output arguments:
- * 'supOids' receives an integer list of the OIDs of the parent relations.
- * 'supconstr' receives a list of constraints belonging to the parents,
- * updated as necessary to be valid for the child.
- * 'supHasOids' is set TRUE if any parent has OIDs, else it is set FALSE.
- *
- * Return value:
- * Completed schema list.
- *
- * Notes:
- * The order in which the attributes are inherited is very important.
- * Intuitively, the inherited attributes should come first. If a table
- * inherits from multiple parents, the order of those attributes are
- * according to the order of the parents specified in CREATE TABLE.
- *
- * Here's an example:
- *
- * create table person (name text, age int4, location point);
- * create table emp (salary int4, manager text) inherits(person);
- * create table student (gpa float8) inherits (person);
- * create table stud_emp (percent int4) inherits (emp, student);
- *
- * The order of the attributes of stud_emp is:
- *
- * person {1:name, 2:age, 3:location}
- * / \
- * {6:gpa} student emp {4:salary, 5:manager}
- * \ /
- * stud_emp {7:percent}
- *
- * If the same attribute name appears multiple times, then it appears
- * in the result table in the proper location for its first appearance.
- *
- * Constraints (including NOT NULL constraints) for the child table
- * are the union of all relevant constraints, from both the child schema
- * and parent tables.
- *
- * The default value for a child column is defined as:
- * (1) If the child schema specifies a default, that value is used.
- * (2) If neither the child nor any parent specifies a default, then
- * the column will not have a default.
- * (3) If conflicting defaults are inherited from different parents
- * (and not overridden by the child), an error is raised.
- * (4) Otherwise the inherited default is used.
- * Rule (3) is new in Postgres 7.1; in earlier releases you got a
- * rather arbitrary choice of which parent default to use.
- *----------
- */
-static List *
-MergeAttributes(List *schema, List *supers, bool istemp,
- List **supOids, List **supconstr, bool *supHasOids)
-{
- List *entry;
- List *inhSchema = NIL;
- List *parentOids = NIL;
- List *constraints = NIL;
- bool parentHasOids = false;
- bool have_bogus_defaults = false;
- char *bogus_marker = "Bogus!"; /* marks conflicting
- * defaults */
- int child_attno;
-
- /*
- * Check for duplicate names in the explicit list of attributes.
- *
- * Although we might consider merging such entries in the same way that
- * we handle name conflicts for inherited attributes, it seems to make
- * more sense to assume such conflicts are errors.
- */
- foreach(entry, schema)
- {
- ColumnDef *coldef = lfirst(entry);
- List *rest;
-
- foreach(rest, lnext(entry))
- {
- ColumnDef *restdef = lfirst(rest);
-
- if (strcmp(coldef->colname, restdef->colname) == 0)
- elog(ERROR, "CREATE TABLE: attribute \"%s\" duplicated",
- coldef->colname);
- }
- }
-
- /*
- * Scan the parents left-to-right, and merge their attributes to form
- * a list of inherited attributes (inhSchema). Also check to see if
- * we need to inherit an OID column.
- */
- child_attno = 0;
- foreach(entry, supers)
- {
- RangeVar *parent = (RangeVar *) lfirst(entry);
- Relation relation;
- TupleDesc tupleDesc;
- TupleConstr *constr;
- AttrNumber *newattno;
- AttrNumber parent_attno;
-
- relation = heap_openrv(parent, AccessShareLock);
-
- if (relation->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "CREATE TABLE: inherited relation \"%s\" is not a table",
- parent->relname);
- /* Permanent rels cannot inherit from temporary ones */
- if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
- elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"",
- parent->relname);
-
- /*
- * We should have an UNDER permission flag for this, but for now,
- * demand that creator of a child table own the parent.
- */
- if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER,
- RelationGetRelationName(relation));
-
- /*
- * Reject duplications in the list of parents.
- */
- if (intMember(RelationGetRelid(relation), parentOids))
- elog(ERROR, "CREATE TABLE: inherited relation \"%s\" duplicated",
- parent->relname);
-
- parentOids = lappendi(parentOids, RelationGetRelid(relation));
- setRelhassubclassInRelation(RelationGetRelid(relation), true);
-
- parentHasOids |= relation->rd_rel->relhasoids;
-
- tupleDesc = RelationGetDescr(relation);
- constr = tupleDesc->constr;
-
- /*
- * newattno[] will contain the child-table attribute numbers for
- * the attributes of this parent table. (They are not the same
- * for parents after the first one.)
- */
- newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
-
- for (parent_attno = 1; parent_attno <= tupleDesc->natts;
- parent_attno++)
- {
- Form_pg_attribute attribute = tupleDesc->attrs[parent_attno - 1];
- char *attributeName = NameStr(attribute->attname);
- int exist_attno;
- ColumnDef *def;
- TypeName *typename;
-
- /*
- * Does it conflict with some previously inherited column?
- */
- exist_attno = findAttrByName(attributeName, inhSchema);
- if (exist_attno > 0)
- {
- /*
- * Yes, try to merge the two column definitions. They must
- * have the same type and typmod.
- */
- elog(NOTICE, "CREATE TABLE: merging multiple inherited definitions of attribute \"%s\"",
- attributeName);
- def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
- if (typenameTypeId(def->typename) != attribute->atttypid ||
- def->typename->typmod != attribute->atttypmod)
- elog(ERROR, "CREATE TABLE: inherited attribute \"%s\" type conflict (%s and %s)",
- attributeName,
- TypeNameToString(def->typename),
- format_type_be(attribute->atttypid));
- /* Merge of NOT NULL constraints = OR 'em together */
- def->is_not_null |= attribute->attnotnull;
- /* Default and other constraints are handled below */
- newattno[parent_attno - 1] = exist_attno;
- }
- else
- {
- /*
- * No, create a new inherited column
- */
- def = makeNode(ColumnDef);
- def->colname = pstrdup(attributeName);
- typename = makeNode(TypeName);
- typename->typeid = attribute->atttypid;
- typename->typmod = attribute->atttypmod;
- def->typename = typename;
- def->is_not_null = attribute->attnotnull;
- def->raw_default = NULL;
- def->cooked_default = NULL;
- def->constraints = NIL;
- inhSchema = lappend(inhSchema, def);
- newattno[parent_attno - 1] = ++child_attno;
- }
-
- /*
- * Copy default if any
- */
- if (attribute->atthasdef)
- {
- char *this_default = NULL;
- AttrDefault *attrdef;
- int i;
-
- /* Find default in constraint structure */
- Assert(constr != NULL);
- attrdef = constr->defval;
- for (i = 0; i < constr->num_defval; i++)
- {
- if (attrdef[i].adnum == parent_attno)
- {
- this_default = attrdef[i].adbin;
- break;
- }
- }
- Assert(this_default != NULL);
-
- /*
- * If default expr could contain any vars, we'd need to
- * fix 'em, but it can't; so default is ready to apply to
- * child.
- *
- * If we already had a default from some prior parent, check
- * to see if they are the same. If so, no problem; if
- * not, mark the column as having a bogus default. Below,
- * we will complain if the bogus default isn't overridden
- * by the child schema.
- */
- Assert(def->raw_default == NULL);
- if (def->cooked_default == NULL)
- def->cooked_default = pstrdup(this_default);
- else if (strcmp(def->cooked_default, this_default) != 0)
- {
- def->cooked_default = bogus_marker;
- have_bogus_defaults = true;
- }
- }
- }
-
- /*
- * Now copy the constraints of this parent, adjusting attnos using
- * the completed newattno[] map
- */
- if (constr && constr->num_check > 0)
- {
- ConstrCheck *check = constr->check;
- int i;
-
- for (i = 0; i < constr->num_check; i++)
- {
- Constraint *cdef = makeNode(Constraint);
- Node *expr;
-
- cdef->contype = CONSTR_CHECK;
- if (check[i].ccname[0] == '$')
- cdef->name = NULL;
- else
- cdef->name = pstrdup(check[i].ccname);
- cdef->raw_expr = NULL;
- /* adjust varattnos of ccbin here */
- expr = stringToNode(check[i].ccbin);
- change_varattnos_of_a_node(expr, newattno);
- cdef->cooked_expr = nodeToString(expr);
- constraints = lappend(constraints, cdef);
- }
- }
-
- pfree(newattno);
-
- /*
- * Close the parent rel, but keep our AccessShareLock on it until
- * xact commit. That will prevent someone else from deleting or
- * ALTERing the parent before the child is committed.
- */
- heap_close(relation, NoLock);
- }
-
- /*
- * If we had no inherited attributes, the result schema is just the
- * explicitly declared columns. Otherwise, we need to merge the
- * declared columns into the inherited schema list.
- */
- if (inhSchema != NIL)
- {
- foreach(entry, schema)
- {
- ColumnDef *newdef = lfirst(entry);
- char *attributeName = newdef->colname;
- int exist_attno;
-
- /*
- * Does it conflict with some previously inherited column?
- */
- exist_attno = findAttrByName(attributeName, inhSchema);
- if (exist_attno > 0)
- {
- ColumnDef *def;
-
- /*
- * Yes, try to merge the two column definitions. They must
- * have the same type and typmod.
- */
- elog(NOTICE, "CREATE TABLE: merging attribute \"%s\" with inherited definition",
- attributeName);
- def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
- if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) ||
- def->typename->typmod != newdef->typename->typmod)
- elog(ERROR, "CREATE TABLE: attribute \"%s\" type conflict (%s and %s)",
- attributeName,
- TypeNameToString(def->typename),
- TypeNameToString(newdef->typename));
- /* Merge of NOT NULL constraints = OR 'em together */
- def->is_not_null |= newdef->is_not_null;
- /* If new def has a default, override previous default */
- if (newdef->raw_default != NULL)
- {
- def->raw_default = newdef->raw_default;
- def->cooked_default = newdef->cooked_default;
- }
- }
- else
- {
- /*
- * No, attach new column to result schema
- */
- inhSchema = lappend(inhSchema, newdef);
- }
- }
-
- schema = inhSchema;
- }
-
- /*
- * If we found any conflicting parent default values, check to make
- * sure they were overridden by the child.
- */
- if (have_bogus_defaults)
- {
- foreach(entry, schema)
- {
- ColumnDef *def = lfirst(entry);
-
- if (def->cooked_default == bogus_marker)
- elog(ERROR, "CREATE TABLE: attribute \"%s\" inherits conflicting default values"
- "\n\tTo resolve the conflict, specify a default explicitly",
- def->colname);
- }
- }
-
- *supOids = parentOids;
- *supconstr = constraints;
- *supHasOids = parentHasOids;
- return schema;
-}
-
-/*
- * complementary static functions for MergeAttributes().
- *
- * Varattnos of pg_relcheck.rcbin must be rewritten when subclasses inherit
- * constraints from parent classes, since the inherited attributes could
- * be given different column numbers in multiple-inheritance cases.
- *
- * Note that the passed node tree is modified in place!
- */
-static bool
-change_varattnos_walker(Node *node, const AttrNumber *newattno)
-{
- if (node == NULL)
- return false;
- if (IsA(node, Var))
- {
- Var *var = (Var *) node;
-
- if (var->varlevelsup == 0 && var->varno == 1 &&
- var->varattno > 0)
- {
- /*
- * ??? the following may be a problem when the node is
- * multiply referenced though stringToNode() doesn't create
- * such a node currently.
- */
- Assert(newattno[var->varattno - 1] > 0);
- var->varattno = newattno[var->varattno - 1];
- }
- return false;
- }
- return expression_tree_walker(node, change_varattnos_walker,
- (void *) newattno);
-}
-
-static bool
-change_varattnos_of_a_node(Node *node, const AttrNumber *newattno)
-{
- return change_varattnos_walker(node, newattno);
-}
-
-/*
- * StoreCatalogInheritance
- * Updates the system catalogs with proper inheritance information.
- *
- * supers is an integer list of the OIDs of the new relation's direct
- * ancestors. NB: it is destructively changed to include indirect ancestors.
- */
-static void
-StoreCatalogInheritance(Oid relationId, List *supers)
-{
- Relation relation;
- TupleDesc desc;
- int16 seqNumber;
- List *entry;
- HeapTuple tuple;
-
- /*
- * sanity checks
- */
- AssertArg(OidIsValid(relationId));
-
- if (supers == NIL)
- return;
-
- /*
- * Catalog INHERITS information using direct ancestors only.
- */
- relation = heap_openr(InheritsRelationName, RowExclusiveLock);
- desc = RelationGetDescr(relation);
-
- seqNumber = 1;
- foreach(entry, supers)
- {
- Oid entryOid = lfirsti(entry);
- Datum datum[Natts_pg_inherits];
- char nullarr[Natts_pg_inherits];
-
- datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
- datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
-
- nullarr[0] = ' ';
- nullarr[1] = ' ';
- nullarr[2] = ' ';
-
- tuple = heap_formtuple(desc, datum, nullarr);
-
- simple_heap_insert(relation, tuple);
-
- if (RelationGetForm(relation)->relhasindex)
- {
- Relation idescs[Num_pg_inherits_indices];
-
- CatalogOpenIndices(Num_pg_inherits_indices, Name_pg_inherits_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_inherits_indices, relation, tuple);
- CatalogCloseIndices(Num_pg_inherits_indices, idescs);
- }
-
- heap_freetuple(tuple);
-
- seqNumber += 1;
- }
-
- heap_close(relation, RowExclusiveLock);
-
- /* ----------------
- * Expand supers list to include indirect ancestors as well.
- *
- * Algorithm:
- * 0. begin with list of direct superclasses.
- * 1. append after each relationId, its superclasses, recursively.
- * 2. remove all but last of duplicates.
- * ----------------
- */
-
- /*
- * 1. append after each relationId, its superclasses, recursively.
- */
- foreach(entry, supers)
- {
- HeapTuple tuple;
- Oid id;
- int16 number;
- List *next;
- List *current;
-
- id = (Oid) lfirsti(entry);
- current = entry;
- next = lnext(entry);
-
- for (number = 1;; number += 1)
- {
- tuple = SearchSysCache(INHRELID,
- ObjectIdGetDatum(id),
- Int16GetDatum(number),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- break;
-
- lnext(current) = lconsi(((Form_pg_inherits)
- GETSTRUCT(tuple))->inhparent,
- NIL);
-
- ReleaseSysCache(tuple);
-
- current = lnext(current);
- }
- lnext(current) = next;
- }
-
- /*
- * 2. remove all but last of duplicates.
- */
- foreach(entry, supers)
- {
- Oid thisone;
- bool found;
- List *rest;
-
-again:
- thisone = lfirsti(entry);
- found = false;
- foreach(rest, lnext(entry))
- {
- if (thisone == lfirsti(rest))
- {
- found = true;
- break;
- }
- }
- if (found)
- {
- /*
- * found a later duplicate, so remove this entry.
- */
- lfirsti(entry) = lfirsti(lnext(entry));
- lnext(entry) = lnext(lnext(entry));
-
- goto again;
- }
- }
-}
-
-/*
- * Look for an existing schema entry with the given name.
- *
- * Returns the index (starting with 1) if attribute already exists in schema,
- * 0 if it doesn't.
- */
-static int
-findAttrByName(const char *attributeName, List *schema)
-{
- List *s;
- int i = 0;
-
- foreach(s, schema)
- {
- ColumnDef *def = lfirst(s);
-
- ++i;
- if (strcmp(attributeName, def->colname) == 0)
- return i;
- }
- return 0;
-}
-
-/*
- * Update a relation's pg_class.relhassubclass entry to the given value
- */
-static void
-setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
-{
- Relation relationRelation;
- HeapTuple tuple;
- Relation idescs[Num_pg_class_indices];
-
- /*
- * Fetch a modifiable copy of the tuple, modify it, update pg_class.
- */
- relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
- tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(relationId),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "setRelhassubclassInRelation: cache lookup failed for relation %u", relationId);
-
- ((Form_pg_class) GETSTRUCT(tuple))->relhassubclass = relhassubclass;
- simple_heap_update(relationRelation, &tuple->t_self, tuple);
-
- /* keep the catalog indices up to date */
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_class_indices, relationRelation, tuple);
- CatalogCloseIndices(Num_pg_class_indices, idescs);
-
- heap_freetuple(tuple);
- heap_close(relationRelation, RowExclusiveLock);
-}
-
-
-/*
- * renameatt - changes the name of a attribute in a relation
- *
- * Attname attribute is changed in attribute catalog.
- * No record of the previous attname is kept (correct?).
- *
- * get proper relrelation from relation catalog (if not arg)
- * scan attribute catalog
- * for name conflict (within rel)
- * for original attribute (if not arg)
- * modify attname in attribute tuple
- * insert modified attribute in attribute catalog
- * delete original attribute from attribute catalog
- */
-void
-renameatt(Oid relid,
- const char *oldattname,
- const char *newattname,
- bool recurse)
-{
- Relation targetrelation;
- Relation attrelation;
- HeapTuple atttup;
- List *indexoidlist;
- List *indexoidscan;
-
- /*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
- */
- targetrelation = heap_open(relid, AccessExclusiveLock);
-
- /*
- * permissions checking. this would normally be done in utility.c,
- * but this particular routine is recursive.
- *
- * normally, only the owner of a class can change its schema.
- */
- if (!allowSystemTableMods
- && IsSystemRelation(targetrelation))
- elog(ERROR, "renameatt: class \"%s\" is a system catalog",
- RelationGetRelationName(targetrelation));
- if (!pg_class_ownercheck(relid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER,
- RelationGetRelationName(targetrelation));
-
- /*
- * if the 'recurse' flag is set then we are supposed to rename this
- * attribute in all classes that inherit from 'relname' (as well as in
- * 'relname').
- *
- * any permissions or problems with duplicate attributes will cause the
- * whole transaction to abort, which is what we want -- all or
- * nothing.
- */
- if (recurse)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(relid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == relid)
- continue;
- /* note we need not recurse again! */
- renameatt(childrelid, oldattname, newattname, false);
- }
- }
-
- attrelation = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- atttup = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(relid),
- PointerGetDatum(oldattname),
- 0, 0);
- if (!HeapTupleIsValid(atttup))
- elog(ERROR, "renameatt: attribute \"%s\" does not exist", oldattname);
-
- if (((Form_pg_attribute) GETSTRUCT(atttup))->attnum < 0)
- elog(ERROR, "renameatt: system attribute \"%s\" not renamed", oldattname);
-
- /* should not already exist */
- if (SearchSysCacheExists(ATTNAME,
- ObjectIdGetDatum(relid),
- PointerGetDatum(newattname),
- 0, 0))
- elog(ERROR, "renameatt: attribute \"%s\" exists", newattname);
-
- namestrcpy(&(((Form_pg_attribute) GETSTRUCT(atttup))->attname),
- newattname);
-
- simple_heap_update(attrelation, &atttup->t_self, atttup);
-
- /* keep system catalog indices current */
- {
- Relation irelations[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
- CatalogIndexInsert(irelations, Num_pg_attr_indices, attrelation, atttup);
- CatalogCloseIndices(Num_pg_attr_indices, irelations);
- }
-
- heap_freetuple(atttup);
-
- /*
- * Update column names of indexes that refer to the column being
- * renamed.
- */
- indexoidlist = RelationGetIndexList(targetrelation);
-
- foreach(indexoidscan, indexoidlist)
- {
- Oid indexoid = lfirsti(indexoidscan);
- HeapTuple indextup;
-
- /*
- * First check to see if index is a functional index. If so, its
- * column name is a function name and shouldn't be renamed here.
- */
- indextup = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(indexoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(indextup))
- elog(ERROR, "renameatt: can't find index id %u", indexoid);
- if (OidIsValid(((Form_pg_index) GETSTRUCT(indextup))->indproc))
- {
- ReleaseSysCache(indextup);
- continue;
- }
- ReleaseSysCache(indextup);
-
- /*
- * Okay, look to see if any column name of the index matches the
- * old attribute name.
- */
- atttup = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(indexoid),
- PointerGetDatum(oldattname),
- 0, 0);
- if (!HeapTupleIsValid(atttup))
- continue; /* Nope, so ignore it */
-
- /*
- * Update the (copied) attribute tuple.
- */
- namestrcpy(&(((Form_pg_attribute) GETSTRUCT(atttup))->attname),
- newattname);
-
- simple_heap_update(attrelation, &atttup->t_self, atttup);
-
- /* keep system catalog indices current */
- {
- Relation irelations[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
- CatalogIndexInsert(irelations, Num_pg_attr_indices, attrelation, atttup);
- CatalogCloseIndices(Num_pg_attr_indices, irelations);
- }
- heap_freetuple(atttup);
- }
-
- freeList(indexoidlist);
-
- heap_close(attrelation, RowExclusiveLock);
-
- /*
- * Update att name in any RI triggers associated with the relation.
- */
- if (targetrelation->rd_rel->reltriggers > 0)
- {
- /* update tgargs column reference where att is primary key */
- update_ri_trigger_args(RelationGetRelid(targetrelation),
- oldattname, newattname,
- false, false);
- /* update tgargs column reference where att is foreign key */
- update_ri_trigger_args(RelationGetRelid(targetrelation),
- oldattname, newattname,
- true, false);
- }
-
- heap_close(targetrelation, NoLock); /* close rel but keep lock! */
-}
-
-/*
- * renamerel - change the name of a relation
- *
- * XXX - When renaming sequences, we don't bother to modify the
- * sequence name that is stored within the sequence itself
- * (this would cause problems with MVCC). In the future,
- * the sequence name should probably be removed from the
- * sequence, AFAIK there's no need for it to be there.
- */
-void
-renamerel(Oid relid, const char *newrelname)
-{
- Relation targetrelation;
- Relation relrelation; /* for RELATION relation */
- HeapTuple reltup;
- Oid namespaceId;
- char *oldrelname;
- char relkind;
- bool relhastriggers;
- Relation irelations[Num_pg_class_indices];
-
- /*
- * Grab an exclusive lock on the target table or index, which we will
- * NOT release until end of transaction.
- */
- targetrelation = relation_open(relid, AccessExclusiveLock);
-
- oldrelname = pstrdup(RelationGetRelationName(targetrelation));
- namespaceId = RelationGetNamespace(targetrelation);
-
- /* Validity checks */
- if (!allowSystemTableMods &&
- IsSystemRelation(targetrelation))
- elog(ERROR, "renamerel: system relation \"%s\" may not be renamed",
- oldrelname);
-
- relkind = targetrelation->rd_rel->relkind;
- relhastriggers = (targetrelation->rd_rel->reltriggers > 0);
-
- /*
- * Find relation's pg_class tuple, and make sure newrelname isn't in
- * use.
- */
- relrelation = heap_openr(RelationRelationName, RowExclusiveLock);
-
- reltup = SearchSysCacheCopy(RELOID,
- PointerGetDatum(relid),
- 0, 0, 0);
- if (!HeapTupleIsValid(reltup))
- elog(ERROR, "renamerel: relation \"%s\" does not exist",
- oldrelname);
-
- if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
- elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
-
- /*
- * Update pg_class tuple with new relname. (Scribbling on reltup is
- * OK because it's a copy...)
- */
- namestrcpy(&(((Form_pg_class) GETSTRUCT(reltup))->relname), newrelname);
-
- simple_heap_update(relrelation, &reltup->t_self, reltup);
-
- /* keep the system catalog indices current */
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, irelations);
- CatalogIndexInsert(irelations, Num_pg_class_indices, relrelation, reltup);
- CatalogCloseIndices(Num_pg_class_indices, irelations);
-
- heap_close(relrelation, NoLock);
- heap_freetuple(reltup);
-
- /*
- * Also rename the associated type, if any.
- */
- if (relkind != RELKIND_INDEX)
- TypeRename(oldrelname, namespaceId, newrelname);
-
- /*
- * Update rel name in any RI triggers associated with the relation.
- */
- if (relhastriggers)
- {
- /* update tgargs where relname is primary key */
- update_ri_trigger_args(relid,
- oldrelname,
- newrelname,
- false, true);
- /* update tgargs where relname is foreign key */
- update_ri_trigger_args(relid,
- oldrelname,
- newrelname,
- true, true);
- }
-
- /*
- * Close rel, but keep exclusive lock!
- */
- relation_close(targetrelation, NoLock);
-}
-
-
-/*
- * Given a trigger function OID, determine whether it is an RI trigger,
- * and if so whether it is attached to PK or FK relation.
- *
- * XXX this probably doesn't belong here; should be exported by
- * ri_triggers.c
- */
-static int
-ri_trigger_type(Oid tgfoid)
-{
- switch (tgfoid)
- {
- case F_RI_FKEY_CASCADE_DEL:
- case F_RI_FKEY_CASCADE_UPD:
- case F_RI_FKEY_RESTRICT_DEL:
- case F_RI_FKEY_RESTRICT_UPD:
- case F_RI_FKEY_SETNULL_DEL:
- case F_RI_FKEY_SETNULL_UPD:
- case F_RI_FKEY_SETDEFAULT_DEL:
- case F_RI_FKEY_SETDEFAULT_UPD:
- case F_RI_FKEY_NOACTION_DEL:
- case F_RI_FKEY_NOACTION_UPD:
- return RI_TRIGGER_PK;
-
- case F_RI_FKEY_CHECK_INS:
- case F_RI_FKEY_CHECK_UPD:
- return RI_TRIGGER_FK;
- }
-
- return RI_TRIGGER_NONE;
-}
-
-/*
- * Scan pg_trigger for RI triggers that are on the specified relation
- * (if fk_scan is false) or have it as the tgconstrrel (if fk_scan
- * is true). Update RI trigger args fields matching oldname to contain
- * newname instead. If update_relname is true, examine the relname
- * fields; otherwise examine the attname fields.
- */
-static void
-update_ri_trigger_args(Oid relid,
- const char *oldname,
- const char *newname,
- bool fk_scan,
- bool update_relname)
-{
- Relation tgrel;
- ScanKeyData skey[1];
- SysScanDesc trigscan;
- HeapTuple tuple;
- Datum values[Natts_pg_trigger];
- char nulls[Natts_pg_trigger];
- char replaces[Natts_pg_trigger];
-
- tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
- if (fk_scan)
- {
- ScanKeyEntryInitialize(&skey[0], 0x0,
- Anum_pg_trigger_tgconstrrelid,
- F_OIDEQ,
- ObjectIdGetDatum(relid));
- trigscan = systable_beginscan(tgrel, TriggerConstrRelidIndex,
- true, SnapshotNow,
- 1, skey);
- }
- else
- {
- ScanKeyEntryInitialize(&skey[0], 0x0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(relid));
- trigscan = systable_beginscan(tgrel, TriggerRelidNameIndex,
- true, SnapshotNow,
- 1, skey);
- }
-
- while ((tuple = systable_getnext(trigscan)) != NULL)
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
- bytea *val;
- bytea *newtgargs;
- bool isnull;
- int tg_type;
- bool examine_pk;
- bool changed;
- int tgnargs;
- int i;
- int newlen;
- const char *arga[RI_MAX_ARGUMENTS];
- const char *argp;
-
- tg_type = ri_trigger_type(pg_trigger->tgfoid);
- if (tg_type == RI_TRIGGER_NONE)
- {
- /* Not an RI trigger, forget it */
- continue;
- }
-
- /*
- * It is an RI trigger, so parse the tgargs bytea.
- *
- * NB: we assume the field will never be compressed or moved out of
- * line; so does trigger.c ...
- */
- tgnargs = pg_trigger->tgnargs;
- val = (bytea *) fastgetattr(tuple,
- Anum_pg_trigger_tgargs,
- tgrel->rd_att, &isnull);
- if (isnull || tgnargs < RI_FIRST_ATTNAME_ARGNO ||
- tgnargs > RI_MAX_ARGUMENTS)
- {
- /* This probably shouldn't happen, but ignore busted triggers */
- continue;
- }
- argp = (const char *) VARDATA(val);
- for (i = 0; i < tgnargs; i++)
- {
- arga[i] = argp;
- argp += strlen(argp) + 1;
- }
-
- /*
- * Figure out which item(s) to look at. If the trigger is
- * primary-key type and attached to my rel, I should look at the
- * PK fields; if it is foreign-key type and attached to my rel, I
- * should look at the FK fields. But the opposite rule holds when
- * examining triggers found by tgconstrrel search.
- */
- examine_pk = (tg_type == RI_TRIGGER_PK) == (!fk_scan);
-
- changed = false;
- if (update_relname)
- {
- /* Change the relname if needed */
- i = examine_pk ? RI_PK_RELNAME_ARGNO : RI_FK_RELNAME_ARGNO;
- if (strcmp(arga[i], oldname) == 0)
- {
- arga[i] = newname;
- changed = true;
- }
- }
- else
- {
- /* Change attname(s) if needed */
- i = examine_pk ? RI_FIRST_ATTNAME_ARGNO + RI_KEYPAIR_PK_IDX :
- RI_FIRST_ATTNAME_ARGNO + RI_KEYPAIR_FK_IDX;
- for (; i < tgnargs; i += 2)
- {
- if (strcmp(arga[i], oldname) == 0)
- {
- arga[i] = newname;
- changed = true;
- }
- }
- }
-
- if (!changed)
- {
- /* Don't need to update this tuple */
- continue;
- }
-
- /*
- * Construct modified tgargs bytea.
- */
- newlen = VARHDRSZ;
- for (i = 0; i < tgnargs; i++)
- newlen += strlen(arga[i]) + 1;
- newtgargs = (bytea *) palloc(newlen);
- VARATT_SIZEP(newtgargs) = newlen;
- newlen = VARHDRSZ;
- for (i = 0; i < tgnargs; i++)
- {
- strcpy(((char *) newtgargs) + newlen, arga[i]);
- newlen += strlen(arga[i]) + 1;
- }
-
- /*
- * Build modified tuple.
- */
- for (i = 0; i < Natts_pg_trigger; i++)
- {
- values[i] = (Datum) 0;
- replaces[i] = ' ';
- nulls[i] = ' ';
- }
- values[Anum_pg_trigger_tgargs - 1] = PointerGetDatum(newtgargs);
- replaces[Anum_pg_trigger_tgargs - 1] = 'r';
-
- tuple = heap_modifytuple(tuple, tgrel, values, nulls, replaces);
-
- /*
- * Update pg_trigger and its indexes
- */
- simple_heap_update(tgrel, &tuple->t_self, tuple);
-
- {
- Relation irelations[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_trigger_indices, Name_pg_trigger_indices, irelations);
- CatalogIndexInsert(irelations, Num_pg_trigger_indices, tgrel, tuple);
- CatalogCloseIndices(Num_pg_trigger_indices, irelations);
- }
-
- /* free up our scratch memory */
- pfree(newtgargs);
- heap_freetuple(tuple);
- }
-
- systable_endscan(trigscan);
-
- heap_close(tgrel, RowExclusiveLock);
-
- /*
- * Increment cmd counter to make updates visible; this is needed in
- * case the same tuple has to be updated again by next pass (can
- * happen in case of a self-referential FK relationship).
- */
- CommandCounterIncrement();
-}
-
-
-/* ----------------
- * AlterTableAddColumn
- * (formerly known as PerformAddAttribute)
- *
- * adds an additional attribute to a relation
- *
- * Adds attribute field(s) to a relation. Each new attribute
- * is given attnums in sequential order and is added to the
- * ATTRIBUTE relation. If the AMI fails, defunct tuples will
- * remain in the ATTRIBUTE relation for later vacuuming.
- * Later, there may be some reserved attribute names???
- *
- * (If needed, can instead use elog to handle exceptions.)
- *
- * Note:
- * Initial idea of ordering the tuple attributes so that all
- * the variable length domains occured last was scratched. Doing
- * so would not speed access too much (in general) and would create
- * many complications in formtuple, heap_getattr, and addattribute.
- *
- * scan attribute catalog for name conflict (within rel)
- * scan type catalog for absence of data type (if not arg)
- * create attnum magically???
- * create attribute tuple
- * insert attribute in attribute catalog
- * modify reldesc
- * create new relation tuple
- * insert new relation in relation catalog
- * delete original relation from relation catalog
- * ----------------
- */
-void
-AlterTableAddColumn(Oid myrelid,
- bool inherits,
- ColumnDef *colDef)
-{
- Relation rel,
- pgclass,
- attrdesc;
- HeapTuple reltup;
- HeapTuple newreltup;
- HeapTuple attributeTuple;
- Form_pg_attribute attribute;
- FormData_pg_attribute attributeD;
- int i;
- int minattnum,
- maxatts;
- HeapTuple typeTuple;
- Form_pg_type tform;
- int attndims;
-
- /*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
- */
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- /*
- * permissions checking. this would normally be done in utility.c,
- * but this particular routine is recursive.
- *
- * normally, only the owner of a class can change its schema.
- */
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Recurse to add the column to child classes, if requested.
- *
- * any permissions or problems with duplicate attributes will cause the
- * whole transaction to abort, which is what we want -- all or
- * nothing.
- */
- if (inherits)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
-
- AlterTableAddColumn(childrelid, false, colDef);
- }
- }
-
- /*
- * OK, get on with it...
- *
- * Implementation restrictions: because we don't touch the table rows,
- * the new column values will initially appear to be NULLs. (This
- * happens because the heap tuple access routines always check for
- * attnum > # of attributes in tuple, and return NULL if so.)
- * Therefore we can't support a DEFAULT value in SQL92-compliant
- * fashion, and we also can't allow a NOT NULL constraint.
- *
- * We do allow CHECK constraints, even though these theoretically could
- * fail for NULL rows (eg, CHECK (newcol IS NOT NULL)).
- */
- if (colDef->raw_default || colDef->cooked_default)
- elog(ERROR, "Adding columns with defaults is not implemented."
- "\n\tAdd the column, then use ALTER TABLE SET DEFAULT.");
-
- if (colDef->is_not_null)
- elog(ERROR, "Adding NOT NULL columns is not implemented."
- "\n\tAdd the column, then use ALTER TABLE ... SET NOT NULL.");
-
- pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
-
- reltup = SearchSysCache(RELOID,
- ObjectIdGetDatum(myrelid),
- 0, 0, 0);
- if (!HeapTupleIsValid(reltup))
- elog(ERROR, "ALTER TABLE: relation \"%s\" not found",
- RelationGetRelationName(rel));
-
- if (SearchSysCacheExists(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colDef->colname),
- 0, 0))
- elog(ERROR, "ALTER TABLE: column name \"%s\" already exists in table \"%s\"",
- colDef->colname, RelationGetRelationName(rel));
-
- minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
- maxatts = minattnum + 1;
- if (maxatts > MaxHeapAttributeNumber)
- elog(ERROR, "ALTER TABLE: relations limited to %d columns",
- MaxHeapAttributeNumber);
- i = minattnum + 1;
-
- attrdesc = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- if (colDef->typename->arrayBounds)
- attndims = length(colDef->typename->arrayBounds);
- else
- attndims = 0;
-
- typeTuple = typenameType(colDef->typename);
- tform = (Form_pg_type) GETSTRUCT(typeTuple);
-
- attributeTuple = heap_addheader(Natts_pg_attribute,
- ATTRIBUTE_TUPLE_SIZE,
- (void *) &attributeD);
-
- attribute = (Form_pg_attribute) GETSTRUCT(attributeTuple);
-
- attribute->attrelid = myrelid;
- namestrcpy(&(attribute->attname), colDef->colname);
- attribute->atttypid = typeTuple->t_data->t_oid;
- attribute->attstattarget = DEFAULT_ATTSTATTARGET;
- attribute->attlen = tform->typlen;
- attribute->attcacheoff = -1;
- attribute->atttypmod = colDef->typename->typmod;
- attribute->attnum = i;
- attribute->attbyval = tform->typbyval;
- attribute->attndims = attndims;
- attribute->attisset = (bool) (tform->typtype == 'c');
- attribute->attstorage = tform->typstorage;
- attribute->attalign = tform->typalign;
- attribute->attnotnull = colDef->is_not_null;
- attribute->atthasdef = (colDef->raw_default != NULL ||
- colDef->cooked_default != NULL);
-
- ReleaseSysCache(typeTuple);
-
- simple_heap_insert(attrdesc, attributeTuple);
-
- /* Update indexes on pg_attribute */
- if (RelationGetForm(attrdesc)->relhasindex)
- {
- Relation idescs[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_attr_indices, attrdesc, attributeTuple);
- CatalogCloseIndices(Num_pg_attr_indices, idescs);
- }
-
- heap_close(attrdesc, RowExclusiveLock);
-
- /*
- * Update number of attributes in pg_class tuple
- */
- newreltup = heap_copytuple(reltup);
-
- ((Form_pg_class) GETSTRUCT(newreltup))->relnatts = maxatts;
- simple_heap_update(pgclass, &newreltup->t_self, newreltup);
-
- /* keep catalog indices current */
- if (RelationGetForm(pgclass)->relhasindex)
- {
- Relation ridescs[Num_pg_class_indices];
-
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs);
- CatalogIndexInsert(ridescs, Num_pg_class_indices, pgclass, newreltup);
- CatalogCloseIndices(Num_pg_class_indices, ridescs);
- }
-
- heap_freetuple(newreltup);
- ReleaseSysCache(reltup);
-
- heap_close(pgclass, NoLock);
-
- heap_close(rel, NoLock); /* close rel but keep lock! */
-
- /*
- * Make our catalog updates visible for subsequent steps.
- */
- CommandCounterIncrement();
-
- /*
- * Add any CHECK constraints attached to the new column.
- *
- * To do this we must re-open the rel so that its new attr list gets
- * loaded into the relcache.
- */
- if (colDef->constraints != NIL)
- {
- rel = heap_open(myrelid, AccessExclusiveLock);
- AddRelationRawConstraints(rel, NIL, colDef->constraints);
- heap_close(rel, NoLock);
- }
-
- /*
- * Automatically create the secondary relation for TOAST if it
- * formerly had no such but now has toastable attributes.
- */
- AlterTableCreateToastTable(myrelid, true);
-}
-
-/*
- * ALTER TABLE ALTER COLUMN DROP NOT NULL
- */
-void
-AlterTableAlterColumnDropNotNull(Oid myrelid,
- bool inh, const char *colName)
-{
- Relation rel;
- HeapTuple tuple;
- AttrNumber attnum;
- Relation attr_rel;
- List *indexoidlist;
- List *indexoidscan;
-
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Propagate to children if desired
- */
- if (inh)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
- AlterTableAlterColumnDropNotNull(childrelid,
- false, colName);
- }
- }
-
- /* -= now do the thing on this relation =- */
-
- /*
- * get the number of the attribute
- */
- tuple = SearchSysCache(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum;
- ReleaseSysCache(tuple);
-
- /* Prevent them from altering a system attribute */
- if (attnum < 0)
- elog(ERROR, "ALTER TABLE: Cannot alter system attribute \"%s\"",
- colName);
-
- /*
- * Check that the attribute is not in a primary key
- */
-
- /* Loop over all indices on the relation */
- indexoidlist = RelationGetIndexList(rel);
-
- foreach(indexoidscan, indexoidlist)
- {
- Oid indexoid = lfirsti(indexoidscan);
- HeapTuple indexTuple;
- Form_pg_index indexStruct;
- int i;
-
- indexTuple = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(indexoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(indexTuple))
- elog(ERROR, "ALTER TABLE: Index %u not found",
- indexoid);
- indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
-
- /* If the index is not a primary key, skip the check */
- if (indexStruct->indisprimary)
- {
- /*
- * Loop over each attribute in the primary key and
- * see if it matches the to-be-altered attribute
- */
- for (i = 0; i < INDEX_MAX_KEYS &&
- indexStruct->indkey[i] != InvalidAttrNumber; i++)
- {
- if (indexStruct->indkey[i] == attnum)
- elog(ERROR, "ALTER TABLE: Attribute \"%s\" is in a primary key", colName);
- }
- }
-
- ReleaseSysCache(indexTuple);
- }
-
- freeList(indexoidlist);
-
- /*
- * Okay, actually perform the catalog change
- */
- attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- tuple = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = FALSE;
-
- simple_heap_update(attr_rel, &tuple->t_self, tuple);
-
- /* keep the system catalog indices current */
- if (RelationGetForm(attr_rel)->relhasindex)
- {
- Relation idescs[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_attr_indices, attr_rel, tuple);
- CatalogCloseIndices(Num_pg_attr_indices, idescs);
- }
-
- heap_close(attr_rel, RowExclusiveLock);
-
- heap_close(rel, NoLock);
-}
-
-/*
- * ALTER TABLE ALTER COLUMN SET NOT NULL
- */
-void
-AlterTableAlterColumnSetNotNull(Oid myrelid,
- bool inh, const char *colName)
-{
- Relation rel;
- HeapTuple tuple;
- AttrNumber attnum;
- Relation attr_rel;
- HeapScanDesc scan;
- TupleDesc tupdesc;
-
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Propagate to children if desired
- */
- if (inh)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
- AlterTableAlterColumnSetNotNull(childrelid,
- false, colName);
- }
- }
-
- /* -= now do the thing on this relation =- */
-
- /*
- * get the number of the attribute
- */
- tuple = SearchSysCache(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum;
- ReleaseSysCache(tuple);
-
- /* Prevent them from altering a system attribute */
- if (attnum < 0)
- elog(ERROR, "ALTER TABLE: Cannot alter system attribute \"%s\"",
- colName);
-
- /*
- * Perform a scan to ensure that there are no NULL
- * values already in the relation
- */
- tupdesc = RelationGetDescr(rel);
-
- scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
-
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Datum d;
- bool isnull;
-
- d = heap_getattr(tuple, attnum, tupdesc, &isnull);
-
- if (isnull)
- elog(ERROR, "ALTER TABLE: Attribute \"%s\" contains NULL values",
- colName);
- }
-
- heap_endscan(scan);
-
- /*
- * Okay, actually perform the catalog change
- */
- attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- tuple = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE;
-
- simple_heap_update(attr_rel, &tuple->t_self, tuple);
-
- /* keep the system catalog indices current */
- if (RelationGetForm(attr_rel)->relhasindex)
- {
- Relation idescs[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_attr_indices, attr_rel, tuple);
- CatalogCloseIndices(Num_pg_attr_indices, idescs);
- }
-
- heap_close(attr_rel, RowExclusiveLock);
-
- heap_close(rel, NoLock);
-}
-
-
-/*
- * ALTER TABLE ALTER COLUMN SET/DROP DEFAULT
- */
-void
-AlterTableAlterColumnDefault(Oid myrelid,
- bool inh, const char *colName,
- Node *newDefault)
-{
- Relation rel;
- HeapTuple tuple;
- AttrNumber attnum;
-
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- /*
- * We allow defaults on views so that INSERT into a view can have
- * default-ish behavior. This works because the rewriter substitutes
- * default values into INSERTs before it expands rules.
- */
- if (rel->rd_rel->relkind != RELKIND_RELATION &&
- rel->rd_rel->relkind != RELKIND_VIEW)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table or view",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Propagate to children if desired
- */
- if (inh)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
- AlterTableAlterColumnDefault(childrelid,
- false, colName, newDefault);
- }
- }
-
- /* -= now do the thing on this relation =- */
-
- /*
- * get the number of the attribute
- */
- tuple = SearchSysCache(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum;
- ReleaseSysCache(tuple);
-
- if (newDefault)
- {
- /* SET DEFAULT */
- RawColumnDefault *rawEnt;
-
- /* Get rid of the old one first */
- drop_default(myrelid, attnum);
-
- rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault));
- rawEnt->attnum = attnum;
- rawEnt->raw_default = newDefault;
-
- /*
- * This function is intended for CREATE TABLE, so it processes a
- * _list_ of defaults, but we just do one.
- */
- AddRelationRawConstraints(rel, makeList1(rawEnt), NIL);
- }
- else
- {
- /* DROP DEFAULT */
- Relation attr_rel;
-
- /* Fix the pg_attribute row */
- attr_rel = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- tuple = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
-
- ((Form_pg_attribute) GETSTRUCT(tuple))->atthasdef = FALSE;
-
- simple_heap_update(attr_rel, &tuple->t_self, tuple);
-
- /* keep the system catalog indices current */
- if (RelationGetForm(attr_rel)->relhasindex)
- {
- Relation idescs[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_attr_indices, attr_rel, tuple);
- CatalogCloseIndices(Num_pg_attr_indices, idescs);
- }
-
- heap_close(attr_rel, RowExclusiveLock);
-
- /* get rid of actual default definition in pg_attrdef */
- drop_default(myrelid, attnum);
- }
-
- heap_close(rel, NoLock);
-}
-
-
-static void
-drop_default(Oid relid, int16 attnum)
-{
- ScanKeyData scankeys[2];
- HeapScanDesc scan;
- Relation attrdef_rel;
- HeapTuple tuple;
-
- attrdef_rel = heap_openr(AttrDefaultRelationName, RowExclusiveLock);
- ScanKeyEntryInitialize(&scankeys[0], 0x0,
- Anum_pg_attrdef_adrelid, F_OIDEQ,
- ObjectIdGetDatum(relid));
- ScanKeyEntryInitialize(&scankeys[1], 0x0,
- Anum_pg_attrdef_adnum, F_INT2EQ,
- Int16GetDatum(attnum));
-
- scan = heap_beginscan(attrdef_rel, SnapshotNow, 2, scankeys);
-
- if ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- simple_heap_delete(attrdef_rel, &tuple->t_self);
-
- heap_endscan(scan);
-
- heap_close(attrdef_rel, NoLock);
-}
-
-
-/*
- * ALTER TABLE ALTER COLUMN SET STATISTICS / STORAGE
- */
-void
-AlterTableAlterColumnFlags(Oid myrelid,
- bool inh, const char *colName,
- Node *flagValue, const char *flagType)
-{
- Relation rel;
- int newtarget = 1;
- char newstorage = 'p';
- Relation attrelation;
- HeapTuple tuple;
- Form_pg_attribute attrtuple;
-
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- /*
- * we allow statistics case for system tables
- */
- if (*flagType != 'S' && !allowSystemTableMods && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Check the supplied parameters before anything else
- */
- if (*flagType == 'S')
- {
- /* STATISTICS */
- Assert(IsA(flagValue, Integer));
- newtarget = intVal(flagValue);
-
- /*
- * Limit target to sane range (should we raise an error instead?)
- */
- if (newtarget < 0)
- newtarget = 0;
- else if (newtarget > 1000)
- newtarget = 1000;
- }
- else if (*flagType == 'M')
- {
- /* STORAGE */
- char *storagemode;
-
- Assert(IsA(flagValue, String));
- storagemode = strVal(flagValue);
-
- if (strcasecmp(storagemode, "plain") == 0)
- newstorage = 'p';
- else if (strcasecmp(storagemode, "external") == 0)
- newstorage = 'e';
- else if (strcasecmp(storagemode, "extended") == 0)
- newstorage = 'x';
- else if (strcasecmp(storagemode, "main") == 0)
- newstorage = 'm';
- else
- elog(ERROR, "ALTER TABLE: \"%s\" storage not recognized",
- storagemode);
- }
- else
- {
- elog(ERROR, "ALTER TABLE: Invalid column flag: %c",
- (int) *flagType);
- }
-
- /*
- * Propagate to children if desired
- */
- if (inh)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
- AlterTableAlterColumnFlags(childrelid,
- false, colName, flagValue, flagType);
- }
- }
-
- /* -= now do the thing on this relation =- */
-
- attrelation = heap_openr(AttributeRelationName, RowExclusiveLock);
-
- tuple = SearchSysCacheCopy(ATTNAME,
- ObjectIdGetDatum(myrelid),
- PointerGetDatum(colName),
- 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
- RelationGetRelationName(rel), colName);
- attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
-
- if (attrtuple->attnum < 0)
- elog(ERROR, "ALTER TABLE: cannot change system attribute \"%s\"",
- colName);
- /*
- * Now change the appropriate field
- */
- if (*flagType == 'S')
- attrtuple->attstattarget = newtarget;
- else if (*flagType == 'M')
- {
- /*
- * safety check: do not allow toasted storage modes unless column
- * datatype is TOAST-aware.
- */
- if (newstorage == 'p' || TypeIsToastable(attrtuple->atttypid))
- attrtuple->attstorage = newstorage;
- else
- elog(ERROR, "ALTER TABLE: Column datatype %s can only have storage \"plain\"",
- format_type_be(attrtuple->atttypid));
- }
-
- simple_heap_update(attrelation, &tuple->t_self, tuple);
-
- /* keep system catalog indices current */
- {
- Relation irelations[Num_pg_attr_indices];
-
- CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
- CatalogIndexInsert(irelations, Num_pg_attr_indices, attrelation, tuple);
- CatalogCloseIndices(Num_pg_attr_indices, irelations);
- }
-
- heap_freetuple(tuple);
- heap_close(attrelation, NoLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
-}
-
-
-/*
- * ALTER TABLE DROP COLUMN
- */
-void
-AlterTableDropColumn(Oid myrelid,
- bool inh, const char *colName,
- int behavior)
-{
- elog(ERROR, "ALTER TABLE / DROP COLUMN is not implemented");
-}
-
-
-/*
- * ALTER TABLE ADD CONSTRAINT
- */
-void
-AlterTableAddConstraint(Oid myrelid,
- bool inh, List *newConstraints)
-{
- Relation rel;
- List *listptr;
-
- /*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
- */
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- if (inh)
- {
- List *child,
- *children;
-
- /* this routine is actually in the planner */
- children = find_all_inheritors(myrelid);
-
- /*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
- */
- foreach(child, children)
- {
- Oid childrelid = lfirsti(child);
-
- if (childrelid == myrelid)
- continue;
- AlterTableAddConstraint(childrelid, false, newConstraints);
- }
- }
-
- foreach(listptr, newConstraints)
- {
- Node *newConstraint = lfirst(listptr);
-
- switch (nodeTag(newConstraint))
- {
- case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- /*
- * Currently, we only expect to see CONSTR_CHECK nodes
- * arriving here (see the preprocessing done in
- * parser/analyze.c). Use a switch anyway to make it
- * easier to add more code later.
- */
- switch (constr->contype)
- {
- case CONSTR_CHECK:
- {
- ParseState *pstate;
- bool successful = true;
- HeapScanDesc scan;
- ExprContext *econtext;
- TupleTableSlot *slot;
- HeapTuple tuple;
- RangeTblEntry *rte;
- List *qual;
- Node *expr;
- char *name;
-
- if (constr->name)
- name = constr->name;
- else
- name = "<unnamed>";
-
- /*
- * We need to make a parse state and range
- * table to allow us to transformExpr and
- * fix_opids to get a version of the
- * expression we can pass to ExecQual
- */
- pstate = make_parsestate(NULL);
- rte = addRangeTableEntryForRelation(pstate,
- myrelid,
- makeAlias(RelationGetRelationName(rel), NIL),
- false,
- true);
- addRTEtoQuery(pstate, rte, true, true);
-
- /*
- * Convert the A_EXPR in raw_expr into an
- * EXPR
- */
- expr = transformExpr(pstate, constr->raw_expr);
-
- /*
- * Make sure it yields a boolean result.
- */
- expr = coerce_to_boolean(expr, "CHECK");
-
- /*
- * Make sure no outside relations are
- * referred to.
- */
- if (length(pstate->p_rtable) != 1)
- elog(ERROR, "Only relation '%s' can be referenced in CHECK",
- RelationGetRelationName(rel));
-
- /*
- * No subplans or aggregates, either...
- */
- if (contain_subplans(expr))
- elog(ERROR, "cannot use subselect in CHECK constraint expression");
- if (contain_agg_clause(expr))
- elog(ERROR, "cannot use aggregate function in CHECK constraint expression");
-
- /*
- * Might as well try to reduce any
- * constant expressions.
- */
- expr = eval_const_expressions(expr);
-
- /* And fix the opids */
- fix_opids(expr);
-
- qual = makeList1(expr);
-
- /* Make tuple slot to hold tuples */
- slot = MakeTupleTableSlot();
- ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
- /* Make an expression context for ExecQual */
- econtext = MakeExprContext(slot, CurrentMemoryContext);
-
- /*
- * Scan through the rows now, checking the
- * expression at each row.
- */
- scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
-
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- if (!ExecQual(qual, econtext, true))
- {
- successful = false;
- break;
- }
- ResetExprContext(econtext);
- }
-
- heap_endscan(scan);
-
- FreeExprContext(econtext);
- pfree(slot);
-
- if (!successful)
- elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
-
- /*
- * Call AddRelationRawConstraints to do
- * the real adding -- It duplicates some
- * of the above, but does not check the
- * validity of the constraint against
- * tuples already in the table.
- */
- AddRelationRawConstraints(rel, NIL,
- makeList1(constr));
-
- break;
- }
- default:
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
- }
- break;
- }
- case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- Relation pkrel;
- HeapScanDesc scan;
- HeapTuple tuple;
- Trigger trig;
- List *list;
- int count;
-
- /*
- * Grab an exclusive lock on the pk table, so that
- * someone doesn't delete rows out from under us.
- *
- * XXX wouldn't a lesser lock be sufficient?
- */
- pkrel = heap_openrv(fkconstraint->pktable,
- AccessExclusiveLock);
-
- /*
- * Validity checks
- */
- if (pkrel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referenced table \"%s\" not a relation",
- fkconstraint->pktable->relname);
-
- if (isTempNamespace(RelationGetNamespace(pkrel)) &&
- !isTempNamespace(RelationGetNamespace(rel)))
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
-
- /*
- * First we check for limited correctness of the
- * constraint.
- *
- * NOTE: we assume parser has already checked for
- * existence of an appropriate unique index on the
- * referenced relation, and that the column datatypes
- * are comparable.
- *
- * Scan through each tuple, calling RI_FKey_check_ins
- * (insert trigger) as if that tuple had just been
- * inserted. If any of those fail, it should
- * elog(ERROR) and that's that.
- */
- MemSet(&trig, 0, sizeof(trig));
- trig.tgoid = InvalidOid;
- if (fkconstraint->constr_name)
- trig.tgname = fkconstraint->constr_name;
- else
- trig.tgname = "<unknown>";
- trig.tgenabled = TRUE;
- trig.tgisconstraint = TRUE;
- trig.tgconstrrelid = RelationGetRelid(pkrel);
- trig.tgdeferrable = FALSE;
- trig.tginitdeferred = FALSE;
-
- trig.tgargs = (char **) palloc(
- sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
- + length(fkconstraint->pk_attrs)));
-
- trig.tgargs[0] = trig.tgname;
- trig.tgargs[1] = RelationGetRelationName(rel);
- trig.tgargs[2] = RelationGetRelationName(pkrel);
- trig.tgargs[3] = fkconstraint->match_type;
- count = 4;
- foreach(list, fkconstraint->fk_attrs)
- {
- Ident *fk_at = lfirst(list);
-
- trig.tgargs[count] = fk_at->name;
- count += 2;
- }
- count = 5;
- foreach(list, fkconstraint->pk_attrs)
- {
- Ident *pk_at = lfirst(list);
-
- trig.tgargs[count] = pk_at->name;
- count += 2;
- }
- trig.tgnargs = count - 1;
-
- scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
-
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- /* Make a call to the check function */
-
- /*
- * No parameters are passed, but we do set a
- * context
- */
- FunctionCallInfoData fcinfo;
- TriggerData trigdata;
-
- MemSet(&fcinfo, 0, sizeof(fcinfo));
-
- /*
- * We assume RI_FKey_check_ins won't look at
- * flinfo...
- */
-
- trigdata.type = T_TriggerData;
- trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
- trigdata.tg_relation = rel;
- trigdata.tg_trigtuple = tuple;
- trigdata.tg_newtuple = NULL;
- trigdata.tg_trigger = &trig;
-
- fcinfo.context = (Node *) &trigdata;
-
- RI_FKey_check_ins(&fcinfo);
- }
- heap_endscan(scan);
-
- pfree(trig.tgargs);
-
- heap_close(pkrel, NoLock);
-
- break;
- }
- default:
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT unable to determine type of constraint passed");
- }
- }
-
- /* Close rel, but keep lock till commit */
- heap_close(rel, NoLock);
-}
-
-
-/*
- * ALTER TABLE DROP CONSTRAINT
- * Note: It is legal to remove a constraint with name "" as it is possible
- * to add a constraint with name "".
- * Christopher Kings-Lynne
- */
-void
-AlterTableDropConstraint(Oid myrelid,
- bool inh, const char *constrName,
- int behavior)
-{
- Relation rel;
- int deleted;
-
- /*
- * We don't support CASCADE yet - in fact, RESTRICT doesn't work to
- * the spec either!
- */
- if (behavior == CASCADE)
- elog(ERROR, "ALTER TABLE / DROP CONSTRAINT does not support the CASCADE keyword");
-
- /*
- * Acquire an exclusive lock on the target relation for the duration
- * of the operation.
- */
- rel = heap_open(myrelid, AccessExclusiveLock);
-
- /* Disallow DROP CONSTRAINT on views, indexes, sequences, etc */
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods
- && IsSystemRelation(rel))
- elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(myrelid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Since all we have is the name of the constraint, we have to look
- * through all catalogs that could possibly contain a constraint for
- * this relation. We also keep a count of the number of constraints
- * removed.
- */
-
- deleted = 0;
-
- /*
- * First, we remove all CHECK constraints with the given name
- */
-
- deleted += RemoveCheckConstraint(rel, constrName, inh);
-
- /*
- * Now we remove NULL, UNIQUE, PRIMARY KEY and FOREIGN KEY
- * constraints.
- *
- * Unimplemented.
- */
-
- /* Close the target relation */
- heap_close(rel, NoLock);
-
- /* If zero constraints deleted, complain */
- if (deleted == 0)
- elog(ERROR, "ALTER TABLE / DROP CONSTRAINT: %s does not exist",
- constrName);
- /* Otherwise if more than one constraint deleted, notify */
- else if (deleted > 1)
- elog(NOTICE, "Multiple constraints dropped");
-}
-
-/*
- * ALTER TABLE OWNER
- */
-void
-AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
-{
- Relation target_rel;
- Relation class_rel;
- HeapTuple tuple;
- Relation idescs[Num_pg_class_indices];
- Form_pg_class tuple_class;
-
- /* Get exclusive lock till end of transaction on the target table */
- /* Use relation_open here so that we work on indexes... */
- target_rel = relation_open(relationOid, AccessExclusiveLock);
-
- /* Get its pg_class tuple, too */
- class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
-
- tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(relationOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER TABLE: relation %u not found", relationOid);
- tuple_class = (Form_pg_class) GETSTRUCT(tuple);
-
- /* Can we change the ownership of this tuple? */
- CheckTupleType(tuple_class);
-
- /*
- * Okay, this is a valid tuple: change its ownership and
- * write to the heap.
- */
- tuple_class->relowner = newOwnerSysId;
- simple_heap_update(class_rel, &tuple->t_self, tuple);
-
- /* Keep the catalog indices up to date */
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_class_indices, class_rel, tuple);
- CatalogCloseIndices(Num_pg_class_indices, idescs);
-
- /*
- * If we are operating on a table, also change the ownership of any
- * indexes that belong to the table, as well as the table's toast
- * table (if it has one)
- */
- if (tuple_class->relkind == RELKIND_RELATION ||
- tuple_class->relkind == RELKIND_TOASTVALUE)
- {
- List *index_oid_list, *i;
-
- /* Find all the indexes belonging to this relation */
- index_oid_list = RelationGetIndexList(target_rel);
-
- /* For each index, recursively change its ownership */
- foreach(i, index_oid_list)
- {
- AlterTableOwner(lfirsti(i), newOwnerSysId);
- }
-
- freeList(index_oid_list);
- }
-
- if (tuple_class->relkind == RELKIND_RELATION)
- {
- /* If it has a toast table, recurse to change its ownership */
- if (tuple_class->reltoastrelid != InvalidOid)
- {
- AlterTableOwner(tuple_class->reltoastrelid, newOwnerSysId);
- }
- }
-
- heap_freetuple(tuple);
- heap_close(class_rel, RowExclusiveLock);
- relation_close(target_rel, NoLock);
-}
-
-static void
-CheckTupleType(Form_pg_class tuple_class)
-{
- switch (tuple_class->relkind)
- {
- case RELKIND_RELATION:
- case RELKIND_INDEX:
- case RELKIND_VIEW:
- case RELKIND_SEQUENCE:
- case RELKIND_TOASTVALUE:
- /* ok to change owner */
- break;
- default:
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table, TOAST table, index, view, or sequence",
- NameStr(tuple_class->relname));
- }
-}
-
-/*
- * ALTER TABLE CREATE TOAST TABLE
- */
-void
-AlterTableCreateToastTable(Oid relOid, bool silent)
-{
- Relation rel;
- HeapTuple reltup;
- HeapTupleData classtuple;
- TupleDesc tupdesc;
- bool shared_relation;
- Relation class_rel;
- Buffer buffer;
- Relation ridescs[Num_pg_class_indices];
- Oid toast_relid;
- Oid toast_idxid;
- char toast_relname[NAMEDATALEN];
- char toast_idxname[NAMEDATALEN];
- IndexInfo *indexInfo;
- Oid classObjectId[2];
-
- /*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
- */
- rel = heap_open(relOid, AccessExclusiveLock);
-
- /* Check permissions */
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "ALTER TABLE: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(relOid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Toast table is shared if and only if its parent is.
- *
- * We cannot allow toasting a shared relation after initdb (because
- * there's no way to mark it toasted in other databases' pg_class).
- * Unfortunately we can't distinguish initdb from a manually started
- * standalone backend. However, we can at least prevent this mistake
- * under normal multi-user operation.
- */
- shared_relation = rel->rd_rel->relisshared;
- if (shared_relation && IsUnderPostmaster)
- elog(ERROR, "Shared relations cannot be toasted after initdb");
-
- /*
- * lock the pg_class tuple for update (is that really needed?)
- */
- class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
-
- reltup = SearchSysCache(RELOID,
- ObjectIdGetDatum(relOid),
- 0, 0, 0);
- if (!HeapTupleIsValid(reltup))
- elog(ERROR, "ALTER TABLE: relation \"%s\" not found",
- RelationGetRelationName(rel));
- classtuple.t_self = reltup->t_self;
- ReleaseSysCache(reltup);
-
- switch (heap_mark4update(class_rel, &classtuple, &buffer,
- GetCurrentCommandId()))
- {
- case HeapTupleSelfUpdated:
- case HeapTupleMayBeUpdated:
- break;
- default:
- elog(ERROR, "couldn't lock pg_class tuple");
- }
- reltup = heap_copytuple(&classtuple);
- ReleaseBuffer(buffer);
-
- /*
- * Is it already toasted?
- */
- if (((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid != InvalidOid)
- {
- if (silent)
- {
- heap_close(rel, NoLock);
- heap_close(class_rel, NoLock);
- heap_freetuple(reltup);
- return;
- }
-
- elog(ERROR, "ALTER TABLE: relation \"%s\" already has a toast table",
- RelationGetRelationName(rel));
- }
-
- /*
- * Check to see whether the table actually needs a TOAST table.
- */
- if (!needs_toast_table(rel))
- {
- if (silent)
- {
- heap_close(rel, NoLock);
- heap_close(class_rel, NoLock);
- heap_freetuple(reltup);
- return;
- }
-
- elog(ERROR, "ALTER TABLE: relation \"%s\" does not need a toast table",
- RelationGetRelationName(rel));
- }
-
- /*
- * Create the toast table and its index
- */
- sprintf(toast_relname, "pg_toast_%u", relOid);
- sprintf(toast_idxname, "pg_toast_%u_index", relOid);
-
- /* this is pretty painful... need a tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(3);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1,
- "chunk_id",
- OIDOID,
- -1, 0, false);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2,
- "chunk_seq",
- INT4OID,
- -1, 0, false);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3,
- "chunk_data",
- BYTEAOID,
- -1, 0, false);
-
- /*
- * Ensure that the toast table doesn't itself get toasted, or we'll be
- * toast :-(. This is essential for chunk_data because type bytea is
- * toastable; hit the other two just to be sure.
- */
- tupdesc->attrs[0]->attstorage = 'p';
- tupdesc->attrs[1]->attstorage = 'p';
- tupdesc->attrs[2]->attstorage = 'p';
-
- /*
- * Note: the toast relation is placed in the regular pg_toast namespace
- * even if its master relation is a temp table. There cannot be any
- * naming collision, and the toast rel will be destroyed when its master
- * is, so there's no need to handle the toast rel as temp.
- */
- toast_relid = heap_create_with_catalog(toast_relname,
- PG_TOAST_NAMESPACE,
- tupdesc,
- RELKIND_TOASTVALUE,
- shared_relation,
- false,
- true);
-
- /* make the toast relation visible, else index creation will fail */
- CommandCounterIncrement();
-
- /*
- * Create unique index on chunk_id, chunk_seq.
- *
- * NOTE: the tuple toaster could actually function with a single-column
- * index on chunk_id only. However, it couldn't be unique then. We
- * want it to be unique as a check against the possibility of
- * duplicate TOAST chunk OIDs. Too, the index might be a little more
- * efficient this way, since btree isn't all that happy with large
- * numbers of equal keys.
- */
-
- indexInfo = makeNode(IndexInfo);
- indexInfo->ii_NumIndexAttrs = 2;
- indexInfo->ii_NumKeyAttrs = 2;
- indexInfo->ii_KeyAttrNumbers[0] = 1;
- indexInfo->ii_KeyAttrNumbers[1] = 2;
- indexInfo->ii_Predicate = NIL;
- indexInfo->ii_FuncOid = InvalidOid;
- indexInfo->ii_Unique = true;
-
- classObjectId[0] = OID_BTREE_OPS_OID;
- classObjectId[1] = INT4_BTREE_OPS_OID;
-
- toast_idxid = index_create(toast_relid, toast_idxname, indexInfo,
- BTREE_AM_OID, classObjectId,
- true, true);
-
- /*
- * Update toast rel's pg_class entry to show that it has an index. The
- * index OID is stored into the reltoastidxid field for easy access by
- * the tuple toaster.
- */
- setRelhasindex(toast_relid, true, true, toast_idxid);
-
- /*
- * Store the toast table's OID in the parent relation's tuple
- */
- ((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid;
- simple_heap_update(class_rel, &reltup->t_self, reltup);
-
- /*
- * Keep catalog indices current
- */
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs);
- CatalogIndexInsert(ridescs, Num_pg_class_indices, class_rel, reltup);
- CatalogCloseIndices(Num_pg_class_indices, ridescs);
-
- heap_freetuple(reltup);
-
- /*
- * Close relations and make changes visible
- */
- heap_close(class_rel, NoLock);
- heap_close(rel, NoLock);
-
- CommandCounterIncrement();
-}
-
-/*
- * Check to see whether the table needs a TOAST table. It does only if
- * (1) there are any toastable attributes, and (2) the maximum length
- * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
- * create a toast table for something like "f1 varchar(20)".)
- */
-static bool
-needs_toast_table(Relation rel)
-{
- int32 data_length = 0;
- bool maxlength_unknown = false;
- bool has_toastable_attrs = false;
- TupleDesc tupdesc;
- Form_pg_attribute *att;
- int32 tuple_length;
- int i;
-
- tupdesc = rel->rd_att;
- att = tupdesc->attrs;
-
- for (i = 0; i < tupdesc->natts; i++)
- {
- data_length = att_align(data_length, att[i]->attlen, att[i]->attalign);
- if (att[i]->attlen >= 0)
- {
- /* Fixed-length types are never toastable */
- data_length += att[i]->attlen;
- }
- else
- {
- int32 maxlen = type_maximum_size(att[i]->atttypid,
- att[i]->atttypmod);
-
- if (maxlen < 0)
- maxlength_unknown = true;
- else
- data_length += maxlen;
- if (att[i]->attstorage != 'p')
- has_toastable_attrs = true;
- }
- }
- if (!has_toastable_attrs)
- return false; /* nothing to toast? */
- if (maxlength_unknown)
- return true; /* any unlimited-length attrs? */
- tuple_length = MAXALIGN(offsetof(HeapTupleHeaderData, t_bits) +
- BITMAPLEN(tupdesc->natts)) +
- MAXALIGN(data_length);
- return (tuple_length > TOAST_TUPLE_THRESHOLD);
-}
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
deleted file mode 100644
index 18484372ed5..00000000000
--- a/src/backend/commands/trigger.c
+++ /dev/null
@@ -1,2163 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * trigger.c
- * PostgreSQL TRIGGERs support code.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.120 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "catalog/catalog.h"
-#include "catalog/catname.h"
-#include "catalog/indexing.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_language.h"
-#include "catalog/pg_proc.h"
-#include "catalog/pg_trigger.h"
-#include "commands/comment.h"
-#include "commands/trigger.h"
-#include "executor/executor.h"
-#include "miscadmin.h"
-#include "parser/parse_func.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/inval.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
-static HeapTuple GetTupleForTrigger(EState *estate,
- ResultRelInfo *relinfo,
- ItemPointer tid,
- TupleTableSlot **newSlot);
-static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
- FmgrInfo *finfo,
- MemoryContext per_tuple_context);
-static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- HeapTuple oldtup, HeapTuple newtup);
-static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, FmgrInfo *finfo,
- MemoryContext per_tuple_context);
-
-
-void
-CreateTrigger(CreateTrigStmt *stmt)
-{
- int16 tgtype;
- int16 tgattr[FUNC_MAX_ARGS];
- Datum values[Natts_pg_trigger];
- char nulls[Natts_pg_trigger];
- Relation rel;
- AclResult aclresult;
- Relation tgrel;
- SysScanDesc tgscan;
- ScanKeyData key;
- Relation pgrel;
- HeapTuple tuple;
- Relation idescs[Num_pg_trigger_indices];
- Relation ridescs[Num_pg_class_indices];
- Oid fargtypes[FUNC_MAX_ARGS];
- Oid funcoid;
- Oid funclang;
- int found = 0;
- int i;
- char constrtrigname[NAMEDATALEN];
- char *constrname = "";
- Oid constrrelid = InvalidOid;
-
- rel = heap_openrv(stmt->relation, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "CreateTrigger: relation \"%s\" is not a table",
- stmt->relation->relname);
-
- if (!allowSystemTableMods && IsSystemRelation(rel))
- elog(ERROR, "CreateTrigger: can't create trigger for system relation %s",
- stmt->relation->relname);
-
- aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
- stmt->isconstraint ? ACL_REFERENCES : ACL_TRIGGER);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, RelationGetRelationName(rel));
-
- /*
- * If trigger is an RI constraint, use trigger name as constraint name
- * and build a unique trigger name instead.
- */
- if (stmt->isconstraint)
- {
- constrname = stmt->trigname;
- snprintf(constrtrigname, sizeof(constrtrigname),
- "RI_ConstraintTrigger_%u", newoid());
- stmt->trigname = constrtrigname;
-
- if (stmt->constrrel != NULL)
- constrrelid = RangeVarGetRelid(stmt->constrrel, false);
- else
- constrrelid = InvalidOid;
- }
-
- TRIGGER_CLEAR_TYPE(tgtype);
- if (stmt->before)
- TRIGGER_SETT_BEFORE(tgtype);
- if (stmt->row)
- TRIGGER_SETT_ROW(tgtype);
- else
- elog(ERROR, "CreateTrigger: STATEMENT triggers are unimplemented, yet");
-
- for (i = 0; i < 3 && stmt->actions[i]; i++)
- {
- switch (stmt->actions[i])
- {
- case 'i':
- if (TRIGGER_FOR_INSERT(tgtype))
- elog(ERROR, "CreateTrigger: double INSERT event specified");
- TRIGGER_SETT_INSERT(tgtype);
- break;
- case 'd':
- if (TRIGGER_FOR_DELETE(tgtype))
- elog(ERROR, "CreateTrigger: double DELETE event specified");
- TRIGGER_SETT_DELETE(tgtype);
- break;
- case 'u':
- if (TRIGGER_FOR_UPDATE(tgtype))
- elog(ERROR, "CreateTrigger: double UPDATE event specified");
- TRIGGER_SETT_UPDATE(tgtype);
- break;
- default:
- elog(ERROR, "CreateTrigger: unknown event specified");
- break;
- }
- }
-
- /*
- * Scan pg_trigger for existing triggers on relation. We do this mainly
- * because we must count them; a secondary benefit is to give a nice
- * error message if there's already a trigger of the same name. (The
- * unique index on tgrelid/tgname would complain anyway.)
- *
- * NOTE that this is cool only because we have AccessExclusiveLock on the
- * relation, so the trigger set won't be changing underneath us.
- */
- tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
- ScanKeyEntryInitialize(&key, 0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(rel)));
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 1, &key);
- while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
-
- if (namestrcmp(&(pg_trigger->tgname), stmt->trigname) == 0)
- elog(ERROR, "CreateTrigger: trigger %s already defined on relation %s",
- stmt->trigname, stmt->relation->relname);
- found++;
- }
- systable_endscan(tgscan);
-
- /*
- * Find and validate the trigger function.
- */
- MemSet(fargtypes, 0, FUNC_MAX_ARGS * sizeof(Oid));
- funcoid = LookupFuncName(stmt->funcname, 0, fargtypes);
- if (!OidIsValid(funcoid))
- elog(ERROR, "CreateTrigger: function %s() does not exist",
- NameListToString(stmt->funcname));
- tuple = SearchSysCache(PROCOID,
- ObjectIdGetDatum(funcoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "CreateTrigger: function %s() does not exist",
- NameListToString(stmt->funcname));
- if (((Form_pg_proc) GETSTRUCT(tuple))->prorettype != 0)
- elog(ERROR, "CreateTrigger: function %s() must return OPAQUE",
- NameListToString(stmt->funcname));
- funclang = ((Form_pg_proc) GETSTRUCT(tuple))->prolang;
- ReleaseSysCache(tuple);
-
- if (funclang != ClanguageId && funclang != INTERNALlanguageId)
- {
- HeapTuple langTup;
-
- langTup = SearchSysCache(LANGOID,
- ObjectIdGetDatum(funclang),
- 0, 0, 0);
- if (!HeapTupleIsValid(langTup))
- elog(ERROR, "CreateTrigger: cache lookup for language %u failed",
- funclang);
- if (((Form_pg_language) GETSTRUCT(langTup))->lanispl == false)
- elog(ERROR, "CreateTrigger: only internal, C and PL functions are supported");
- ReleaseSysCache(langTup);
- }
-
- /*
- * Build the new pg_trigger tuple.
- */
- MemSet(nulls, ' ', Natts_pg_trigger * sizeof(char));
-
- values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
- values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->trigname));
- values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
- values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
- values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
- values[Anum_pg_trigger_tgisconstraint - 1] = BoolGetDatum(stmt->isconstraint);
- values[Anum_pg_trigger_tgconstrname - 1] = PointerGetDatum(constrname);
- values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
- values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
- values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
-
- if (stmt->args)
- {
- List *le;
- char *args;
- int16 nargs = length(stmt->args);
- int len = 0;
-
- foreach(le, stmt->args)
- {
- char *ar = ((Value *) lfirst(le))->val.str;
-
- len += strlen(ar) + 4;
- for (; *ar; ar++)
- {
- if (*ar == '\\')
- len++;
- }
- }
- args = (char *) palloc(len + 1);
- args[0] = '\0';
- foreach(le, stmt->args)
- {
- char *s = ((Value *) lfirst(le))->val.str;
- char *d = args + strlen(args);
-
- while (*s)
- {
- if (*s == '\\')
- *d++ = '\\';
- *d++ = *s++;
- }
- strcpy(d, "\\000");
- }
- values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
- values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
- }
- else
- {
- values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
- values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(""));
- }
- MemSet(tgattr, 0, FUNC_MAX_ARGS * sizeof(int16));
- values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
-
- tuple = heap_formtuple(tgrel->rd_att, values, nulls);
-
- /*
- * Insert tuple into pg_trigger.
- */
- simple_heap_insert(tgrel, tuple);
- CatalogOpenIndices(Num_pg_trigger_indices, Name_pg_trigger_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_trigger_indices, tgrel, tuple);
- CatalogCloseIndices(Num_pg_trigger_indices, idescs);
- heap_freetuple(tuple);
- heap_close(tgrel, RowExclusiveLock);
-
- pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
- pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
-
- /*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
- */
- pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
- tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "CreateTrigger: relation %s not found in pg_class",
- stmt->relation->relname);
-
- ((Form_pg_class) GETSTRUCT(tuple))->reltriggers = found + 1;
- simple_heap_update(pgrel, &tuple->t_self, tuple);
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs);
- CatalogIndexInsert(ridescs, Num_pg_class_indices, pgrel, tuple);
- CatalogCloseIndices(Num_pg_class_indices, ridescs);
- heap_freetuple(tuple);
- heap_close(pgrel, RowExclusiveLock);
-
- /*
- * We used to try to update the rel's relcache entry here, but that's
- * fairly pointless since it will happen as a byproduct of the
- * upcoming CommandCounterIncrement...
- */
-
- /* Keep lock on target rel until end of xact */
- heap_close(rel, NoLock);
-}
-
-/*
- * DropTrigger - drop an individual trigger by name
- */
-void
-DropTrigger(Oid relid, const char *trigname)
-{
- Relation rel;
- Relation tgrel;
- SysScanDesc tgscan;
- ScanKeyData key;
- Relation pgrel;
- HeapTuple tuple;
- Relation ridescs[Num_pg_class_indices];
- int remaining = 0;
- int found = 0;
-
- rel = heap_open(relid, AccessExclusiveLock);
-
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "DropTrigger: relation \"%s\" is not a table",
- RelationGetRelationName(rel));
-
- if (!allowSystemTableMods && IsSystemRelation(rel))
- elog(ERROR, "DropTrigger: can't drop trigger for system relation %s",
- RelationGetRelationName(rel));
-
- if (!pg_class_ownercheck(relid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
-
- /*
- * Search pg_trigger, delete target trigger, count remaining triggers
- * for relation. (Although we could fetch and delete the target
- * trigger directly, we'd still have to scan the remaining triggers,
- * so we may as well do both in one indexscan.)
- *
- * Note this is OK only because we have AccessExclusiveLock on the rel,
- * so no one else is creating/deleting triggers on this rel at the same
- * time.
- */
- tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
- ScanKeyEntryInitialize(&key, 0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(relid));
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 1, &key);
- while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
-
- if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
- {
- /* Delete any comments associated with this trigger */
- DeleteComments(tuple->t_data->t_oid, RelationGetRelid(tgrel));
-
- simple_heap_delete(tgrel, &tuple->t_self);
- found++;
- }
- else
- remaining++;
- }
- systable_endscan(tgscan);
- heap_close(tgrel, RowExclusiveLock);
-
- if (found == 0)
- elog(ERROR, "DropTrigger: there is no trigger %s on relation %s",
- trigname, RelationGetRelationName(rel));
- if (found > 1) /* shouldn't happen */
- elog(NOTICE, "DropTrigger: found (and deleted) %d triggers %s on relation %s",
- found, trigname, RelationGetRelationName(rel));
-
- /*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
- */
- pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
- tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(relid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DropTrigger: relation %s not found in pg_class",
- RelationGetRelationName(rel));
-
- ((Form_pg_class) GETSTRUCT(tuple))->reltriggers = remaining;
- simple_heap_update(pgrel, &tuple->t_self, tuple);
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs);
- CatalogIndexInsert(ridescs, Num_pg_class_indices, pgrel, tuple);
- CatalogCloseIndices(Num_pg_class_indices, ridescs);
- heap_freetuple(tuple);
- heap_close(pgrel, RowExclusiveLock);
-
- /* Keep lock on target rel until end of xact */
- heap_close(rel, NoLock);
-}
-
-/*
- * Remove all triggers for a relation that's being deleted.
- */
-void
-RelationRemoveTriggers(Relation rel)
-{
- Relation tgrel;
- SysScanDesc tgscan;
- ScanKeyData key;
- HeapTuple tup;
- bool found = false;
-
- tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
- ScanKeyEntryInitialize(&key, 0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(rel)));
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 1, &key);
-
- while (HeapTupleIsValid(tup = systable_getnext(tgscan)))
- {
- /* Delete any comments associated with this trigger */
- DeleteComments(tup->t_data->t_oid, RelationGetRelid(tgrel));
-
- simple_heap_delete(tgrel, &tup->t_self);
-
- found = true;
- }
-
- systable_endscan(tgscan);
-
- /*
- * If we deleted any triggers, must update pg_class entry and advance
- * command counter to make the updated entry visible. This is fairly
- * annoying, since we'e just going to drop the durn thing later, but
- * it's necessary to have a consistent state in case we do
- * CommandCounterIncrement() below --- if RelationBuildTriggers()
- * runs, it will complain otherwise. Perhaps RelationBuildTriggers()
- * shouldn't be so picky...
- */
- if (found)
- {
- Relation pgrel;
- Relation ridescs[Num_pg_class_indices];
-
- pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
- tup = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "RelationRemoveTriggers: relation %u not found in pg_class",
- RelationGetRelid(rel));
-
- ((Form_pg_class) GETSTRUCT(tup))->reltriggers = 0;
- simple_heap_update(pgrel, &tup->t_self, tup);
- CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs);
- CatalogIndexInsert(ridescs, Num_pg_class_indices, pgrel, tup);
- CatalogCloseIndices(Num_pg_class_indices, ridescs);
- heap_freetuple(tup);
- heap_close(pgrel, RowExclusiveLock);
- CommandCounterIncrement();
- }
-
- /*
- * Also drop all constraint triggers referencing this relation
- */
- ScanKeyEntryInitialize(&key, 0,
- Anum_pg_trigger_tgconstrrelid,
- F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(rel)));
- tgscan = systable_beginscan(tgrel, TriggerConstrRelidIndex, true,
- SnapshotNow, 1, &key);
-
- while (HeapTupleIsValid(tup = systable_getnext(tgscan)))
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tup);
-
- elog(NOTICE, "DROP TABLE implicitly drops referential integrity trigger from table \"%s\"",
- get_rel_name(pg_trigger->tgrelid));
-
- DropTrigger(pg_trigger->tgrelid, NameStr(pg_trigger->tgname));
-
- /*
- * Need to do a command counter increment here to show up new
- * pg_class.reltriggers in the next loop iteration (in case there
- * are multiple referential integrity action triggers for the same
- * FK table defined on the PK table).
- */
- CommandCounterIncrement();
- }
- systable_endscan(tgscan);
-
- heap_close(tgrel, RowExclusiveLock);
-}
-
-/*
- * renametrig - changes the name of a trigger on a relation
- *
- * trigger name is changed in trigger catalog.
- * No record of the previous name is kept.
- *
- * get proper relrelation from relation catalog (if not arg)
- * scan trigger catalog
- * for name conflict (within rel)
- * for original trigger (if not arg)
- * modify tgname in trigger tuple
- * update row in catalog
- */
-void
-renametrig(Oid relid,
- const char *oldname,
- const char *newname)
-{
- Relation targetrel;
- Relation tgrel;
- HeapTuple tuple;
- SysScanDesc tgscan;
- ScanKeyData key[2];
- Relation idescs[Num_pg_trigger_indices];
-
- /*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
- */
- targetrel = heap_open(relid, AccessExclusiveLock);
-
- /*
- * Scan pg_trigger twice for existing triggers on relation. We do this in
- * order to ensure a trigger does not exist with newname (The unique index
- * on tgrelid/tgname would complain anyway) and to ensure a trigger does
- * exist with oldname.
- *
- * NOTE that this is cool only because we have AccessExclusiveLock on the
- * relation, so the trigger set won't be changing underneath us.
- */
- tgrel = heap_openr(TriggerRelationName, RowExclusiveLock);
-
- /*
- * First pass -- look for name conflict
- */
- ScanKeyEntryInitialize(&key[0], 0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(relid));
- ScanKeyEntryInitialize(&key[1], 0,
- Anum_pg_trigger_tgname,
- F_NAMEEQ,
- PointerGetDatum(newname));
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 2, key);
- if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
- elog(ERROR, "renametrig: trigger %s already defined on relation %s",
- newname, RelationGetRelationName(targetrel));
- systable_endscan(tgscan);
-
- /*
- * Second pass -- look for trigger existing with oldname and update
- */
- ScanKeyEntryInitialize(&key[0], 0,
- Anum_pg_trigger_tgrelid,
- F_OIDEQ,
- ObjectIdGetDatum(relid));
- ScanKeyEntryInitialize(&key[1], 0,
- Anum_pg_trigger_tgname,
- F_NAMEEQ,
- PointerGetDatum(oldname));
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 2, key);
- if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
- {
- /*
- * Update pg_trigger tuple with new tgname.
- */
- tuple = heap_copytuple(tuple); /* need a modifiable copy */
-
- namestrcpy(&((Form_pg_trigger) GETSTRUCT(tuple))->tgname, newname);
-
- simple_heap_update(tgrel, &tuple->t_self, tuple);
-
- /*
- * keep system catalog indices current
- */
- CatalogOpenIndices(Num_pg_trigger_indices, Name_pg_trigger_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_trigger_indices, tgrel, tuple);
- CatalogCloseIndices(Num_pg_trigger_indices, idescs);
-
- /*
- * Invalidate relation's relcache entry so that other backends (and
- * this one too!) are sent SI message to make them rebuild relcache
- * entries. (Ideally this should happen automatically...)
- */
- CacheInvalidateRelcache(relid);
- }
- else
- {
- elog(ERROR, "renametrig: trigger %s not defined on relation %s",
- oldname, RelationGetRelationName(targetrel));
- }
-
- systable_endscan(tgscan);
-
- heap_close(tgrel, RowExclusiveLock);
-
- /*
- * Close rel, but keep exclusive lock!
- */
- heap_close(targetrel, NoLock);
-}
-
-/*
- * Build trigger data to attach to the given relcache entry.
- *
- * Note that trigger data must be allocated in CacheMemoryContext
- * to ensure it survives as long as the relcache entry. But we
- * are probably running in a less long-lived working context.
- */
-void
-RelationBuildTriggers(Relation relation)
-{
- TriggerDesc *trigdesc;
- int ntrigs = relation->rd_rel->reltriggers;
- Trigger *triggers;
- int found = 0;
- Relation tgrel;
- ScanKeyData skey;
- SysScanDesc tgscan;
- HeapTuple htup;
- struct varlena *val;
- bool isnull;
-
- triggers = (Trigger *) MemoryContextAlloc(CacheMemoryContext,
- ntrigs * sizeof(Trigger));
-
- /*
- * Note: since we scan the triggers using TriggerRelidNameIndex,
- * we will be reading the triggers in name order, except possibly
- * during emergency-recovery operations (ie, IsIgnoringSystemIndexes).
- * This in turn ensures that triggers will be fired in name order.
- */
- ScanKeyEntryInitialize(&skey,
- (bits16) 0x0,
- (AttrNumber) Anum_pg_trigger_tgrelid,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
-
- tgrel = heap_openr(TriggerRelationName, AccessShareLock);
- tgscan = systable_beginscan(tgrel, TriggerRelidNameIndex, true,
- SnapshotNow, 1, &skey);
-
- while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
- Trigger *build;
-
- if (found >= ntrigs)
- elog(ERROR, "RelationBuildTriggers: unexpected record found for rel %s",
- RelationGetRelationName(relation));
- build = &(triggers[found]);
-
- build->tgoid = htup->t_data->t_oid;
- build->tgname = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname))));
- build->tgfoid = pg_trigger->tgfoid;
- build->tgtype = pg_trigger->tgtype;
- build->tgenabled = pg_trigger->tgenabled;
- build->tgisconstraint = pg_trigger->tgisconstraint;
- build->tgconstrrelid = pg_trigger->tgconstrrelid;
- build->tgdeferrable = pg_trigger->tgdeferrable;
- build->tginitdeferred = pg_trigger->tginitdeferred;
- build->tgnargs = pg_trigger->tgnargs;
- memcpy(build->tgattr, &(pg_trigger->tgattr),
- FUNC_MAX_ARGS * sizeof(int16));
- if (build->tgnargs > 0)
- {
- char *p;
- int i;
-
- val = (struct varlena *) fastgetattr(htup,
- Anum_pg_trigger_tgargs,
- tgrel->rd_att, &isnull);
- if (isnull)
- elog(ERROR, "RelationBuildTriggers: tgargs IS NULL for rel %s",
- RelationGetRelationName(relation));
- p = (char *) VARDATA(val);
- build->tgargs = (char **)
- MemoryContextAlloc(CacheMemoryContext,
- build->tgnargs * sizeof(char *));
- for (i = 0; i < build->tgnargs; i++)
- {
- build->tgargs[i] = MemoryContextStrdup(CacheMemoryContext,
- p);
- p += strlen(p) + 1;
- }
- }
- else
- build->tgargs = NULL;
-
- found++;
- }
-
- systable_endscan(tgscan);
- heap_close(tgrel, AccessShareLock);
-
- if (found != ntrigs)
- elog(ERROR, "RelationBuildTriggers: %d record(s) not found for rel %s",
- ntrigs - found,
- RelationGetRelationName(relation));
-
- /* Build trigdesc */
- trigdesc = (TriggerDesc *) MemoryContextAlloc(CacheMemoryContext,
- sizeof(TriggerDesc));
- MemSet(trigdesc, 0, sizeof(TriggerDesc));
- trigdesc->triggers = triggers;
- trigdesc->numtriggers = ntrigs;
- for (found = 0; found < ntrigs; found++)
- InsertTrigger(trigdesc, &(triggers[found]), found);
-
- relation->trigdesc = trigdesc;
-}
-
-/* Insert the given trigger into the appropriate index list(s) for it */
-static void
-InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx)
-{
- uint16 *n;
- int **t,
- **tp;
-
- if (TRIGGER_FOR_ROW(trigger->tgtype))
- {
- /* ROW trigger */
- if (TRIGGER_FOR_BEFORE(trigger->tgtype))
- {
- n = trigdesc->n_before_row;
- t = trigdesc->tg_before_row;
- }
- else
- {
- n = trigdesc->n_after_row;
- t = trigdesc->tg_after_row;
- }
- }
- else
- {
- /* STATEMENT trigger */
- if (TRIGGER_FOR_BEFORE(trigger->tgtype))
- {
- n = trigdesc->n_before_statement;
- t = trigdesc->tg_before_statement;
- }
- else
- {
- n = trigdesc->n_after_statement;
- t = trigdesc->tg_after_statement;
- }
- }
-
- if (TRIGGER_FOR_INSERT(trigger->tgtype))
- {
- tp = &(t[TRIGGER_EVENT_INSERT]);
- if (*tp == NULL)
- *tp = (int *) MemoryContextAlloc(CacheMemoryContext,
- sizeof(int));
- else
- *tp = (int *) repalloc(*tp, (n[TRIGGER_EVENT_INSERT] + 1) *
- sizeof(int));
- (*tp)[n[TRIGGER_EVENT_INSERT]] = indx;
- (n[TRIGGER_EVENT_INSERT])++;
- }
-
- if (TRIGGER_FOR_DELETE(trigger->tgtype))
- {
- tp = &(t[TRIGGER_EVENT_DELETE]);
- if (*tp == NULL)
- *tp = (int *) MemoryContextAlloc(CacheMemoryContext,
- sizeof(int));
- else
- *tp = (int *) repalloc(*tp, (n[TRIGGER_EVENT_DELETE] + 1) *
- sizeof(int));
- (*tp)[n[TRIGGER_EVENT_DELETE]] = indx;
- (n[TRIGGER_EVENT_DELETE])++;
- }
-
- if (TRIGGER_FOR_UPDATE(trigger->tgtype))
- {
- tp = &(t[TRIGGER_EVENT_UPDATE]);
- if (*tp == NULL)
- *tp = (int *) MemoryContextAlloc(CacheMemoryContext,
- sizeof(int));
- else
- *tp = (int *) repalloc(*tp, (n[TRIGGER_EVENT_UPDATE] + 1) *
- sizeof(int));
- (*tp)[n[TRIGGER_EVENT_UPDATE]] = indx;
- (n[TRIGGER_EVENT_UPDATE])++;
- }
-}
-
-void
-FreeTriggerDesc(TriggerDesc *trigdesc)
-{
- int **t;
- Trigger *trigger;
- int i;
-
- if (trigdesc == NULL)
- return;
-
- t = trigdesc->tg_before_statement;
- for (i = 0; i < TRIGGER_NUM_EVENT_CLASSES; i++)
- if (t[i] != NULL)
- pfree(t[i]);
- t = trigdesc->tg_before_row;
- for (i = 0; i < TRIGGER_NUM_EVENT_CLASSES; i++)
- if (t[i] != NULL)
- pfree(t[i]);
- t = trigdesc->tg_after_row;
- for (i = 0; i < TRIGGER_NUM_EVENT_CLASSES; i++)
- if (t[i] != NULL)
- pfree(t[i]);
- t = trigdesc->tg_after_statement;
- for (i = 0; i < TRIGGER_NUM_EVENT_CLASSES; i++)
- if (t[i] != NULL)
- pfree(t[i]);
-
- trigger = trigdesc->triggers;
- for (i = 0; i < trigdesc->numtriggers; i++)
- {
- pfree(trigger->tgname);
- if (trigger->tgnargs > 0)
- {
- while (--(trigger->tgnargs) >= 0)
- pfree(trigger->tgargs[trigger->tgnargs]);
- pfree(trigger->tgargs);
- }
- trigger++;
- }
- pfree(trigdesc->triggers);
- pfree(trigdesc);
-}
-
-bool
-equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
-{
- int i,
- j;
-
- /*
- * We need not examine the "index" data, just the trigger array
- * itself; if we have the same triggers with the same types, the
- * derived index data should match.
- *
- * As of 7.3 we assume trigger set ordering is significant in the
- * comparison; so we just compare corresponding slots of the two sets.
- */
- if (trigdesc1 != NULL)
- {
- if (trigdesc2 == NULL)
- return false;
- if (trigdesc1->numtriggers != trigdesc2->numtriggers)
- return false;
- for (i = 0; i < trigdesc1->numtriggers; i++)
- {
- Trigger *trig1 = trigdesc1->triggers + i;
- Trigger *trig2 = trigdesc2->triggers + i;
-
- if (trig1->tgoid != trig2->tgoid)
- return false;
- if (strcmp(trig1->tgname, trig2->tgname) != 0)
- return false;
- if (trig1->tgfoid != trig2->tgfoid)
- return false;
- if (trig1->tgtype != trig2->tgtype)
- return false;
- if (trig1->tgenabled != trig2->tgenabled)
- return false;
- if (trig1->tgisconstraint != trig2->tgisconstraint)
- return false;
- if (trig1->tgconstrrelid != trig2->tgconstrrelid)
- return false;
- if (trig1->tgdeferrable != trig2->tgdeferrable)
- return false;
- if (trig1->tginitdeferred != trig2->tginitdeferred)
- return false;
- if (trig1->tgnargs != trig2->tgnargs)
- return false;
- if (memcmp(trig1->tgattr, trig2->tgattr,
- sizeof(trig1->tgattr)) != 0)
- return false;
- for (j = 0; j < trig1->tgnargs; j++)
- if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
- return false;
- }
- }
- else if (trigdesc2 != NULL)
- return false;
- return true;
-}
-
-/*
- * Call a trigger function.
- *
- * trigdata: trigger descriptor.
- * finfo: possibly-cached call info for the function.
- * per_tuple_context: memory context to execute the function in.
- *
- * Returns the tuple (or NULL) as returned by the function.
- */
-static HeapTuple
-ExecCallTriggerFunc(TriggerData *trigdata,
- FmgrInfo *finfo,
- MemoryContext per_tuple_context)
-{
- FunctionCallInfoData fcinfo;
- Datum result;
- MemoryContext oldContext;
-
- /*
- * We cache fmgr lookup info, to avoid making the lookup again on each
- * call.
- */
- if (finfo->fn_oid == InvalidOid)
- fmgr_info(trigdata->tg_trigger->tgfoid, finfo);
-
- Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
-
- /*
- * Do the function evaluation in the per-tuple memory context, so that
- * leaked memory will be reclaimed once per tuple. Note in particular
- * that any new tuple created by the trigger function will live till
- * the end of the tuple cycle.
- */
- oldContext = MemoryContextSwitchTo(per_tuple_context);
-
- /*
- * Call the function, passing no arguments but setting a context.
- */
- MemSet(&fcinfo, 0, sizeof(fcinfo));
-
- fcinfo.flinfo = finfo;
- fcinfo.context = (Node *) trigdata;
-
- result = FunctionCallInvoke(&fcinfo);
-
- MemoryContextSwitchTo(oldContext);
-
- /*
- * Trigger protocol allows function to return a null pointer, but NOT
- * to set the isnull result flag.
- */
- if (fcinfo.isnull)
- elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
- fcinfo.flinfo->fn_oid);
-
- return (HeapTuple) DatumGetPointer(result);
-}
-
-HeapTuple
-ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
- HeapTuple trigtuple)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- int ntrigs = trigdesc->n_before_row[TRIGGER_EVENT_INSERT];
- int *tgindx = trigdesc->tg_before_row[TRIGGER_EVENT_INSERT];
- HeapTuple newtuple = trigtuple;
- HeapTuple oldtuple;
- TriggerData LocTriggerData;
- int i;
-
- /* Allocate cache space for fmgr lookup info, if not done yet */
- if (relinfo->ri_TrigFunctions == NULL)
- {
- relinfo->ri_TrigFunctions = (FmgrInfo *)
- palloc(trigdesc->numtriggers * sizeof(FmgrInfo));
- MemSet(relinfo->ri_TrigFunctions, 0,
- trigdesc->numtriggers * sizeof(FmgrInfo));
- }
-
- LocTriggerData.type = T_TriggerData;
- LocTriggerData.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW | TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- for (i = 0; i < ntrigs; i++)
- {
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
-
- if (!trigger->tgenabled)
- continue;
- LocTriggerData.tg_trigtuple = oldtuple = newtuple;
- LocTriggerData.tg_trigger = trigger;
- newtuple = ExecCallTriggerFunc(&LocTriggerData,
- relinfo->ri_TrigFunctions + tgindx[i],
- GetPerTupleMemoryContext(estate));
- if (oldtuple != newtuple && oldtuple != trigtuple)
- heap_freetuple(oldtuple);
- if (newtuple == NULL)
- break;
- }
- return newtuple;
-}
-
-void
-ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
- HeapTuple trigtuple)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
-
- if (trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0)
- DeferredTriggerSaveEvent(relinfo, TRIGGER_EVENT_INSERT,
- NULL, trigtuple);
-}
-
-bool
-ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- int ntrigs = trigdesc->n_before_row[TRIGGER_EVENT_DELETE];
- int *tgindx = trigdesc->tg_before_row[TRIGGER_EVENT_DELETE];
- TriggerData LocTriggerData;
- HeapTuple trigtuple;
- HeapTuple newtuple = NULL;
- TupleTableSlot *newSlot;
- int i;
-
- trigtuple = GetTupleForTrigger(estate, relinfo, tupleid, &newSlot);
- if (trigtuple == NULL)
- return false;
-
- /* Allocate cache space for fmgr lookup info, if not done yet */
- if (relinfo->ri_TrigFunctions == NULL)
- {
- relinfo->ri_TrigFunctions = (FmgrInfo *)
- palloc(trigdesc->numtriggers * sizeof(FmgrInfo));
- MemSet(relinfo->ri_TrigFunctions, 0,
- trigdesc->numtriggers * sizeof(FmgrInfo));
- }
-
- LocTriggerData.type = T_TriggerData;
- LocTriggerData.tg_event = TRIGGER_EVENT_DELETE | TRIGGER_EVENT_ROW | TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- for (i = 0; i < ntrigs; i++)
- {
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
-
- if (!trigger->tgenabled)
- continue;
- LocTriggerData.tg_trigtuple = trigtuple;
- LocTriggerData.tg_trigger = trigger;
- newtuple = ExecCallTriggerFunc(&LocTriggerData,
- relinfo->ri_TrigFunctions + tgindx[i],
- GetPerTupleMemoryContext(estate));
- if (newtuple == NULL)
- break;
- if (newtuple != trigtuple)
- heap_freetuple(newtuple);
- }
- heap_freetuple(trigtuple);
-
- return (newtuple == NULL) ? false : true;
-}
-
-void
-ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
-
- if (trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
- {
- HeapTuple trigtuple = GetTupleForTrigger(estate, relinfo,
- tupleid, NULL);
-
- DeferredTriggerSaveEvent(relinfo, TRIGGER_EVENT_DELETE,
- trigtuple, NULL);
- heap_freetuple(trigtuple);
- }
-}
-
-HeapTuple
-ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid, HeapTuple newtuple)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- int ntrigs = trigdesc->n_before_row[TRIGGER_EVENT_UPDATE];
- int *tgindx = trigdesc->tg_before_row[TRIGGER_EVENT_UPDATE];
- TriggerData LocTriggerData;
- HeapTuple trigtuple;
- HeapTuple oldtuple;
- HeapTuple intuple = newtuple;
- TupleTableSlot *newSlot;
- int i;
-
- trigtuple = GetTupleForTrigger(estate, relinfo, tupleid, &newSlot);
- if (trigtuple == NULL)
- return NULL;
-
- /*
- * In READ COMMITTED isolevel it's possible that newtuple was changed
- * due to concurrent update.
- */
- if (newSlot != NULL)
- intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot);
-
- /* Allocate cache space for fmgr lookup info, if not done yet */
- if (relinfo->ri_TrigFunctions == NULL)
- {
- relinfo->ri_TrigFunctions = (FmgrInfo *)
- palloc(trigdesc->numtriggers * sizeof(FmgrInfo));
- MemSet(relinfo->ri_TrigFunctions, 0,
- trigdesc->numtriggers * sizeof(FmgrInfo));
- }
-
- LocTriggerData.type = T_TriggerData;
- LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE | TRIGGER_EVENT_ROW | TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- for (i = 0; i < ntrigs; i++)
- {
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
-
- if (!trigger->tgenabled)
- continue;
- LocTriggerData.tg_trigtuple = trigtuple;
- LocTriggerData.tg_newtuple = oldtuple = newtuple;
- LocTriggerData.tg_trigger = trigger;
- newtuple = ExecCallTriggerFunc(&LocTriggerData,
- relinfo->ri_TrigFunctions + tgindx[i],
- GetPerTupleMemoryContext(estate));
- if (oldtuple != newtuple && oldtuple != intuple)
- heap_freetuple(oldtuple);
- if (newtuple == NULL)
- break;
- }
- heap_freetuple(trigtuple);
- return newtuple;
-}
-
-void
-ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tupleid, HeapTuple newtuple)
-{
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
-
- if (trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0)
- {
- HeapTuple trigtuple = GetTupleForTrigger(estate, relinfo,
- tupleid, NULL);
-
- DeferredTriggerSaveEvent(relinfo, TRIGGER_EVENT_UPDATE,
- trigtuple, newtuple);
- heap_freetuple(trigtuple);
- }
-}
-
-
-static HeapTuple
-GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo,
- ItemPointer tid, TupleTableSlot **newSlot)
-{
- Relation relation = relinfo->ri_RelationDesc;
- HeapTupleData tuple;
- HeapTuple result;
- Buffer buffer;
-
- if (newSlot != NULL)
- {
- int test;
-
- /*
- * mark tuple for update
- */
- *newSlot = NULL;
- tuple.t_self = *tid;
-ltrmark:;
- test = heap_mark4update(relation, &tuple, &buffer,
- GetCurrentCommandId());
- switch (test)
- {
- case HeapTupleSelfUpdated:
- ReleaseBuffer(buffer);
- return (NULL);
-
- case HeapTupleMayBeUpdated:
- break;
-
- case HeapTupleUpdated:
- ReleaseBuffer(buffer);
- if (XactIsoLevel == XACT_SERIALIZABLE)
- elog(ERROR, "Can't serialize access due to concurrent update");
- else if (!(ItemPointerEquals(&(tuple.t_self), tid)))
- {
- TupleTableSlot *epqslot = EvalPlanQual(estate,
- relinfo->ri_RangeTableIndex,
- &(tuple.t_self));
-
- if (!(TupIsNull(epqslot)))
- {
- *tid = tuple.t_self;
- *newSlot = epqslot;
- goto ltrmark;
- }
- }
-
- /*
- * if tuple was deleted or PlanQual failed for updated
- * tuple - we have not process this tuple!
- */
- return (NULL);
-
- default:
- ReleaseBuffer(buffer);
- elog(ERROR, "Unknown status %u from heap_mark4update", test);
- return (NULL);
- }
- }
- else
- {
- PageHeader dp;
- ItemId lp;
-
- buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
-
- if (!BufferIsValid(buffer))
- elog(ERROR, "GetTupleForTrigger: failed ReadBuffer");
-
- dp = (PageHeader) BufferGetPage(buffer);
- lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid));
-
- Assert(ItemIdIsUsed(lp));
-
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
- tuple.t_len = ItemIdGetLength(lp);
- tuple.t_self = *tid;
- }
-
- result = heap_copytuple(&tuple);
- ReleaseBuffer(buffer);
-
- return result;
-}
-
-
-/* ----------
- * Deferred trigger stuff
- * ----------
- */
-
-
-/* ----------
- * Internal data to the deferred trigger mechanism is held
- * during entire session in a global context created at startup and
- * over statements/commands in a separate context which
- * is created at transaction start and destroyed at transaction end.
- * ----------
- */
-static MemoryContext deftrig_gcxt = NULL;
-static MemoryContext deftrig_cxt = NULL;
-
-/* ----------
- * Global data that tells which triggers are actually in
- * state IMMEDIATE or DEFERRED.
- * ----------
- */
-static bool deftrig_dfl_all_isset = false;
-static bool deftrig_dfl_all_isdeferred = false;
-static List *deftrig_dfl_trigstates = NIL;
-
-static bool deftrig_all_isset;
-static bool deftrig_all_isdeferred;
-static List *deftrig_trigstates;
-
-/* ----------
- * The list of pending deferred trigger events during the current transaction.
- *
- * deftrig_events is the head, deftrig_event_tail is the last entry.
- * Because this can grow pretty large, we don't use separate List nodes,
- * but instead thread the list through the dte_next fields of the member
- * nodes. Saves just a few bytes per entry, but that adds up.
- *
- * XXX Need to be able to shove this data out to a file if it grows too
- * large...
- * ----------
- */
-static DeferredTriggerEvent deftrig_events;
-static DeferredTriggerEvent deftrig_event_tail;
-
-
-/* ----------
- * deferredTriggerCheckState()
- *
- * Returns true if the trigger identified by tgoid is actually
- * in state DEFERRED.
- * ----------
- */
-static bool
-deferredTriggerCheckState(Oid tgoid, int32 itemstate)
-{
- MemoryContext oldcxt;
- List *sl;
- DeferredTriggerStatus trigstate;
-
- /*
- * Not deferrable triggers (i.e. normal AFTER ROW triggers and
- * constraints declared NOT DEFERRABLE, the state is allways false.
- */
- if ((itemstate & TRIGGER_DEFERRED_DEFERRABLE) == 0)
- return false;
-
- /*
- * Lookup if we know an individual state for this trigger
- */
- foreach(sl, deftrig_trigstates)
- {
- trigstate = (DeferredTriggerStatus) lfirst(sl);
- if (trigstate->dts_tgoid == tgoid)
- return trigstate->dts_tgisdeferred;
- }
-
- /*
- * No individual state known - so if the user issued a SET CONSTRAINT
- * ALL ..., we return that instead of the triggers default state.
- */
- if (deftrig_all_isset)
- return deftrig_all_isdeferred;
-
- /*
- * No ALL state known either, remember the default state as the
- * current and return that.
- */
- oldcxt = MemoryContextSwitchTo(deftrig_cxt);
-
- trigstate = (DeferredTriggerStatus)
- palloc(sizeof(DeferredTriggerStatusData));
- trigstate->dts_tgoid = tgoid;
- trigstate->dts_tgisdeferred =
- ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0);
- deftrig_trigstates = lappend(deftrig_trigstates, trigstate);
-
- MemoryContextSwitchTo(oldcxt);
-
- return trigstate->dts_tgisdeferred;
-}
-
-
-/* ----------
- * deferredTriggerAddEvent()
- *
- * Add a new trigger event to the queue.
- * ----------
- */
-static void
-deferredTriggerAddEvent(DeferredTriggerEvent event)
-{
- /*
- * Since the event list could grow quite long, we keep track of the
- * list tail and append there, rather than just doing a stupid
- * "lappend". This avoids O(N^2) behavior for large numbers of events.
- */
- event->dte_next = NULL;
- if (deftrig_event_tail == NULL)
- {
- /* first list entry */
- deftrig_events = event;
- deftrig_event_tail = event;
- }
- else
- {
- deftrig_event_tail->dte_next = event;
- deftrig_event_tail = event;
- }
-}
-
-
-/* ----------
- * DeferredTriggerExecute()
- *
- * Fetch the required tuples back from the heap and fire one
- * single trigger function.
- *
- * Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
- * fmgr lookup cache space at the caller level.
- *
- * event: event currently being fired.
- * itemno: item within event currently being fired.
- * rel: open relation for event.
- * finfo: array of fmgr lookup cache entries (one per trigger of relation).
- * per_tuple_context: memory context to call trigger function in.
- * ----------
- */
-static void
-DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, FmgrInfo *finfo,
- MemoryContext per_tuple_context)
-{
- Oid tgoid = event->dte_item[itemno].dti_tgoid;
- TriggerDesc *trigdesc = rel->trigdesc;
- TriggerData LocTriggerData;
- HeapTupleData oldtuple;
- HeapTupleData newtuple;
- HeapTuple rettuple;
- Buffer oldbuffer;
- Buffer newbuffer;
- int tgindx;
-
- /*
- * Fetch the required OLD and NEW tuples.
- */
- if (ItemPointerIsValid(&(event->dte_oldctid)))
- {
- ItemPointerCopy(&(event->dte_oldctid), &(oldtuple.t_self));
- if (!heap_fetch(rel, SnapshotAny, &oldtuple, &oldbuffer, false, NULL))
- elog(ERROR, "DeferredTriggerExecute: failed to fetch old tuple");
- }
-
- if (ItemPointerIsValid(&(event->dte_newctid)))
- {
- ItemPointerCopy(&(event->dte_newctid), &(newtuple.t_self));
- if (!heap_fetch(rel, SnapshotAny, &newtuple, &newbuffer, false, NULL))
- elog(ERROR, "DeferredTriggerExecute: failed to fetch new tuple");
- }
-
- /*
- * Setup the trigger information
- */
- LocTriggerData.type = T_TriggerData;
- LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
- TRIGGER_EVENT_ROW;
- LocTriggerData.tg_relation = rel;
-
- LocTriggerData.tg_trigger = NULL;
- for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
- {
- if (trigdesc->triggers[tgindx].tgoid == tgoid)
- {
- LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
- break;
- }
- }
- if (LocTriggerData.tg_trigger == NULL)
- elog(ERROR, "DeferredTriggerExecute: can't find trigger %u", tgoid);
-
- switch (event->dte_event & TRIGGER_EVENT_OPMASK)
- {
- case TRIGGER_EVENT_INSERT:
- LocTriggerData.tg_trigtuple = &newtuple;
- LocTriggerData.tg_newtuple = NULL;
- break;
-
- case TRIGGER_EVENT_UPDATE:
- LocTriggerData.tg_trigtuple = &oldtuple;
- LocTriggerData.tg_newtuple = &newtuple;
- break;
-
- case TRIGGER_EVENT_DELETE:
- LocTriggerData.tg_trigtuple = &oldtuple;
- LocTriggerData.tg_newtuple = NULL;
- break;
- }
-
- /*
- * Call the trigger and throw away an eventually returned updated
- * tuple.
- */
- rettuple = ExecCallTriggerFunc(&LocTriggerData,
- finfo + tgindx,
- per_tuple_context);
- if (rettuple != NULL && rettuple != &oldtuple && rettuple != &newtuple)
- heap_freetuple(rettuple);
-
- /*
- * Might have been a referential integrity constraint trigger. Reset
- * the snapshot overriding flag.
- */
- ReferentialIntegritySnapshotOverride = false;
-
- /*
- * Release buffers
- */
- if (ItemPointerIsValid(&(event->dte_oldctid)))
- ReleaseBuffer(oldbuffer);
- if (ItemPointerIsValid(&(event->dte_newctid)))
- ReleaseBuffer(newbuffer);
-}
-
-
-/* ----------
- * deferredTriggerInvokeEvents()
- *
- * Scan the event queue for not yet invoked triggers. Check if they
- * should be invoked now and do so.
- * ----------
- */
-static void
-deferredTriggerInvokeEvents(bool immediate_only)
-{
- DeferredTriggerEvent event,
- prev_event = NULL;
- MemoryContext per_tuple_context;
- Relation rel = NULL;
- FmgrInfo *finfo = NULL;
-
- /*
- * If immediate_only is true, we remove fully-processed events from
- * the event queue to recycle space. If immediate_only is false,
- * we are going to discard the whole event queue on return anyway,
- * so no need to bother with "retail" pfree's.
- *
- * In a scenario with many commands in a transaction and many
- * deferred-to-end-of-transaction triggers, it could get annoying
- * to rescan all the deferred triggers at each command end.
- * To speed this up, we could remember the actual end of the queue at
- * EndQuery and examine only events that are newer. On state changes
- * we simply reset the saved position to the beginning of the queue
- * and process all events once with the new states.
- */
-
- /* Make a per-tuple memory context for trigger function calls */
- per_tuple_context =
- AllocSetContextCreate(CurrentMemoryContext,
- "DeferredTriggerTupleContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-
- event = deftrig_events;
- while (event != NULL)
- {
- bool still_deferred_ones = false;
- DeferredTriggerEvent next_event;
- int i;
-
- /*
- * Check if event is already completely done.
- */
- if (! (event->dte_event & (TRIGGER_DEFERRED_DONE |
- TRIGGER_DEFERRED_CANCELED)))
- {
- MemoryContextReset(per_tuple_context);
-
- /*
- * Check each trigger item in the event.
- */
- for (i = 0; i < event->dte_n_items; i++)
- {
- if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE)
- continue;
-
- /*
- * This trigger item hasn't been called yet. Check if we
- * should call it now.
- */
- if (immediate_only &&
- deferredTriggerCheckState(event->dte_item[i].dti_tgoid,
- event->dte_item[i].dti_state))
- {
- still_deferred_ones = true;
- continue;
- }
-
- /*
- * So let's fire it... but first, open the correct relation
- * if this is not the same relation as before.
- */
- if (rel == NULL || rel->rd_id != event->dte_relid)
- {
- if (rel)
- heap_close(rel, NoLock);
- if (finfo)
- pfree(finfo);
-
- /*
- * We assume that an appropriate lock is still held by the
- * executor, so grab no new lock here.
- */
- rel = heap_open(event->dte_relid, NoLock);
-
- /*
- * Allocate space to cache fmgr lookup info for triggers
- * of this relation.
- */
- finfo = (FmgrInfo *)
- palloc(rel->trigdesc->numtriggers * sizeof(FmgrInfo));
- MemSet(finfo, 0,
- rel->trigdesc->numtriggers * sizeof(FmgrInfo));
- }
-
- DeferredTriggerExecute(event, i, rel, finfo,
- per_tuple_context);
-
- event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
- } /* end loop over items within event */
- }
-
- /*
- * If it's now completely done, throw it away.
- *
- * NB: it's possible the trigger calls above added more events to the
- * queue, or that calls we will do later will want to add more,
- * so we have to be careful about maintaining list validity here.
- */
- next_event = event->dte_next;
-
- if (still_deferred_ones)
- {
- /* Not done, keep in list */
- prev_event = event;
- }
- else
- {
- /* Done */
- if (immediate_only)
- {
- /* delink it from list and free it */
- if (prev_event)
- prev_event->dte_next = next_event;
- else
- deftrig_events = next_event;
- pfree(event);
- }
- else
- {
- /*
- * We will clean up later, but just for paranoia's sake,
- * mark the event done.
- */
- event->dte_event |= TRIGGER_DEFERRED_DONE;
- }
- }
-
- event = next_event;
- }
-
- /* Update list tail pointer in case we just deleted tail event */
- deftrig_event_tail = prev_event;
-
- /* Release working resources */
- if (rel)
- heap_close(rel, NoLock);
- if (finfo)
- pfree(finfo);
- MemoryContextDelete(per_tuple_context);
-}
-
-
-/* ----------
- * DeferredTriggerInit()
- *
- * Initialize the deferred trigger mechanism. This is called during
- * backend startup and is guaranteed to be before the first of all
- * transactions.
- * ----------
- */
-void
-DeferredTriggerInit(void)
-{
- /*
- * Since this context will never be reset, give it a minsize of 0.
- * This avoids using any memory if the session never stores anything.
- */
- deftrig_gcxt = AllocSetContextCreate(TopMemoryContext,
- "DeferredTriggerSession",
- 0,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-}
-
-
-/* ----------
- * DeferredTriggerBeginXact()
- *
- * Called at transaction start (either BEGIN or implicit for single
- * statement outside of transaction block).
- * ----------
- */
-void
-DeferredTriggerBeginXact(void)
-{
- MemoryContext oldcxt;
- List *l;
- DeferredTriggerStatus dflstat;
- DeferredTriggerStatus stat;
-
- if (deftrig_cxt != NULL)
- elog(ERROR,
- "DeferredTriggerBeginXact() called while inside transaction");
-
- /*
- * Create the per transaction memory context and copy all states from
- * the per session context to here. Set the minsize to 0 to avoid
- * wasting memory if there is no deferred trigger data.
- */
- deftrig_cxt = AllocSetContextCreate(TopTransactionContext,
- "DeferredTriggerXact",
- 0,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
- oldcxt = MemoryContextSwitchTo(deftrig_cxt);
-
- deftrig_all_isset = deftrig_dfl_all_isset;
- deftrig_all_isdeferred = deftrig_dfl_all_isdeferred;
-
- deftrig_trigstates = NIL;
- foreach(l, deftrig_dfl_trigstates)
- {
- dflstat = (DeferredTriggerStatus) lfirst(l);
- stat = (DeferredTriggerStatus)
- palloc(sizeof(DeferredTriggerStatusData));
-
- stat->dts_tgoid = dflstat->dts_tgoid;
- stat->dts_tgisdeferred = dflstat->dts_tgisdeferred;
-
- deftrig_trigstates = lappend(deftrig_trigstates, stat);
- }
-
- MemoryContextSwitchTo(oldcxt);
-
- deftrig_events = NULL;
- deftrig_event_tail = NULL;
-}
-
-
-/* ----------
- * DeferredTriggerEndQuery()
- *
- * Called after one query sent down by the user has completely been
- * processed. At this time we invoke all outstanding IMMEDIATE triggers.
- * ----------
- */
-void
-DeferredTriggerEndQuery(void)
-{
- /*
- * Ignore call if we aren't in a transaction.
- */
- if (deftrig_cxt == NULL)
- return;
-
- deferredTriggerInvokeEvents(true);
-}
-
-
-/* ----------
- * DeferredTriggerEndXact()
- *
- * Called just before the current transaction is committed. At this
- * time we invoke all DEFERRED triggers and tidy up.
- * ----------
- */
-void
-DeferredTriggerEndXact(void)
-{
- /*
- * Ignore call if we aren't in a transaction.
- */
- if (deftrig_cxt == NULL)
- return;
-
- deferredTriggerInvokeEvents(false);
-
- MemoryContextDelete(deftrig_cxt);
- deftrig_cxt = NULL;
-}
-
-
-/* ----------
- * DeferredTriggerAbortXact()
- *
- * The current transaction has entered the abort state.
- * All outstanding triggers are canceled so we simply throw
- * away anything we know.
- * ----------
- */
-void
-DeferredTriggerAbortXact(void)
-{
- /*
- * Ignore call if we aren't in a transaction.
- */
- if (deftrig_cxt == NULL)
- return;
-
- MemoryContextDelete(deftrig_cxt);
- deftrig_cxt = NULL;
-}
-
-
-/* ----------
- * DeferredTriggerSetState()
- *
- * Called for the users SET CONSTRAINTS ... utility command.
- * ----------
- */
-void
-DeferredTriggerSetState(ConstraintsSetStmt *stmt)
-{
- Relation tgrel;
- List *l;
- List *ls;
- List *loid = NIL;
- MemoryContext oldcxt;
- bool found;
- DeferredTriggerStatus state;
-
- /*
- * Handle SET CONSTRAINTS ALL ...
- */
- if (stmt->constraints == NIL)
- {
- if (!IsTransactionBlock())
- {
- /*
- * ... outside of a transaction block
- *
- * Drop all information about individual trigger states per
- * session.
- */
- l = deftrig_dfl_trigstates;
- while (l != NIL)
- {
- List *next = lnext(l);
-
- pfree(lfirst(l));
- pfree(l);
- l = next;
- }
- deftrig_dfl_trigstates = NIL;
-
- /*
- * Set the session ALL state to known.
- */
- deftrig_dfl_all_isset = true;
- deftrig_dfl_all_isdeferred = stmt->deferred;
-
- return;
- }
- else
- {
- /*
- * ... inside of a transaction block
- *
- * Drop all information about individual trigger states per
- * transaction.
- */
- l = deftrig_trigstates;
- while (l != NIL)
- {
- List *next = lnext(l);
-
- pfree(lfirst(l));
- pfree(l);
- l = next;
- }
- deftrig_trigstates = NIL;
-
- /*
- * Set the per transaction ALL state to known.
- */
- deftrig_all_isset = true;
- deftrig_all_isdeferred = stmt->deferred;
-
- return;
- }
- }
-
- /* ----------
- * Handle SET CONSTRAINTS constraint-name [, ...]
- * First lookup all trigger Oid's for the constraint names.
- * ----------
- */
- tgrel = heap_openr(TriggerRelationName, AccessShareLock);
-
- foreach(l, stmt->constraints)
- {
- char *cname = strVal(lfirst(l));
- ScanKeyData skey;
- SysScanDesc tgscan;
- HeapTuple htup;
-
- /*
- * Check that only named constraints are set explicitly
- */
- if (strlen(cname) == 0)
- elog(ERROR, "unnamed constraints cannot be set explicitly");
-
- /*
- * Setup to scan pg_trigger by tgconstrname ...
- */
- ScanKeyEntryInitialize(&skey,
- (bits16) 0x0,
- (AttrNumber) Anum_pg_trigger_tgconstrname,
- (RegProcedure) F_NAMEEQ,
- PointerGetDatum(cname));
-
- tgscan = systable_beginscan(tgrel, TriggerConstrNameIndex, true,
- SnapshotNow, 1, &skey);
-
- /*
- * ... and search for the constraint trigger row
- */
- found = false;
-
- while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
- {
- Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
- Oid constr_oid;
-
- /*
- * If we found some, check that they fit the deferrability but
- * skip ON <event> RESTRICT ones, since they are silently
- * never deferrable.
- */
- if (stmt->deferred && !pg_trigger->tgdeferrable &&
- pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
- pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL)
- elog(ERROR, "Constraint '%s' is not deferrable",
- cname);
-
- constr_oid = htup->t_data->t_oid;
- loid = lappendi(loid, constr_oid);
- found = true;
- }
-
- systable_endscan(tgscan);
-
- /*
- * Not found ?
- */
- if (!found)
- elog(ERROR, "Constraint '%s' does not exist", cname);
- }
- heap_close(tgrel, AccessShareLock);
-
- if (!IsTransactionBlock())
- {
- /*
- * Outside of a transaction block set the trigger states of
- * individual triggers on session level.
- */
- oldcxt = MemoryContextSwitchTo(deftrig_gcxt);
-
- foreach(l, loid)
- {
- found = false;
- foreach(ls, deftrig_dfl_trigstates)
- {
- state = (DeferredTriggerStatus) lfirst(ls);
- if (state->dts_tgoid == (Oid) lfirsti(l))
- {
- state->dts_tgisdeferred = stmt->deferred;
- found = true;
- break;
- }
- }
- if (!found)
- {
- state = (DeferredTriggerStatus)
- palloc(sizeof(DeferredTriggerStatusData));
- state->dts_tgoid = (Oid) lfirsti(l);
- state->dts_tgisdeferred = stmt->deferred;
-
- deftrig_dfl_trigstates =
- lappend(deftrig_dfl_trigstates, state);
- }
- }
-
- MemoryContextSwitchTo(oldcxt);
-
- return;
- }
- else
- {
- /*
- * Inside of a transaction block set the trigger states of
- * individual triggers on transaction level.
- */
- oldcxt = MemoryContextSwitchTo(deftrig_cxt);
-
- foreach(l, loid)
- {
- found = false;
- foreach(ls, deftrig_trigstates)
- {
- state = (DeferredTriggerStatus) lfirst(ls);
- if (state->dts_tgoid == (Oid) lfirsti(l))
- {
- state->dts_tgisdeferred = stmt->deferred;
- found = true;
- break;
- }
- }
- if (!found)
- {
- state = (DeferredTriggerStatus)
- palloc(sizeof(DeferredTriggerStatusData));
- state->dts_tgoid = (Oid) lfirsti(l);
- state->dts_tgisdeferred = stmt->deferred;
-
- deftrig_trigstates =
- lappend(deftrig_trigstates, state);
- }
- }
-
- MemoryContextSwitchTo(oldcxt);
-
- return;
- }
-}
-
-
-/* ----------
- * DeferredTriggerSaveEvent()
- *
- * Called by ExecAR...Triggers() to add the event to the queue.
- *
- * NOTE: should be called only if we've determined that an event must
- * be added to the queue.
- * ----------
- */
-static void
-DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- HeapTuple oldtup, HeapTuple newtup)
-{
- Relation rel = relinfo->ri_RelationDesc;
- TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
- MemoryContext oldcxt;
- DeferredTriggerEvent new_event;
- int new_size;
- int i;
- int ntriggers;
- int *tgindx;
- ItemPointerData oldctid;
- ItemPointerData newctid;
- TriggerData LocTriggerData;
-
- if (deftrig_cxt == NULL)
- elog(ERROR,
- "DeferredTriggerSaveEvent() called outside of transaction");
-
- /*
- * Get the CTID's of OLD and NEW
- */
- if (oldtup != NULL)
- ItemPointerCopy(&(oldtup->t_self), &(oldctid));
- else
- ItemPointerSetInvalid(&(oldctid));
- if (newtup != NULL)
- ItemPointerCopy(&(newtup->t_self), &(newctid));
- else
- ItemPointerSetInvalid(&(newctid));
-
- /*
- * Create a new event
- */
- oldcxt = MemoryContextSwitchTo(deftrig_cxt);
-
- ntriggers = trigdesc->n_after_row[event];
- tgindx = trigdesc->tg_after_row[event];
- new_size = offsetof(DeferredTriggerEventData, dte_item[0]) +
- ntriggers * sizeof(DeferredTriggerEventItem);
-
- new_event = (DeferredTriggerEvent) palloc(new_size);
- new_event->dte_next = NULL;
- new_event->dte_event = event & TRIGGER_EVENT_OPMASK;
- new_event->dte_relid = rel->rd_id;
- ItemPointerCopy(&oldctid, &(new_event->dte_oldctid));
- ItemPointerCopy(&newctid, &(new_event->dte_newctid));
- new_event->dte_n_items = ntriggers;
- for (i = 0; i < ntriggers; i++)
- {
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
-
- new_event->dte_item[i].dti_tgoid = trigger->tgoid;
- new_event->dte_item[i].dti_state =
- ((trigger->tgdeferrable) ?
- TRIGGER_DEFERRED_DEFERRABLE : 0) |
- ((trigger->tginitdeferred) ?
- TRIGGER_DEFERRED_INITDEFERRED : 0) |
- ((trigdesc->n_before_row[event] > 0) ?
- TRIGGER_DEFERRED_HAS_BEFORE : 0);
- }
-
- MemoryContextSwitchTo(oldcxt);
-
- switch (event & TRIGGER_EVENT_OPMASK)
- {
- case TRIGGER_EVENT_INSERT:
- /* nothing to do */
- break;
-
- case TRIGGER_EVENT_UPDATE:
- /*
- * Check if one of the referenced keys is changed.
- */
- for (i = 0; i < ntriggers; i++)
- {
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
- bool is_ri_trigger;
- bool key_unchanged;
-
- /*
- * We are interested in RI_FKEY triggers only.
- */
- switch (trigger->tgfoid)
- {
- case F_RI_FKEY_NOACTION_UPD:
- case F_RI_FKEY_CASCADE_UPD:
- case F_RI_FKEY_RESTRICT_UPD:
- case F_RI_FKEY_SETNULL_UPD:
- case F_RI_FKEY_SETDEFAULT_UPD:
- is_ri_trigger = true;
- break;
-
- default:
- is_ri_trigger = false;
- break;
- }
- if (!is_ri_trigger)
- continue;
-
- LocTriggerData.type = T_TriggerData;
- LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE;
- LocTriggerData.tg_relation = rel;
- LocTriggerData.tg_trigtuple = oldtup;
- LocTriggerData.tg_newtuple = newtup;
- LocTriggerData.tg_trigger = trigger;
-
- key_unchanged = RI_FKey_keyequal_upd(&LocTriggerData);
-
- if (key_unchanged)
- {
- /*
- * The key hasn't changed, so no need later to invoke
- * the trigger at all.
- */
- new_event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
- }
- }
-
- break;
-
- case TRIGGER_EVENT_DELETE:
- /* nothing to do */
- break;
- }
-
- /*
- * Add the new event to the queue.
- */
- deferredTriggerAddEvent(new_event);
-}
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
deleted file mode 100644
index 6aa5fae182e..00000000000
--- a/src/backend/commands/typecmds.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * typecmds.c
- * Routines for SQL commands that manipulate types (and domains).
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.3 2002/05/03 00:32:16 tgl Exp $
- *
- * DESCRIPTION
- * The "DefineFoo" routines take the parse tree and pick out the
- * appropriate arguments/flags, passing the results to the
- * corresponding "FooDefine" routines (in src/catalog) that do
- * the actual catalog-munging. These routines also verify permission
- * of the user to execute the command.
- *
- * NOTES
- * These things must be defined and committed in the following order:
- * "create function":
- * input/output, recv/send procedures
- * "create type":
- * type
- * "create operator":
- * operators
- *
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/heap.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_type.h"
-#include "commands/comment.h"
-#include "commands/defrem.h"
-#include "miscadmin.h"
-#include "parser/parse.h"
-#include "parser/parse_func.h"
-#include "parser/parse_type.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-static Oid findTypeIOFunction(List *procname, bool isOutput);
-
-
-/*
- * DefineType
- * Registers a new type.
- */
-void
-DefineType(List *names, List *parameters)
-{
- char *typeName;
- Oid typeNamespace;
- AclResult aclresult;
- int16 internalLength = -1; /* int2 */
- int16 externalLength = -1; /* int2 */
- Oid elemType = InvalidOid;
- List *inputName = NIL;
- List *outputName = NIL;
- List *sendName = NIL;
- List *receiveName = NIL;
- char *defaultValue = NULL;
- bool byValue = false;
- char delimiter = DEFAULT_TYPDELIM;
- char alignment = 'i'; /* default alignment */
- char storage = 'p'; /* default TOAST storage method */
- Oid inputOid;
- Oid outputOid;
- Oid sendOid;
- Oid receiveOid;
- char *shadow_type;
- List *pl;
- Oid typoid;
-
- /* Convert list of names to a name and namespace */
- typeNamespace = QualifiedNameGetCreationNamespace(names, &typeName);
-
- /* Check we have creation rights in target namespace */
- aclresult = pg_namespace_aclcheck(typeNamespace, GetUserId(), ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(typeNamespace));
-
- /*
- * Type names must be one character shorter than other names, allowing
- * room to create the corresponding array type name with prepended
- * "_".
- */
- if (strlen(typeName) > (NAMEDATALEN - 2))
- elog(ERROR, "DefineType: type names must be %d characters or less",
- NAMEDATALEN - 2);
-
- foreach(pl, parameters)
- {
- DefElem *defel = (DefElem *) lfirst(pl);
-
- if (strcasecmp(defel->defname, "internallength") == 0)
- internalLength = defGetTypeLength(defel);
- else if (strcasecmp(defel->defname, "externallength") == 0)
- externalLength = defGetTypeLength(defel);
- else if (strcasecmp(defel->defname, "input") == 0)
- inputName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "output") == 0)
- outputName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "send") == 0)
- sendName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "receive") == 0)
- receiveName = defGetQualifiedName(defel);
- else if (strcasecmp(defel->defname, "delimiter") == 0)
- {
- char *p = defGetString(defel);
-
- delimiter = p[0];
- }
- else if (strcasecmp(defel->defname, "element") == 0)
- elemType = typenameTypeId(defGetTypeName(defel));
- else if (strcasecmp(defel->defname, "default") == 0)
- defaultValue = defGetString(defel);
- else if (strcasecmp(defel->defname, "passedbyvalue") == 0)
- byValue = true;
- else if (strcasecmp(defel->defname, "alignment") == 0)
- {
- char *a = defGetString(defel);
-
- /*
- * Note: if argument was an unquoted identifier, parser will
- * have applied translations to it, so be prepared to
- * recognize translated type names as well as the nominal
- * form.
- */
- if (strcasecmp(a, "double") == 0 ||
- strcasecmp(a, "float8") == 0 ||
- strcasecmp(a, "pg_catalog.float8") == 0)
- alignment = 'd';
- else if (strcasecmp(a, "int4") == 0 ||
- strcasecmp(a, "pg_catalog.int4") == 0)
- alignment = 'i';
- else if (strcasecmp(a, "int2") == 0 ||
- strcasecmp(a, "pg_catalog.int2") == 0)
- alignment = 's';
- else if (strcasecmp(a, "char") == 0 ||
- strcasecmp(a, "pg_catalog.bpchar") == 0)
- alignment = 'c';
- else
- elog(ERROR, "DefineType: \"%s\" alignment not recognized",
- a);
- }
- else if (strcasecmp(defel->defname, "storage") == 0)
- {
- char *a = defGetString(defel);
-
- if (strcasecmp(a, "plain") == 0)
- storage = 'p';
- else if (strcasecmp(a, "external") == 0)
- storage = 'e';
- else if (strcasecmp(a, "extended") == 0)
- storage = 'x';
- else if (strcasecmp(a, "main") == 0)
- storage = 'm';
- else
- elog(ERROR, "DefineType: \"%s\" storage not recognized",
- a);
- }
- else
- {
- elog(WARNING, "DefineType: attribute \"%s\" not recognized",
- defel->defname);
- }
- }
-
- /*
- * make sure we have our required definitions
- */
- if (inputName == NIL)
- elog(ERROR, "Define: \"input\" unspecified");
- if (outputName == NIL)
- elog(ERROR, "Define: \"output\" unspecified");
-
- /* Convert I/O proc names to OIDs */
- inputOid = findTypeIOFunction(inputName, false);
- outputOid = findTypeIOFunction(outputName, true);
- if (sendName)
- sendOid = findTypeIOFunction(sendName, true);
- else
- sendOid = outputOid;
- if (receiveName)
- receiveOid = findTypeIOFunction(receiveName, false);
- else
- receiveOid = inputOid;
-
- /*
- * now have TypeCreate do all the real work.
- */
- typoid =
- TypeCreate(typeName, /* type name */
- typeNamespace, /* namespace */
- InvalidOid, /* preassigned type oid (not done here) */
- InvalidOid, /* relation oid (n/a here) */
- internalLength, /* internal size */
- externalLength, /* external size */
- 'b', /* type-type (base type) */
- delimiter, /* array element delimiter */
- inputOid, /* input procedure */
- outputOid, /* output procedure */
- receiveOid, /* receive procedure */
- sendOid, /* send procedure */
- elemType, /* element type ID */
- InvalidOid, /* base type ID (only for domains) */
- defaultValue, /* default type value */
- NULL, /* no binary form available */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- -1, /* typMod (Domains only) */
- 0, /* Array Dimensions of typbasetype */
- false); /* Type NOT NULL */
-
- /*
- * When we create a base type (as opposed to a complex type) we need
- * to have an array entry for it in pg_type as well.
- */
- shadow_type = makeArrayTypeName(typeName);
-
- /* alignment must be 'i' or 'd' for arrays */
- alignment = (alignment == 'd') ? 'd' : 'i';
-
- TypeCreate(shadow_type, /* type name */
- typeNamespace, /* namespace */
- InvalidOid, /* preassigned type oid (not done here) */
- InvalidOid, /* relation oid (n/a here) */
- -1, /* internal size */
- -1, /* external size */
- 'b', /* type-type (base type) */
- DEFAULT_TYPDELIM, /* array element delimiter */
- F_ARRAY_IN, /* input procedure */
- F_ARRAY_OUT, /* output procedure */
- F_ARRAY_IN, /* receive procedure */
- F_ARRAY_OUT, /* send procedure */
- typoid, /* element type ID */
- InvalidOid, /* base type ID */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- false, /* never passed by value */
- alignment, /* see above */
- 'x', /* ARRAY is always toastable */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
- false); /* Type NOT NULL */
-
- pfree(shadow_type);
-}
-
-
-/*
- * RemoveType
- * Removes a datatype.
- *
- * NOTE: since this tries to remove the associated array type too, it'll
- * only work on scalar types.
- */
-void
-RemoveType(List *names)
-{
- TypeName *typename;
- Relation relation;
- Oid typeoid;
- HeapTuple tup;
-
- /* Make a TypeName so we can use standard type lookup machinery */
- typename = makeNode(TypeName);
- typename->names = names;
- typename->typmod = -1;
- typename->arrayBounds = NIL;
-
- relation = heap_openr(TypeRelationName, RowExclusiveLock);
-
- /* Use LookupTypeName here so that shell types can be removed. */
- typeoid = LookupTypeName(typename);
- if (!OidIsValid(typeoid))
- elog(ERROR, "Type \"%s\" does not exist",
- TypeNameToString(typename));
-
- tup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(typeoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "Type \"%s\" does not exist",
- TypeNameToString(typename));
-
- /* Permission check: must own type or its namespace */
- if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, TypeNameToString(typename));
-
- /* Delete any comments associated with this type */
- DeleteComments(typeoid, RelationGetRelid(relation));
-
- /* Remove the type tuple from pg_type */
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- /* Now, delete the "array of" that type */
- typename->arrayBounds = makeList1(makeInteger(1));
-
- typeoid = LookupTypeName(typename);
- if (!OidIsValid(typeoid))
- elog(ERROR, "Type \"%s\" does not exist",
- TypeNameToString(typename));
-
- tup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(typeoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "Type \"%s\" does not exist",
- TypeNameToString(typename));
-
- DeleteComments(typeoid, RelationGetRelid(relation));
-
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- heap_close(relation, RowExclusiveLock);
-}
-
-
-/*
- * DefineDomain
- * Registers a new domain.
- */
-void
-DefineDomain(CreateDomainStmt *stmt)
-{
- char *domainName;
- Oid domainNamespace;
- AclResult aclresult;
- int16 internalLength;
- int16 externalLength;
- Oid inputProcedure;
- Oid outputProcedure;
- Oid receiveProcedure;
- Oid sendProcedure;
- bool byValue;
- char delimiter;
- char alignment;
- char storage;
- char typtype;
- Datum datum;
- bool isnull;
- char *defaultValue = NULL;
- char *defaultValueBin = NULL;
- bool typNotNull = false;
- Oid basetypelem;
- int32 typNDims = length(stmt->typename->arrayBounds);
- HeapTuple typeTup;
- List *schema = stmt->constraints;
- List *listptr;
-
- /* Convert list of names to a name and namespace */
- domainNamespace = QualifiedNameGetCreationNamespace(stmt->domainname,
- &domainName);
-
- /* Check we have creation rights in target namespace */
- aclresult = pg_namespace_aclcheck(domainNamespace, GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, get_namespace_name(domainNamespace));
-
- /*
- * Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer.
- */
- if (strlen(domainName) > (NAMEDATALEN - 1))
- elog(ERROR, "CREATE DOMAIN: domain names must be %d characters or less",
- NAMEDATALEN - 1);
-
- /*
- * Look up the base type.
- */
- typeTup = typenameType(stmt->typename);
-
- /*
- * What we really don't want is domains of domains. This could cause all sorts
- * of neat issues if we allow that.
- *
- * With testing, we may determine complex types should be allowed
- */
- typtype = ((Form_pg_type) GETSTRUCT(typeTup))->typtype;
- if (typtype != 'b')
- elog(ERROR, "DefineDomain: %s is not a basetype",
- TypeNameToString(stmt->typename));
-
- /* passed by value */
- byValue = ((Form_pg_type) GETSTRUCT(typeTup))->typbyval;
-
- /* Required Alignment */
- alignment = ((Form_pg_type) GETSTRUCT(typeTup))->typalign;
-
- /* TOAST Strategy */
- storage = ((Form_pg_type) GETSTRUCT(typeTup))->typstorage;
-
- /* Storage Length */
- internalLength = ((Form_pg_type) GETSTRUCT(typeTup))->typlen;
-
- /* External Length (unused) */
- externalLength = ((Form_pg_type) GETSTRUCT(typeTup))->typprtlen;
-
- /* Array element Delimiter */
- delimiter = ((Form_pg_type) GETSTRUCT(typeTup))->typdelim;
-
- /* I/O Functions */
- inputProcedure = ((Form_pg_type) GETSTRUCT(typeTup))->typinput;
- outputProcedure = ((Form_pg_type) GETSTRUCT(typeTup))->typoutput;
- receiveProcedure = ((Form_pg_type) GETSTRUCT(typeTup))->typreceive;
- sendProcedure = ((Form_pg_type) GETSTRUCT(typeTup))->typsend;
-
- /* Inherited default value */
- datum = SysCacheGetAttr(TYPEOID, typeTup,
- Anum_pg_type_typdefault, &isnull);
- if (!isnull)
- defaultValue = DatumGetCString(DirectFunctionCall1(textout, datum));
-
- /* Inherited default binary value */
- datum = SysCacheGetAttr(TYPEOID, typeTup,
- Anum_pg_type_typdefaultbin, &isnull);
- if (!isnull)
- defaultValueBin = DatumGetCString(DirectFunctionCall1(textout, datum));
-
- /*
- * Pull out the typelem name of the parent OID.
- *
- * This is what enables us to make a domain of an array
- */
- basetypelem = ((Form_pg_type) GETSTRUCT(typeTup))->typelem;
-
- /*
- * Run through constraints manually to avoid the additional
- * processing conducted by DefineRelation() and friends.
- *
- * Besides, we don't want any constraints to be cooked. We'll
- * do that when the table is created via MergeDomainAttributes().
- */
- foreach(listptr, schema)
- {
- Constraint *colDef = lfirst(listptr);
- bool nullDefined = false;
- Node *expr;
- ParseState *pstate;
-
- switch (colDef->contype)
- {
- /*
- * The inherited default value may be overridden by the user
- * with the DEFAULT <expr> statement.
- *
- * We have to search the entire constraint tree returned as we
- * don't want to cook or fiddle too much.
- */
- case CONSTR_DEFAULT:
- /* Create a dummy ParseState for transformExpr */
- pstate = make_parsestate(NULL);
- /*
- * Cook the colDef->raw_expr into an expression.
- * Note: Name is strictly for error message
- */
- expr = cookDefault(pstate, colDef->raw_expr,
- typeTup->t_data->t_oid,
- stmt->typename->typmod,
- domainName);
- /*
- * Expression must be stored as a nodeToString result,
- * but we also require a valid textual representation
- * (mainly to make life easier for pg_dump).
- */
- defaultValue = deparse_expression(expr,
- deparse_context_for(domainName,
- InvalidOid),
- false);
- defaultValueBin = nodeToString(expr);
- break;
-
- /*
- * Find the NULL constraint.
- */
- case CONSTR_NOTNULL:
- if (nullDefined) {
- elog(ERROR, "CREATE DOMAIN has conflicting NULL / NOT NULL constraint");
- } else {
- typNotNull = true;
- nullDefined = true;
- }
- break;
-
- case CONSTR_NULL:
- if (nullDefined) {
- elog(ERROR, "CREATE DOMAIN has conflicting NULL / NOT NULL constraint");
- } else {
- typNotNull = false;
- nullDefined = true;
- }
- break;
-
- case CONSTR_UNIQUE:
- elog(ERROR, "CREATE DOMAIN / UNIQUE indexes not supported");
- break;
-
- case CONSTR_PRIMARY:
- elog(ERROR, "CREATE DOMAIN / PRIMARY KEY indexes not supported");
- break;
-
- case CONSTR_CHECK:
- elog(ERROR, "DefineDomain: CHECK Constraints not supported");
- break;
-
- case CONSTR_ATTR_DEFERRABLE:
- case CONSTR_ATTR_NOT_DEFERRABLE:
- case CONSTR_ATTR_DEFERRED:
- case CONSTR_ATTR_IMMEDIATE:
- elog(ERROR, "DefineDomain: DEFERRABLE, NON DEFERRABLE, DEFERRED and IMMEDIATE not supported");
- break;
-
- default:
- elog(ERROR, "DefineDomain: unrecognized constraint node type");
- break;
- }
- }
-
- /*
- * Have TypeCreate do all the real work.
- */
- TypeCreate(domainName, /* type name */
- domainNamespace, /* namespace */
- InvalidOid, /* preassigned type oid (not done here) */
- InvalidOid, /* relation oid (n/a here) */
- internalLength, /* internal size */
- externalLength, /* external size */
- 'd', /* type-type (domain type) */
- delimiter, /* array element delimiter */
- inputProcedure, /* input procedure */
- outputProcedure, /* output procedure */
- receiveProcedure, /* receive procedure */
- sendProcedure, /* send procedure */
- basetypelem, /* element type ID */
- typeTup->t_data->t_oid, /* base type ID */
- defaultValue, /* default type value (text) */
- defaultValueBin, /* default type value (binary) */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- stmt->typename->typmod, /* typeMod value */
- typNDims, /* Array dimensions for base type */
- typNotNull); /* Type NOT NULL */
-
- /*
- * Now we can clean up.
- */
- ReleaseSysCache(typeTup);
-}
-
-
-/*
- * RemoveDomain
- * Removes a domain.
- */
-void
-RemoveDomain(List *names, int behavior)
-{
- TypeName *typename;
- Relation relation;
- Oid typeoid;
- HeapTuple tup;
- char typtype;
-
- /* CASCADE unsupported */
- if (behavior == CASCADE)
- elog(ERROR, "DROP DOMAIN does not support the CASCADE keyword");
-
- /* Make a TypeName so we can use standard type lookup machinery */
- typename = makeNode(TypeName);
- typename->names = names;
- typename->typmod = -1;
- typename->arrayBounds = NIL;
-
- relation = heap_openr(TypeRelationName, RowExclusiveLock);
-
- typeoid = typenameTypeId(typename);
-
- tup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(typeoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "RemoveDomain: type '%s' does not exist",
- TypeNameToString(typename));
-
- /* Permission check: must own type or its namespace */
- if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, TypeNameToString(typename));
-
- /* Check that this is actually a domain */
- typtype = ((Form_pg_type) GETSTRUCT(tup))->typtype;
-
- if (typtype != 'd')
- elog(ERROR, "%s is not a domain",
- TypeNameToString(typename));
-
- /* Delete any comments associated with this type */
- DeleteComments(typeoid, RelationGetRelid(relation));
-
- /* Remove the type tuple from pg_type */
- simple_heap_delete(relation, &tup->t_self);
-
- ReleaseSysCache(tup);
-
- /* At present, domains don't have associated array types */
-
- heap_close(relation, RowExclusiveLock);
-}
-
-
-/*
- * Find a suitable I/O function for a type.
- */
-static Oid
-findTypeIOFunction(List *procname, bool isOutput)
-{
- Oid argList[FUNC_MAX_ARGS];
- int nargs;
- Oid procOid;
-
- /*
- * First look for a 1-argument func with all argtypes 0. This is
- * valid for all kinds of procedure.
- */
- MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
-
- procOid = LookupFuncName(procname, 1, argList);
-
- if (!OidIsValid(procOid))
- {
- /*
- * Alternatively, input procedures may take 3 args (data
- * value, element OID, atttypmod); the pg_proc argtype
- * signature is 0,OIDOID,INT4OID. Output procedures may
- * take 2 args (data value, element OID).
- */
- if (isOutput)
- {
- /* output proc */
- nargs = 2;
- argList[1] = OIDOID;
- }
- else
- {
- /* input proc */
- nargs = 3;
- argList[1] = OIDOID;
- argList[2] = INT4OID;
- }
- procOid = LookupFuncName(procname, nargs, argList);
-
- if (!OidIsValid(procOid))
- func_error("TypeCreate", procname, 1, argList, NULL);
- }
-
- return procOid;
-}
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
deleted file mode 100644
index fef3049a958..00000000000
--- a/src/backend/commands/user.c
+++ /dev/null
@@ -1,1525 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * user.c
- * Commands for manipulating users and groups.
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.105 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <unistd.h>
-
-#include "access/heapam.h"
-#include "catalog/catname.h"
-#include "catalog/pg_database.h"
-#include "catalog/pg_shadow.h"
-#include "catalog/pg_group.h"
-#include "catalog/indexing.h"
-#include "commands/user.h"
-#include "libpq/crypt.h"
-#include "miscadmin.h"
-#include "storage/pmsignal.h"
-#include "utils/acl.h"
-#include "utils/array.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/guc.h"
-#include "utils/lsyscache.h"
-#include "utils/syscache.h"
-
-
-extern bool Password_encryption;
-
-static void CheckPgUserAclNotNull(void);
-static void UpdateGroupMembership(Relation group_rel, HeapTuple group_tuple,
- List *members);
-static IdList *IdListToArray(List *members);
-static List *IdArrayToList(IdList *oldarray);
-
-
-/*
- * fputs_quote
- *
- * Outputs string in quotes, with double-quotes duplicated.
- * We could use quote_ident(), but that expects varlena.
- */
-static void fputs_quote(char *str, FILE *fp)
-{
- fputc('"', fp);
- while (*str)
- {
- fputc(*str, fp);
- if (*str == '"')
- fputc('"', fp);
- str++;
- }
- fputc('"', fp);
-}
-
-
-
-/*
- * group_getfilename --- get full pathname of group file
- *
- * Note that result string is palloc'd, and should be freed by the caller.
- */
-char *
-group_getfilename(void)
-{
- int bufsize;
- char *pfnam;
-
- bufsize = strlen(DataDir) + strlen("/global/") +
- strlen(USER_GROUP_FILE) + 1;
- pfnam = (char *) palloc(bufsize);
- snprintf(pfnam, bufsize, "%s/global/%s", DataDir, USER_GROUP_FILE);
-
- return pfnam;
-}
-
-
-
-/*
- * Get full pathname of password file.
- * Note that result string is palloc'd, and should be freed by the caller.
- */
-char *
-user_getfilename(void)
-{
- int bufsize;
- char *pfnam;
-
- bufsize = strlen(DataDir) + strlen("/global/") +
- strlen(PWD_FILE) + 1;
- pfnam = (char *) palloc(bufsize);
- snprintf(pfnam, bufsize, "%s/global/%s", DataDir, PWD_FILE);
-
- return pfnam;
-}
-
-
-
-/*
- * write_group_file for trigger update_pg_pwd_and_pg_group
- */
-static void
-write_group_file(Relation urel, Relation grel)
-{
- char *filename,
- *tempname;
- int bufsize;
- FILE *fp;
- mode_t oumask;
- HeapScanDesc scan;
- HeapTuple tuple;
- TupleDesc dsc = RelationGetDescr(grel);
-
- /*
- * Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the pg_group file while the postmaster might
- * be reading from it.
- */
- filename = group_getfilename();
- bufsize = strlen(filename) + 12;
- tempname = (char *) palloc(bufsize);
-
- snprintf(tempname, bufsize, "%s.%d", filename, MyProcPid);
- oumask = umask((mode_t) 077);
- fp = AllocateFile(tempname, "w");
- umask(oumask);
- if (fp == NULL)
- elog(ERROR, "write_group_file: unable to write %s: %m", tempname);
-
- /* read table */
- scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Datum datum, grolist_datum;
- bool isnull;
- char *groname;
- IdList *grolist_p;
- AclId *aidp;
- int i, j,
- num;
- char *usename;
- bool first_user = true;
-
- datum = heap_getattr(tuple, Anum_pg_group_groname, dsc, &isnull);
- /* ignore NULL groupnames --- shouldn't happen */
- if (isnull)
- continue;
- groname = NameStr(*DatumGetName(datum));
-
- /*
- * Check for illegal characters in the group name.
- */
- i = strcspn(groname, "\n");
- if (groname[i] != '\0')
- {
- elog(LOG, "Invalid group name '%s'", groname);
- continue;
- }
-
- grolist_datum = heap_getattr(tuple, Anum_pg_group_grolist, dsc, &isnull);
- /* Ignore NULL group lists */
- if (isnull)
- continue;
-
- /* be sure the IdList is not toasted */
- grolist_p = DatumGetIdListP(grolist_datum);
-
- /* scan grolist */
- num = IDLIST_NUM(grolist_p);
- aidp = IDLIST_DAT(grolist_p);
- for (i = 0; i < num; ++i)
- {
- tuple = SearchSysCache(SHADOWSYSID,
- PointerGetDatum(aidp[i]),
- 0, 0, 0);
- if (HeapTupleIsValid(tuple))
- {
- usename = NameStr(((Form_pg_shadow) GETSTRUCT(tuple))->usename);
-
- /*
- * Check for illegal characters in the user name.
- */
- j = strcspn(usename, "\n");
- if (usename[j] != '\0')
- {
- elog(LOG, "Invalid user name '%s'", usename);
- continue;
- }
-
- /* File format is:
- * "dbname" "user1" "user2" "user3"
- */
- if (first_user)
- {
- fputs_quote(groname, fp);
- fputs("\t", fp);
- }
- else
- fputs(" ", fp);
-
- first_user = false;
- fputs_quote(usename, fp);
-
- ReleaseSysCache(tuple);
- }
- }
- if (!first_user)
- fputs("\n", fp);
- /* if IdList was toasted, free detoasted copy */
- if ((Pointer) grolist_p != DatumGetPointer(grolist_datum))
- pfree(grolist_p);
- }
- heap_endscan(scan);
-
- fflush(fp);
- if (ferror(fp))
- elog(ERROR, "%s: %m", tempname);
- FreeFile(fp);
-
- /*
- * Rename the temp file to its final name, deleting the old pg_pwd. We
- * expect that rename(2) is an atomic action.
- */
- if (rename(tempname, filename))
- elog(ERROR, "rename %s to %s: %m", tempname, filename);
-
- pfree((void *) tempname);
- pfree((void *) filename);
-}
-
-
-
-/*
- * write_password_file for trigger update_pg_pwd_and_pg_group
- *
- * copy the modified contents of pg_shadow to a file used by the postmaster
- * for user authentication. The file is stored as $PGDATA/global/pg_pwd.
- *
- * This function set is both a trigger function for direct updates to pg_shadow
- * as well as being called directly from create/alter/drop user.
- *
- * We raise an error to force transaction rollback if we detect an illegal
- * username or password --- illegal being defined as values that would
- * mess up the pg_pwd parser.
- */
-static void
-write_user_file(Relation urel)
-{
- char *filename,
- *tempname;
- int bufsize;
- FILE *fp;
- mode_t oumask;
- HeapScanDesc scan;
- HeapTuple tuple;
- TupleDesc dsc = RelationGetDescr(urel);
-
- /*
- * Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the pg_pwd file while the postmaster might
- * be reading from it.
- */
- filename = user_getfilename();
- bufsize = strlen(filename) + 12;
- tempname = (char *) palloc(bufsize);
-
- snprintf(tempname, bufsize, "%s.%d", filename, MyProcPid);
- oumask = umask((mode_t) 077);
- fp = AllocateFile(tempname, "w");
- umask(oumask);
- if (fp == NULL)
- elog(ERROR, "write_password_file: unable to write %s: %m", tempname);
-
- /* read table */
- scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Datum datum;
- bool isnull;
- char *usename,
- *passwd,
- *valuntil;
- int i;
-
- datum = heap_getattr(tuple, Anum_pg_shadow_usename, dsc, &isnull);
- /* ignore NULL usernames (shouldn't happen) */
- if (isnull)
- continue;
- usename = NameStr(*DatumGetName(datum));
-
- datum = heap_getattr(tuple, Anum_pg_shadow_passwd, dsc, &isnull);
-
- /*
- * It can be argued that people having a null password shouldn't
- * be allowed to connect under password authentication, because
- * they need to have a password set up first. If you think
- * assuming an empty password in that case is better, change this
- * logic to look something like the code for valuntil.
- */
- if (isnull)
- continue;
-
- passwd = DatumGetCString(DirectFunctionCall1(textout, datum));
-
- datum = heap_getattr(tuple, Anum_pg_shadow_valuntil, dsc, &isnull);
- if (isnull)
- valuntil = pstrdup("");
- else
- valuntil = DatumGetCString(DirectFunctionCall1(nabstimeout, datum));
-
- /*
- * Check for illegal characters in the username and password.
- */
- i = strcspn(usename, "\n");
- if (usename[i] != '\0')
- elog(ERROR, "Invalid user name '%s'", usename);
- i = strcspn(passwd, "\n");
- if (passwd[i] != '\0')
- elog(ERROR, "Invalid user password '%s'", passwd);
-
- /*
- * The extra columns we emit here are not really necessary. To
- * remove them, the parser in backend/libpq/crypt.c would need to
- * be adjusted.
- */
- fputs_quote(usename, fp);
- fputs(" ", fp);
- fputs_quote(passwd, fp);
- fputs(" ", fp);
- fputs_quote(valuntil, fp);
- fputs("\n", fp);
-
- pfree(passwd);
- pfree(valuntil);
- }
- heap_endscan(scan);
-
- fflush(fp);
- if (ferror(fp))
- elog(ERROR, "%s: %m", tempname);
- FreeFile(fp);
-
- /*
- * Rename the temp file to its final name, deleting the old pg_pwd. We
- * expect that rename(2) is an atomic action.
- */
- if (rename(tempname, filename))
- elog(ERROR, "rename %s to %s: %m", tempname, filename);
-
- pfree((void *) tempname);
- pfree((void *) filename);
-}
-
-
-
-/* This is the wrapper for triggers. */
-Datum
-update_pg_pwd_and_pg_group(PG_FUNCTION_ARGS)
-{
- /*
- * ExclusiveLock ensures no one modifies pg_shadow while we read it,
- * and that only one backend rewrites the flat file at a time. It's
- * OK to allow normal reads of pg_shadow in parallel, however.
- */
- Relation urel = heap_openr(ShadowRelationName, ExclusiveLock);
- Relation grel = heap_openr(GroupRelationName, ExclusiveLock);
-
- write_user_file(urel);
- write_group_file(urel, grel);
- /* OK to release lock, since we did not modify the relation */
- heap_close(grel, ExclusiveLock);
- heap_close(urel, ExclusiveLock);
-
- /*
- * Signal the postmaster to reload its password & group-file cache.
- */
- SendPostmasterSignal(PMSIGNAL_PASSWORD_CHANGE);
-
- return PointerGetDatum(NULL);
-}
-
-
-
-/*
- * CREATE USER
- */
-void
-CreateUser(CreateUserStmt *stmt)
-{
- Relation pg_shadow_rel;
- TupleDesc pg_shadow_dsc;
- HeapScanDesc scan;
- HeapTuple tuple;
- Datum new_record[Natts_pg_shadow];
- char new_record_nulls[Natts_pg_shadow];
- bool user_exists = false,
- sysid_exists = false,
- havesysid = false;
- int max_id;
- List *item,
- *option;
- char *password = NULL; /* PostgreSQL user password */
- bool encrypt_password = Password_encryption; /* encrypt password? */
- char encrypted_password[MD5_PASSWD_LEN + 1];
- int sysid = 0; /* PgSQL system id (valid if havesysid) */
- bool createdb = false; /* Can the user create databases? */
- bool createuser = false; /* Can this user create users? */
- List *groupElts = NIL; /* The groups the user is a member of */
- char *validUntil = NULL; /* The time the login is valid
- * until */
- DefElem *dpassword = NULL;
- DefElem *dsysid = NULL;
- DefElem *dcreatedb = NULL;
- DefElem *dcreateuser = NULL;
- DefElem *dgroupElts = NULL;
- DefElem *dvalidUntil = NULL;
-
- /* Extract options from the statement node tree */
- foreach(option, stmt->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "password") == 0 ||
- strcmp(defel->defname, "encryptedPassword") == 0 ||
- strcmp(defel->defname, "unencryptedPassword") == 0)
- {
- if (dpassword)
- elog(ERROR, "CREATE USER: conflicting options");
- dpassword = defel;
- if (strcmp(defel->defname, "encryptedPassword") == 0)
- encrypt_password = true;
- else if (strcmp(defel->defname, "unencryptedPassword") == 0)
- encrypt_password = false;
- }
- else if (strcmp(defel->defname, "sysid") == 0)
- {
- if (dsysid)
- elog(ERROR, "CREATE USER: conflicting options");
- dsysid = defel;
- }
- else if (strcmp(defel->defname, "createdb") == 0)
- {
- if (dcreatedb)
- elog(ERROR, "CREATE USER: conflicting options");
- dcreatedb = defel;
- }
- else if (strcmp(defel->defname, "createuser") == 0)
- {
- if (dcreateuser)
- elog(ERROR, "CREATE USER: conflicting options");
- dcreateuser = defel;
- }
- else if (strcmp(defel->defname, "groupElts") == 0)
- {
- if (dgroupElts)
- elog(ERROR, "CREATE USER: conflicting options");
- dgroupElts = defel;
- }
- else if (strcmp(defel->defname, "validUntil") == 0)
- {
- if (dvalidUntil)
- elog(ERROR, "CREATE USER: conflicting options");
- dvalidUntil = defel;
- }
- else
- elog(ERROR, "CREATE USER: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (dcreatedb)
- createdb = intVal(dcreatedb->arg) != 0;
- if (dcreateuser)
- createuser = intVal(dcreateuser->arg) != 0;
- if (dsysid)
- {
- sysid = intVal(dsysid->arg);
- if (sysid <= 0)
- elog(ERROR, "user id must be positive");
- havesysid = true;
- }
- if (dvalidUntil)
- validUntil = strVal(dvalidUntil->arg);
- if (dpassword)
- password = strVal(dpassword->arg);
- if (dgroupElts)
- groupElts = (List *) dgroupElts->arg;
-
- /* Check some permissions first */
- if (password)
- CheckPgUserAclNotNull();
-
- if (!superuser())
- elog(ERROR, "CREATE USER: permission denied");
-
- if (strcmp(stmt->user, "public") == 0)
- elog(ERROR, "CREATE USER: user name \"%s\" is reserved",
- stmt->user);
-
- /*
- * Scan the pg_shadow relation to be certain the user or id doesn't
- * already exist. Note we secure exclusive lock, because we also need
- * to be sure of what the next usesysid should be, and we need to
- * protect our update of the flat password file.
- */
- pg_shadow_rel = heap_openr(ShadowRelationName, ExclusiveLock);
- pg_shadow_dsc = RelationGetDescr(pg_shadow_rel);
-
- scan = heap_beginscan(pg_shadow_rel, SnapshotNow, 0, NULL);
- max_id = 99; /* start auto-assigned ids at 100 */
- while (!user_exists && !sysid_exists &&
- (tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_shadow shadow_form = (Form_pg_shadow) GETSTRUCT(tuple);
- int32 this_sysid;
-
- user_exists = (strcmp(NameStr(shadow_form->usename), stmt->user) == 0);
-
- this_sysid = shadow_form->usesysid;
- if (havesysid) /* customized id wanted */
- sysid_exists = (this_sysid == sysid);
- else
- {
- /* pick 1 + max */
- if (this_sysid > max_id)
- max_id = this_sysid;
- }
- }
- heap_endscan(scan);
-
- if (user_exists)
- elog(ERROR, "CREATE USER: user name \"%s\" already exists",
- stmt->user);
- if (sysid_exists)
- elog(ERROR, "CREATE USER: sysid %d is already assigned", sysid);
-
- /* If no sysid given, use max existing id + 1 */
- if (!havesysid)
- sysid = max_id + 1;
-
- /*
- * Build a tuple to insert
- */
- MemSet(new_record, 0, sizeof(new_record));
- MemSet(new_record_nulls, ' ', sizeof(new_record_nulls));
-
- new_record[Anum_pg_shadow_usename - 1] =
- DirectFunctionCall1(namein, CStringGetDatum(stmt->user));
- new_record[Anum_pg_shadow_usesysid - 1] = Int32GetDatum(sysid);
- AssertState(BoolIsValid(createdb));
- new_record[Anum_pg_shadow_usecreatedb - 1] = BoolGetDatum(createdb);
- new_record[Anum_pg_shadow_usetrace - 1] = BoolGetDatum(false);
- AssertState(BoolIsValid(createuser));
- new_record[Anum_pg_shadow_usesuper - 1] = BoolGetDatum(createuser);
- /* superuser gets catupd right by default */
- new_record[Anum_pg_shadow_usecatupd - 1] = BoolGetDatum(createuser);
-
- if (password)
- {
- if (!encrypt_password || isMD5(password))
- new_record[Anum_pg_shadow_passwd - 1] =
- DirectFunctionCall1(textin, CStringGetDatum(password));
- else
- {
- if (!EncryptMD5(password, stmt->user, strlen(stmt->user),
- encrypted_password))
- elog(ERROR, "CREATE USER: password encryption failed");
- new_record[Anum_pg_shadow_passwd - 1] =
- DirectFunctionCall1(textin, CStringGetDatum(encrypted_password));
- }
- }
- else
- new_record_nulls[Anum_pg_shadow_passwd - 1] = 'n';
-
- if (validUntil)
- new_record[Anum_pg_shadow_valuntil - 1] =
- DirectFunctionCall1(nabstimein, CStringGetDatum(validUntil));
- else
- new_record_nulls[Anum_pg_shadow_valuntil - 1] = 'n';
-
- new_record_nulls[Anum_pg_shadow_useconfig - 1] = 'n';
-
- tuple = heap_formtuple(pg_shadow_dsc, new_record, new_record_nulls);
-
- /*
- * Insert new record in the pg_shadow table
- */
- simple_heap_insert(pg_shadow_rel, tuple);
-
- /*
- * Update indexes
- */
- if (RelationGetForm(pg_shadow_rel)->relhasindex)
- {
- Relation idescs[Num_pg_shadow_indices];
-
- CatalogOpenIndices(Num_pg_shadow_indices,
- Name_pg_shadow_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_shadow_indices, pg_shadow_rel,
- tuple);
- CatalogCloseIndices(Num_pg_shadow_indices, idescs);
- }
-
- /*
- * Add the user to the groups specified. We'll just call the below
- * AlterGroup for this.
- */
- foreach(item, groupElts)
- {
- AlterGroupStmt ags;
-
- ags.name = strVal(lfirst(item)); /* the group name to add
- * this in */
- ags.action = +1;
- ags.listUsers = makeList1(makeInteger(sysid));
- AlterGroup(&ags, "CREATE USER");
- }
-
- /*
- * Now we can clean up; but keep lock until commit.
- */
- heap_close(pg_shadow_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
-
-
-
-/*
- * ALTER USER
- */
-void
-AlterUser(AlterUserStmt *stmt)
-{
- Datum new_record[Natts_pg_shadow];
- char new_record_nulls[Natts_pg_shadow];
- char new_record_repl[Natts_pg_shadow];
- Relation pg_shadow_rel;
- TupleDesc pg_shadow_dsc;
- HeapTuple tuple,
- new_tuple;
- List *option;
- char *password = NULL; /* PostgreSQL user password */
- bool encrypt_password = Password_encryption; /* encrypt password? */
- char encrypted_password[MD5_PASSWD_LEN + 1];
- int createdb = -1; /* Can the user create databases? */
- int createuser = -1; /* Can this user create users? */
- char *validUntil = NULL; /* The time the login is valid
- * until */
- DefElem *dpassword = NULL;
- DefElem *dcreatedb = NULL;
- DefElem *dcreateuser = NULL;
- DefElem *dvalidUntil = NULL;
-
- /* Extract options from the statement node tree */
- foreach(option, stmt->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "password") == 0 ||
- strcmp(defel->defname, "encryptedPassword") == 0 ||
- strcmp(defel->defname, "unencryptedPassword") == 0)
- {
- if (dpassword)
- elog(ERROR, "ALTER USER: conflicting options");
- dpassword = defel;
- if (strcmp(defel->defname, "encryptedPassword") == 0)
- encrypt_password = true;
- else if (strcmp(defel->defname, "unencryptedPassword") == 0)
- encrypt_password = false;
- }
- else if (strcmp(defel->defname, "createdb") == 0)
- {
- if (dcreatedb)
- elog(ERROR, "ALTER USER: conflicting options");
- dcreatedb = defel;
- }
- else if (strcmp(defel->defname, "createuser") == 0)
- {
- if (dcreateuser)
- elog(ERROR, "ALTER USER: conflicting options");
- dcreateuser = defel;
- }
- else if (strcmp(defel->defname, "validUntil") == 0)
- {
- if (dvalidUntil)
- elog(ERROR, "ALTER USER: conflicting options");
- dvalidUntil = defel;
- }
- else
- elog(ERROR, "ALTER USER: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (dcreatedb)
- createdb = intVal(dcreatedb->arg);
- if (dcreateuser)
- createuser = intVal(dcreateuser->arg);
- if (dvalidUntil)
- validUntil = strVal(dvalidUntil->arg);
- if (dpassword)
- password = strVal(dpassword->arg);
-
- if (password)
- CheckPgUserAclNotNull();
-
- /* must be superuser or just want to change your own password */
- if (!superuser() &&
- !(createdb < 0 &&
- createuser < 0 &&
- !validUntil &&
- password &&
- strcmp(GetUserNameFromId(GetUserId()), stmt->user) == 0))
- elog(ERROR, "ALTER USER: permission denied");
-
- /* changes to the flat password file cannot be rolled back */
- if (IsTransactionBlock() && password)
- elog(NOTICE, "ALTER USER: password changes cannot be rolled back");
-
- /*
- * Scan the pg_shadow relation to be certain the user exists. Note we
- * secure exclusive lock to protect our update of the flat password
- * file.
- */
- pg_shadow_rel = heap_openr(ShadowRelationName, ExclusiveLock);
- pg_shadow_dsc = RelationGetDescr(pg_shadow_rel);
-
- tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(stmt->user),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "ALTER USER: user \"%s\" does not exist", stmt->user);
-
- /*
- * Build an updated tuple, perusing the information just obtained
- */
- MemSet(new_record, 0, sizeof(new_record));
- MemSet(new_record_nulls, ' ', sizeof(new_record_nulls));
- MemSet(new_record_repl, ' ', sizeof(new_record_repl));
-
- new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
- new_record_repl[Anum_pg_shadow_usename - 1] = 'r';
-
- /* createdb */
- if (createdb >= 0)
- {
- new_record[Anum_pg_shadow_usecreatedb - 1] = BoolGetDatum(createdb > 0);
- new_record_repl[Anum_pg_shadow_usecreatedb - 1] = 'r';
- }
-
- /*
- * createuser (superuser) and catupd
- *
- * XXX It's rather unclear how to handle catupd. It's probably best to
- * keep it equal to the superuser status, otherwise you could end up
- * with a situation where no existing superuser can alter the
- * catalogs, including pg_shadow!
- */
- if (createuser >= 0)
- {
- new_record[Anum_pg_shadow_usesuper - 1] = BoolGetDatum(createuser > 0);
- new_record_repl[Anum_pg_shadow_usesuper - 1] = 'r';
-
- new_record[Anum_pg_shadow_usecatupd - 1] = BoolGetDatum(createuser > 0);
- new_record_repl[Anum_pg_shadow_usecatupd - 1] = 'r';
- }
-
- /* password */
- if (password)
- {
- if (!encrypt_password || isMD5(password))
- new_record[Anum_pg_shadow_passwd - 1] =
- DirectFunctionCall1(textin, CStringGetDatum(password));
- else
- {
- if (!EncryptMD5(password, stmt->user, strlen(stmt->user),
- encrypted_password))
- elog(ERROR, "CREATE USER: password encryption failed");
- new_record[Anum_pg_shadow_passwd - 1] =
- DirectFunctionCall1(textin, CStringGetDatum(encrypted_password));
- }
- new_record_repl[Anum_pg_shadow_passwd - 1] = 'r';
- }
-
- /* valid until */
- if (validUntil)
- {
- new_record[Anum_pg_shadow_valuntil - 1] =
- DirectFunctionCall1(nabstimein, CStringGetDatum(validUntil));
- new_record_repl[Anum_pg_shadow_valuntil - 1] = 'r';
- }
-
- new_tuple = heap_modifytuple(tuple, pg_shadow_rel, new_record,
- new_record_nulls, new_record_repl);
- simple_heap_update(pg_shadow_rel, &tuple->t_self, new_tuple);
-
- /* Update indexes */
- if (RelationGetForm(pg_shadow_rel)->relhasindex)
- {
- Relation idescs[Num_pg_shadow_indices];
-
- CatalogOpenIndices(Num_pg_shadow_indices,
- Name_pg_shadow_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_shadow_indices, pg_shadow_rel,
- new_tuple);
- CatalogCloseIndices(Num_pg_shadow_indices, idescs);
- }
-
- ReleaseSysCache(tuple);
- heap_freetuple(new_tuple);
-
- /*
- * Now we can clean up.
- */
- heap_close(pg_shadow_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
-
-
-/*
- * ALTER USER ... SET
- */
-void
-AlterUserSet(AlterUserSetStmt *stmt)
-{
- char *valuestr;
- HeapTuple oldtuple,
- newtuple;
- Relation rel;
- Datum repl_val[Natts_pg_shadow];
- char repl_null[Natts_pg_shadow];
- char repl_repl[Natts_pg_shadow];
- int i;
-
- valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
-
- /*
- * RowExclusiveLock is sufficient, because we don't need to update
- * the flat password file.
- */
- rel = heap_openr(ShadowRelationName, RowExclusiveLock);
- oldtuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(stmt->user),
- 0, 0, 0);
- if (!HeapTupleIsValid(oldtuple))
- elog(ERROR, "user \"%s\" does not exist", stmt->user);
-
- if (!(superuser()
- || ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
- elog(ERROR, "permission denied");
-
- for (i = 0; i < Natts_pg_shadow; i++)
- repl_repl[i] = ' ';
-
- repl_repl[Anum_pg_shadow_useconfig-1] = 'r';
- if (strcmp(stmt->variable, "all")==0 && valuestr == NULL)
- /* RESET ALL */
- repl_null[Anum_pg_shadow_useconfig-1] = 'n';
- else
- {
- Datum datum;
- bool isnull;
- ArrayType *array;
-
- repl_null[Anum_pg_shadow_useconfig-1] = ' ';
-
- datum = SysCacheGetAttr(SHADOWNAME, oldtuple,
- Anum_pg_shadow_useconfig, &isnull);
-
- array = isnull ? ((ArrayType *) NULL) : DatumGetArrayTypeP(datum);
-
- if (valuestr)
- array = GUCArrayAdd(array, stmt->variable, valuestr);
- else
- array = GUCArrayDelete(array, stmt->variable);
-
- repl_val[Anum_pg_shadow_useconfig-1] = PointerGetDatum(array);
- }
-
- newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl);
- simple_heap_update(rel, &oldtuple->t_self, newtuple);
-
- {
- Relation idescs[Num_pg_shadow_indices];
-
- CatalogOpenIndices(Num_pg_shadow_indices, Name_pg_shadow_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_shadow_indices, rel, newtuple);
- CatalogCloseIndices(Num_pg_shadow_indices, idescs);
- }
-
- ReleaseSysCache(oldtuple);
- heap_close(rel, RowExclusiveLock);
-}
-
-
-
-/*
- * DROP USER
- */
-void
-DropUser(DropUserStmt *stmt)
-{
- Relation pg_shadow_rel;
- TupleDesc pg_shadow_dsc;
- List *item;
-
- if (!superuser())
- elog(ERROR, "DROP USER: permission denied");
-
- if (IsTransactionBlock())
- elog(NOTICE, "DROP USER cannot be rolled back completely");
-
- /*
- * Scan the pg_shadow relation to find the usesysid of the user to be
- * deleted. Note we secure exclusive lock, because we need to protect
- * our update of the flat password file.
- */
- pg_shadow_rel = heap_openr(ShadowRelationName, ExclusiveLock);
- pg_shadow_dsc = RelationGetDescr(pg_shadow_rel);
-
- foreach(item, stmt->users)
- {
- const char *user = strVal(lfirst(item));
- HeapTuple tuple,
- tmp_tuple;
- Relation pg_rel;
- TupleDesc pg_dsc;
- ScanKeyData scankey;
- HeapScanDesc scan;
- int32 usesysid;
-
- tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(user),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DROP USER: user \"%s\" does not exist%s", user,
- (length(stmt->users) > 1) ? " (no users removed)" : "");
-
- usesysid = ((Form_pg_shadow) GETSTRUCT(tuple))->usesysid;
-
- if (usesysid == GetUserId())
- elog(ERROR, "current user cannot be dropped");
- if (usesysid == GetSessionUserId())
- elog(ERROR, "session user cannot be dropped");
-
- /*
- * Check if user still owns a database. If so, error out.
- *
- * (It used to be that this function would drop the database
- * automatically. This is not only very dangerous for people that
- * don't read the manual, it doesn't seem to be the behaviour one
- * would expect either.) -- petere 2000/01/14)
- */
- pg_rel = heap_openr(DatabaseRelationName, AccessShareLock);
- pg_dsc = RelationGetDescr(pg_rel);
-
- ScanKeyEntryInitialize(&scankey, 0x0,
- Anum_pg_database_datdba, F_INT4EQ,
- Int32GetDatum(usesysid));
-
- scan = heap_beginscan(pg_rel, SnapshotNow, 1, &scankey);
-
- if ((tmp_tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- char *dbname;
-
- dbname = NameStr(((Form_pg_database) GETSTRUCT(tmp_tuple))->datname);
- elog(ERROR, "DROP USER: user \"%s\" owns database \"%s\", cannot be removed%s",
- user, dbname,
- (length(stmt->users) > 1) ? " (no users removed)" : "");
- }
-
- heap_endscan(scan);
- heap_close(pg_rel, AccessShareLock);
-
- /*
- * Somehow we'd have to check for tables, views, etc. owned by the
- * user as well, but those could be spread out over all sorts of
- * databases which we don't have access to (easily).
- */
-
- /*
- * Remove the user from the pg_shadow table
- */
- simple_heap_delete(pg_shadow_rel, &tuple->t_self);
-
- ReleaseSysCache(tuple);
-
- /*
- * Remove user from groups
- *
- * try calling alter group drop user for every group
- */
- pg_rel = heap_openr(GroupRelationName, ExclusiveLock);
- pg_dsc = RelationGetDescr(pg_rel);
- scan = heap_beginscan(pg_rel, SnapshotNow, 0, NULL);
- while ((tmp_tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- AlterGroupStmt ags;
-
- /* the group name from which to try to drop the user: */
- ags.name = pstrdup(NameStr(((Form_pg_group) GETSTRUCT(tmp_tuple))->groname));
- ags.action = -1;
- ags.listUsers = makeList1(makeInteger(usesysid));
- AlterGroup(&ags, "DROP USER");
- }
- heap_endscan(scan);
- heap_close(pg_rel, ExclusiveLock);
-
- /*
- * Advance command counter so that later iterations of this loop
- * will see the changes already made. This is essential if, for
- * example, we are trying to drop two users who are members of the
- * same group --- the AlterGroup for the second user had better
- * see the tuple updated from the first one.
- */
- CommandCounterIncrement();
- }
-
- /*
- * Now we can clean up.
- */
- heap_close(pg_shadow_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
-
-
-
-/*
- * CheckPgUserAclNotNull
- *
- * check to see if there is an ACL on pg_shadow
- */
-static void
-CheckPgUserAclNotNull(void)
-{
- HeapTuple htup;
-
- htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(RelOid_pg_shadow),
- 0, 0, 0);
- if (!HeapTupleIsValid(htup))
- elog(ERROR, "CheckPgUserAclNotNull: \"%s\" not found",
- ShadowRelationName);
-
- if (heap_attisnull(htup, Anum_pg_class_relacl))
- elog(ERROR,
- "To use passwords, you have to revoke permissions on %s "
- "so normal users cannot read the passwords. "
- "Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
- ShadowRelationName, ShadowRelationName);
-
- ReleaseSysCache(htup);
-}
-
-
-
-/*
- * CREATE GROUP
- */
-void
-CreateGroup(CreateGroupStmt *stmt)
-{
- Relation pg_group_rel;
- HeapScanDesc scan;
- HeapTuple tuple;
- TupleDesc pg_group_dsc;
- bool group_exists = false,
- sysid_exists = false,
- havesysid = false;
- int max_id;
- Datum new_record[Natts_pg_group];
- char new_record_nulls[Natts_pg_group];
- List *item,
- *option,
- *newlist = NIL;
- IdList *grolist;
- int sysid = 0;
- List *userElts = NIL;
- DefElem *dsysid = NULL;
- DefElem *duserElts = NULL;
-
- foreach(option, stmt->options)
- {
- DefElem *defel = (DefElem *) lfirst(option);
-
- if (strcmp(defel->defname, "sysid") == 0)
- {
- if (dsysid)
- elog(ERROR, "CREATE GROUP: conflicting options");
- dsysid = defel;
- }
- else if (strcmp(defel->defname, "userElts") == 0)
- {
- if (duserElts)
- elog(ERROR, "CREATE GROUP: conflicting options");
- duserElts = defel;
- }
- else
- elog(ERROR, "CREATE GROUP: option \"%s\" not recognized",
- defel->defname);
- }
-
- if (dsysid)
- {
- sysid = intVal(dsysid->arg);
- if (sysid <= 0)
- elog(ERROR, "group id must be positive");
- havesysid = true;
- }
-
- if (duserElts)
- userElts = (List *) duserElts->arg;
-
- /*
- * Make sure the user can do this.
- */
- if (!superuser())
- elog(ERROR, "CREATE GROUP: permission denied");
-
- if (strcmp(stmt->name, "public") == 0)
- elog(ERROR, "CREATE GROUP: group name \"%s\" is reserved",
- stmt->name);
-
- pg_group_rel = heap_openr(GroupRelationName, ExclusiveLock);
- pg_group_dsc = RelationGetDescr(pg_group_rel);
-
- scan = heap_beginscan(pg_group_rel, SnapshotNow, 0, NULL);
- max_id = 99; /* start auto-assigned ids at 100 */
- while (!group_exists && !sysid_exists &&
- (tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_group group_form = (Form_pg_group) GETSTRUCT(tuple);
- int32 this_sysid;
-
- group_exists = (strcmp(NameStr(group_form->groname), stmt->name) == 0);
-
- this_sysid = group_form->grosysid;
- if (havesysid) /* customized id wanted */
- sysid_exists = (this_sysid == sysid);
- else
- {
- /* pick 1 + max */
- if (this_sysid > max_id)
- max_id = this_sysid;
- }
- }
- heap_endscan(scan);
-
- if (group_exists)
- elog(ERROR, "CREATE GROUP: group name \"%s\" already exists",
- stmt->name);
- if (sysid_exists)
- elog(ERROR, "CREATE GROUP: group sysid %d is already assigned",
- sysid);
-
- if (!havesysid)
- sysid = max_id + 1;
-
- /*
- * Translate the given user names to ids
- */
- foreach(item, userElts)
- {
- const char *groupuser = strVal(lfirst(item));
- int32 userid = get_usesysid(groupuser);
-
- if (!intMember(userid, newlist))
- newlist = lappendi(newlist, userid);
- }
-
- /* build an array to insert */
- if (newlist)
- grolist = IdListToArray(newlist);
- else
- grolist = NULL;
-
- /*
- * Form a tuple to insert
- */
- new_record[Anum_pg_group_groname - 1] =
- DirectFunctionCall1(namein, CStringGetDatum(stmt->name));
- new_record[Anum_pg_group_grosysid - 1] = Int32GetDatum(sysid);
- new_record[Anum_pg_group_grolist - 1] = PointerGetDatum(grolist);
-
- new_record_nulls[Anum_pg_group_groname - 1] = ' ';
- new_record_nulls[Anum_pg_group_grosysid - 1] = ' ';
- new_record_nulls[Anum_pg_group_grolist - 1] = grolist ? ' ' : 'n';
-
- tuple = heap_formtuple(pg_group_dsc, new_record, new_record_nulls);
-
- /*
- * Insert a new record in the pg_group table
- */
- simple_heap_insert(pg_group_rel, tuple);
-
- /*
- * Update indexes
- */
- if (RelationGetForm(pg_group_rel)->relhasindex)
- {
- Relation idescs[Num_pg_group_indices];
-
- CatalogOpenIndices(Num_pg_group_indices,
- Name_pg_group_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_group_indices, pg_group_rel,
- tuple);
- CatalogCloseIndices(Num_pg_group_indices, idescs);
- }
-
- heap_close(pg_group_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
-
-
-
-/*
- * ALTER GROUP
- */
-void
-AlterGroup(AlterGroupStmt *stmt, const char *tag)
-{
- Relation pg_group_rel;
- TupleDesc pg_group_dsc;
- HeapTuple group_tuple;
- IdList *oldarray;
- Datum datum;
- bool null;
- List *newlist,
- *item;
-
- /*
- * Make sure the user can do this.
- */
- if (!superuser())
- elog(ERROR, "%s: permission denied", tag);
-
- pg_group_rel = heap_openr(GroupRelationName, ExclusiveLock);
- pg_group_dsc = RelationGetDescr(pg_group_rel);
-
- /*
- * Fetch existing tuple for group.
- */
- group_tuple = SearchSysCache(GRONAME,
- PointerGetDatum(stmt->name),
- 0, 0, 0);
- if (!HeapTupleIsValid(group_tuple))
- elog(ERROR, "%s: group \"%s\" does not exist", tag, stmt->name);
-
- /* Fetch old group membership. */
- datum = heap_getattr(group_tuple, Anum_pg_group_grolist,
- pg_group_dsc, &null);
- oldarray = null ? ((IdList *) NULL) : DatumGetIdListP(datum);
-
- /* initialize list with old array contents */
- newlist = IdArrayToList(oldarray);
-
- /*
- * Now decide what to do.
- */
- AssertState(stmt->action == +1 || stmt->action == -1);
-
- if (stmt->action == +1) /* add users, might also be invoked by
- * create user */
- {
- /*
- * convert the to be added usernames to sysids and add them to
- * the list
- */
- foreach(item, stmt->listUsers)
- {
- int32 sysid;
-
- if (strcmp(tag, "ALTER GROUP") == 0)
- {
- /* Get the uid of the proposed user to add. */
- sysid = get_usesysid(strVal(lfirst(item)));
- }
- else if (strcmp(tag, "CREATE USER") == 0)
- {
- /*
- * in this case we already know the uid and it wouldn't be
- * in the cache anyway yet
- */
- sysid = intVal(lfirst(item));
- }
- else
- {
- elog(ERROR, "AlterGroup: unknown tag %s", tag);
- sysid = 0; /* keep compiler quiet */
- }
-
- if (!intMember(sysid, newlist))
- newlist = lappendi(newlist, sysid);
- else
- /*
- * we silently assume here that this error will only come
- * up in a ALTER GROUP statement
- */
- elog(WARNING, "%s: user \"%s\" is already in group \"%s\"",
- tag, strVal(lfirst(item)), stmt->name);
- }
-
- /* Do the update */
- UpdateGroupMembership(pg_group_rel, group_tuple, newlist);
- } /* endif alter group add user */
-
- else if (stmt->action == -1) /* drop users from group */
- {
- bool is_dropuser = strcmp(tag, "DROP USER") == 0;
-
- if (newlist == NIL)
- {
- if (!is_dropuser)
- elog(WARNING, "ALTER GROUP: group \"%s\" does not have any members", stmt->name);
- }
- else
- {
- /*
- * convert the to be dropped usernames to sysids and
- * remove them from the list
- */
- foreach(item, stmt->listUsers)
- {
- int32 sysid;
-
- if (!is_dropuser)
- {
- /* Get the uid of the proposed user to drop. */
- sysid = get_usesysid(strVal(lfirst(item)));
- }
- else
- {
- /* for dropuser we already know the uid */
- sysid = intVal(lfirst(item));
- }
- if (intMember(sysid, newlist))
- newlist = lremovei(sysid, newlist);
- else if (!is_dropuser)
- elog(WARNING, "ALTER GROUP: user \"%s\" is not in group \"%s\"", strVal(lfirst(item)), stmt->name);
- }
-
- /* Do the update */
- UpdateGroupMembership(pg_group_rel, group_tuple, newlist);
- } /* endif group not null */
- } /* endif alter group drop user */
-
- ReleaseSysCache(group_tuple);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat files.
- */
- heap_close(pg_group_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
-
-/*
- * Subroutine for AlterGroup: given a pg_group tuple and a desired new
- * membership (expressed as an integer list), form and write an updated tuple.
- * The pg_group relation must be open and locked already.
- */
-static void
-UpdateGroupMembership(Relation group_rel, HeapTuple group_tuple,
- List *members)
-{
- IdList *newarray;
- Datum new_record[Natts_pg_group];
- char new_record_nulls[Natts_pg_group];
- char new_record_repl[Natts_pg_group];
- HeapTuple tuple;
-
- newarray = IdListToArray(members);
-
- /*
- * Form an updated tuple with the new array and write it back.
- */
- MemSet(new_record, 0, sizeof(new_record));
- MemSet(new_record_nulls, ' ', sizeof(new_record_nulls));
- MemSet(new_record_repl, ' ', sizeof(new_record_repl));
-
- new_record[Anum_pg_group_grolist - 1] = PointerGetDatum(newarray);
- new_record_repl[Anum_pg_group_grolist - 1] = 'r';
-
- tuple = heap_modifytuple(group_tuple, group_rel,
- new_record, new_record_nulls, new_record_repl);
-
- simple_heap_update(group_rel, &group_tuple->t_self, tuple);
-
- /* Update indexes */
- if (RelationGetForm(group_rel)->relhasindex)
- {
- Relation idescs[Num_pg_group_indices];
-
- CatalogOpenIndices(Num_pg_group_indices,
- Name_pg_group_indices, idescs);
- CatalogIndexInsert(idescs, Num_pg_group_indices, group_rel,
- tuple);
- CatalogCloseIndices(Num_pg_group_indices, idescs);
- }
-}
-
-
-/*
- * Convert an integer list of sysids to an array.
- */
-static IdList *
-IdListToArray(List *members)
-{
- int nmembers = length(members);
- IdList *newarray;
- List *item;
- int i;
-
- newarray = palloc(ARR_OVERHEAD(1) + nmembers * sizeof(int32));
- newarray->size = ARR_OVERHEAD(1) + nmembers * sizeof(int32);
- newarray->flags = 0;
- ARR_NDIM(newarray) = 1; /* one dimensional array */
- ARR_LBOUND(newarray)[0] = 1; /* axis starts at one */
- ARR_DIMS(newarray)[0] = nmembers; /* axis is this long */
- i = 0;
- foreach(item, members)
- {
- ((int *) ARR_DATA_PTR(newarray))[i++] = lfirsti(item);
- }
-
- return newarray;
-}
-
-/*
- * Convert an array of sysids to an integer list.
- */
-static List *
-IdArrayToList(IdList *oldarray)
-{
- List *newlist = NIL;
- int hibound,
- i;
-
- if (oldarray == NULL)
- return NIL;
-
- Assert(ARR_NDIM(oldarray) == 1);
-
- hibound = ARR_DIMS(oldarray)[0];
-
- for (i = 0; i < hibound; i++)
- {
- int32 sysid;
-
- sysid = ((int *) ARR_DATA_PTR(oldarray))[i];
- /* filter out any duplicates --- probably a waste of time */
- if (!intMember(sysid, newlist))
- newlist = lappendi(newlist, sysid);
- }
-
- return newlist;
-}
-
-
-/*
- * DROP GROUP
- */
-void
-DropGroup(DropGroupStmt *stmt)
-{
- Relation pg_group_rel;
- HeapTuple tuple;
-
- /*
- * Make sure the user can do this.
- */
- if (!superuser())
- elog(ERROR, "DROP GROUP: permission denied");
-
- /*
- * Drop the group.
- */
- pg_group_rel = heap_openr(GroupRelationName, ExclusiveLock);
-
- tuple = SearchSysCacheCopy(GRONAME,
- PointerGetDatum(stmt->name),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "DROP GROUP: group \"%s\" does not exist", stmt->name);
-
- simple_heap_delete(pg_group_rel, &tuple->t_self);
-
- heap_close(pg_group_rel, NoLock);
-
- /*
- * Write the updated pg_shadow and pg_group data to the flat file.
- */
- update_pg_pwd_and_pg_group(NULL);
-}
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
deleted file mode 100644
index 7e67099e442..00000000000
--- a/src/backend/commands/vacuum.c
+++ /dev/null
@@ -1,2944 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * vacuum.c
- * The postgres vacuum cleaner.
- *
- * This file includes the "full" version of VACUUM, as well as control code
- * used by all three of full VACUUM, lazy VACUUM, and ANALYZE. See
- * vacuumlazy.c and analyze.c for the rest of the code for the latter two.
- *
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.230 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <unistd.h>
-
-#include "access/clog.h"
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "access/xlog.h"
-#include "catalog/catalog.h"
-#include "catalog/catname.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_database.h"
-#include "catalog/pg_index.h"
-#include "commands/vacuum.h"
-#include "executor/executor.h"
-#include "miscadmin.h"
-#include "storage/freespace.h"
-#include "storage/sinval.h"
-#include "storage/smgr.h"
-#include "tcop/pquery.h"
-#include "utils/acl.h"
-#include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/inval.h"
-#include "utils/lsyscache.h"
-#include "utils/relcache.h"
-#include "utils/syscache.h"
-#include "pgstat.h"
-
-
-/*
- * VacPageData -- per-page work item for VACUUM FULL: the page's free
- * space plus the line pointers that are (or will become) free on it.
- */
-typedef struct VacPageData
-{
- BlockNumber blkno; /* BlockNumber of this Page */
- Size free; /* FreeSpace on this Page */
- uint16 offsets_used; /* Number of OffNums used by vacuum */
- uint16 offsets_free; /* Number of OffNums free or to be free */
- OffsetNumber offsets[1]; /* Array of free OffNums */
-} VacPageData;
-
-typedef VacPageData *VacPage;
-
-/* Collection of VacPage entries for one relation */
-typedef struct VacPageListData
-{
- BlockNumber empty_end_pages; /* Number of "empty" end-pages */
- int num_pages; /* Number of pages in pagedesc */
- int num_allocated_pages; /* Number of allocated pages in
- * pagedesc */
- VacPage *pagedesc; /* Descriptions of pages */
-} VacPageListData;
-
-typedef VacPageListData *VacPageList;
-
-/*
- * VTupleLinkData -- records that the tuple at this_tid was updated to
- * the tuple at new_tid (taken from its t_ctid); used to reconstruct
- * update chains while shrinking the relation.
- */
-typedef struct VTupleLinkData
-{
- ItemPointerData new_tid;
- ItemPointerData this_tid;
-} VTupleLinkData;
-
-typedef VTupleLinkData *VTupleLink;
-
-/* A pending tuple move planned by the heap-shrinking logic */
-typedef struct VTupleMoveData
-{
- ItemPointerData tid; /* tuple ID */
- VacPage vacpage; /* where to move */
- bool cleanVpd; /* clean vacpage before using */
-} VTupleMoveData;
-
-typedef VTupleMoveData *VTupleMove;
-
-/* Per-relation statistics accumulated by VACUUM FULL */
-typedef struct VRelStats
-{
- BlockNumber rel_pages;
- double rel_tuples;
- Size min_tlen;
- Size max_tlen;
- bool hasindex;
- int num_vtlinks;
- VTupleLink vtlinks;
-} VRelStats;
-
-
-/* cross-transaction storage for the duration of one VACUUM/ANALYZE command */
-static MemoryContext vac_context = NULL;
-
-/* message level for progress output: INFO if VERBOSE, else DEBUG1 */
-static int elevel = -1;
-
-/* per-table visibility and freeze cutoffs, set in full_vacuum_rel() */
-static TransactionId OldestXmin;
-static TransactionId FreezeLimit;
-
-/* cutoffs computed at the start of a database-wide VACUUM, recorded
- * in pg_database when the whole run completes */
-static TransactionId initialOldestXmin;
-static TransactionId initialFreezeLimit;
-
-
-/* non-export function prototypes */
-static List *getrels(const RangeVar *vacrel, const char *stmttype);
-static void vac_update_dbstats(Oid dbid,
- TransactionId vacuumXID,
- TransactionId frozenXID);
-static void vac_truncate_clog(TransactionId vacuumXID,
- TransactionId frozenXID);
-static void vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind);
-static void full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt);
-static void scan_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages);
-static void repair_frag(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages,
- int nindexes, Relation *Irel);
-static void vacuum_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacpagelist);
-static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage);
-static void vacuum_index(VacPageList vacpagelist, Relation indrel,
- double num_tuples, int keep_tuples);
-static void scan_index(Relation indrel, double num_tuples);
-static bool tid_reaped(ItemPointer itemptr, void *state);
-static bool dummy_tid_reaped(ItemPointer itemptr, void *state);
-static void vac_update_fsm(Relation onerel, VacPageList fraged_pages,
- BlockNumber rel_pages);
-static VacPage copy_vac_page(VacPage vacpage);
-static void vpage_insert(VacPageList vacpagelist, VacPage vpnew);
-static void *vac_bsearch(const void *key, const void *base,
- size_t nelem, size_t size,
- int (*compar) (const void *, const void *));
-static int vac_cmp_blk(const void *left, const void *right);
-static int vac_cmp_offno(const void *left, const void *right);
-static int vac_cmp_vtlinks(const void *left, const void *right);
-static bool enough_space(VacPage vacpage, Size len);
-
-
-/****************************************************************************
- * *
- * Code common to all flavors of VACUUM and ANALYZE *
- * *
- ****************************************************************************
- */
-
-
-/*
- * Primary entry point for VACUUM and ANALYZE commands.
- *
- * vacstmt selects the target (one relation, or every plain relation when
- * vacstmt->relation is NULL) and the operations to run (vacstmt->vacuum
- * and/or vacstmt->analyze).  VACUUM runs each table in its own
- * transaction; a stand-alone ANALYZE runs in the caller's transaction
- * but resets a private memory context between tables.
- */
-void
-vacuum(VacuumStmt *vacstmt)
-{
- const char *stmttype = vacstmt->vacuum ? "VACUUM" : "ANALYZE";
- MemoryContext anl_context = NULL;
- List *vrl,
- *cur;
-
- /* set module-global message level for all progress reporting */
- if (vacstmt->verbose)
- elevel = INFO;
- else
- elevel = DEBUG1;
-
- /*
- * We cannot run VACUUM inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
- * Furthermore, the forced commit that occurs before truncating the
- * relation's file would have the effect of committing the rest of the
- * user's transaction too, which would certainly not be the desired
- * behavior.
- */
- if (vacstmt->vacuum && IsTransactionBlock())
- elog(ERROR, "%s cannot run inside a BEGIN/END block", stmttype);
-
- /* Running VACUUM from a function would free the function context */
- if (vacstmt->vacuum && !MemoryContextContains(QueryContext, vacstmt))
- elog(ERROR, "%s cannot be executed from a function", stmttype);
-
- /*
- * Send info about dead objects to the statistics collector
- */
- if (vacstmt->vacuum)
- pgstat_vacuum_tabstat();
-
- /*
- * Create special memory context for cross-transaction storage.
- *
- * Since it is a child of QueryContext, it will go away eventually even
- * if we suffer an error; there's no need for special abort cleanup
- * logic.
- */
- vac_context = AllocSetContextCreate(QueryContext,
- "Vacuum",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-
- /*
- * If we are running only ANALYZE, we don't need per-table transactions,
- * but we still need a memory context with table lifetime.
- */
- if (vacstmt->analyze && !vacstmt->vacuum)
- anl_context = AllocSetContextCreate(QueryContext,
- "Analyze",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-
- /* Build list of relations to process (note this lives in vac_context) */
- vrl = getrels(vacstmt->relation, stmttype);
-
- /*
- * Formerly, there was code here to prevent more than one VACUUM from
- * executing concurrently in the same database. However, there's no
- * good reason to prevent that, and manually removing lockfiles after
- * a vacuum crash was a pain for dbadmins. So, forget about lockfiles,
- * and just rely on the locks we grab on each target table
- * to ensure that there aren't two VACUUMs running on the same table
- * at the same time.
- */
-
- /*
- * The strangeness with committing and starting transactions here is due
- * to wanting to run each table's VACUUM as a separate transaction, so
- * that we don't hold locks unnecessarily long. Also, if we are doing
- * VACUUM ANALYZE, the ANALYZE part runs as a separate transaction from
- * the VACUUM to further reduce locking.
- *
- * vacuum_rel expects to be entered with no transaction active; it will
- * start and commit its own transaction. But we are called by an SQL
- * command, and so we are executing inside a transaction already. We
- * commit the transaction started in PostgresMain() here, and start
- * another one before exiting to match the commit waiting for us back in
- * PostgresMain().
- *
- * In the case of an ANALYZE statement (no vacuum, just analyze) it's
- * okay to run the whole thing in the outer transaction, and so we skip
- * transaction start/stop operations.
- */
- if (vacstmt->vacuum)
- {
- if (vacstmt->relation == NULL)
- {
- /*
- * It's a database-wide VACUUM.
- *
- * Compute the initially applicable OldestXmin and FreezeLimit
- * XIDs, so that we can record these values at the end of the
- * VACUUM. Note that individual tables may well be processed with
- * newer values, but we can guarantee that no (non-shared)
- * relations are processed with older ones.
- *
- * It is okay to record non-shared values in pg_database, even though
- * we may vacuum shared relations with older cutoffs, because only
- * the minimum of the values present in pg_database matters. We
- * can be sure that shared relations have at some time been
- * vacuumed with cutoffs no worse than the global minimum; for, if
- * there is a backend in some other DB with xmin = OLDXMIN that's
- * determining the cutoff with which we vacuum shared relations,
- * it is not possible for that database to have a cutoff newer
- * than OLDXMIN recorded in pg_database.
- */
- vacuum_set_xid_limits(vacstmt, false,
- &initialOldestXmin, &initialFreezeLimit);
- }
-
- /* matches the StartTransaction in PostgresMain() */
- CommitTransactionCommand();
- }
-
- /*
- * Loop to process each selected relation.
- */
- foreach(cur, vrl)
- {
- Oid relid = (Oid) lfirsti(cur);
-
- if (vacstmt->vacuum)
- vacuum_rel(relid, vacstmt, RELKIND_RELATION);
- if (vacstmt->analyze)
- {
- MemoryContext old_context = NULL;
-
- /*
- * If we vacuumed, use new transaction for analyze. Otherwise,
- * we can use the outer transaction, but we still need to call
- * analyze_rel in a memory context that will be cleaned up on
- * return (else we leak memory while processing multiple tables).
- */
- if (vacstmt->vacuum)
- StartTransactionCommand();
- else
- old_context = MemoryContextSwitchTo(anl_context);
-
- analyze_rel(relid, vacstmt);
-
- if (vacstmt->vacuum)
- CommitTransactionCommand();
- else
- {
- MemoryContextSwitchTo(old_context);
- MemoryContextResetAndDeleteChildren(anl_context);
- }
- }
- }
-
- /*
- * Finish up processing.
- */
- if (vacstmt->vacuum)
- {
- /* here, we are not in a transaction */
-
- /* matches the CommitTransaction in PostgresMain() */
- StartTransactionCommand();
-
- /*
- * If we did a database-wide VACUUM, update the database's pg_database
- * row with info about the transaction IDs used, and try to truncate
- * pg_clog.
- */
- if (vacstmt->relation == NULL)
- {
- vac_update_dbstats(MyDatabaseId,
- initialOldestXmin, initialFreezeLimit);
- vac_truncate_clog(initialOldestXmin, initialFreezeLimit);
- }
- }
-
- /*
- * Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete the
- * active context!
- */
- MemoryContextDelete(vac_context);
- vac_context = NULL;
-
- /* anl_context was only created for the stand-alone ANALYZE case */
- if (anl_context)
- MemoryContextDelete(anl_context);
-}
-
-/*
- * Build a list of Oids for each relation to be processed
- *
- * The list is built in vac_context so that it will survive across our
- * per-relation transactions.
- *
- * vacrel == NULL means process every RELKIND_RELATION row in pg_class;
- * otherwise resolve the given RangeVar (erroring out if it does not
- * exist).  stmttype is only the command name used for messages.
- */
-static List *
-getrels(const RangeVar *vacrel, const char *stmttype)
-{
- List *vrl = NIL;
- MemoryContext oldcontext;
-
- if (vacrel)
- {
- /* Process specific relation */
- Oid relid;
-
- /* second arg false => elog(ERROR) if the relation is missing */
- relid = RangeVarGetRelid(vacrel, false);
-
- /* Make a relation list entry for this guy */
- oldcontext = MemoryContextSwitchTo(vac_context);
- vrl = lappendi(vrl, relid);
- MemoryContextSwitchTo(oldcontext);
- }
- else
- {
- /* Process all plain relations listed in pg_class */
- Relation pgclass;
- HeapScanDesc scan;
- HeapTuple tuple;
- ScanKeyData key;
-
- /* scan key: pg_class.relkind = RELKIND_RELATION */
- ScanKeyEntryInitialize(&key, 0x0,
- Anum_pg_class_relkind,
- F_CHAREQ,
- CharGetDatum(RELKIND_RELATION));
-
- pgclass = heap_openr(RelationRelationName, AccessShareLock);
-
- scan = heap_beginscan(pgclass, SnapshotNow, 1, &key);
-
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- /* Make a relation list entry for this guy */
- oldcontext = MemoryContextSwitchTo(vac_context);
- vrl = lappendi(vrl, tuple->t_data->t_oid);
- MemoryContextSwitchTo(oldcontext);
- }
-
- heap_endscan(scan);
- heap_close(pgclass, AccessShareLock);
- }
-
- return vrl;
-}
-
-/*
- * vacuum_set_xid_limits() -- compute oldest-Xmin and freeze cutoff points
- *
- * *oldestXmin receives GetOldestXmin(sharedRel).  *freezeLimit receives
- * either the same value (when vacstmt->freeze is set) or a point about
- * one quarter of the XID space in the past, clamped to be a normal XID
- * and never newer than *oldestXmin.
- */
-void
-vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
- TransactionId *oldestXmin,
- TransactionId *freezeLimit)
-{
- TransactionId limit;
-
- *oldestXmin = GetOldestXmin(sharedRel);
-
- Assert(TransactionIdIsNormal(*oldestXmin));
-
- if (vacstmt->freeze)
- {
- /* FREEZE option: use oldest Xmin as freeze cutoff too */
- limit = *oldestXmin;
- }
- else
- {
- /*
- * Normal case: freeze cutoff is well in the past, to wit, about
- * halfway to the wrap horizon
- */
- limit = GetCurrentTransactionId() - (MaxTransactionId >> 2);
- }
-
- /*
- * Be careful not to generate a "permanent" XID
- */
- if (!TransactionIdIsNormal(limit))
- limit = FirstNormalTransactionId;
-
- /*
- * Ensure sane relationship of limits
- */
- if (TransactionIdFollows(limit, *oldestXmin))
- {
- elog(WARNING, "oldest Xmin is far in the past --- close open transactions soon to avoid wraparound problems");
- limit = *oldestXmin;
- }
-
- *freezeLimit = limit;
-}
-
-
-/*
- * vac_update_relstats() -- update statistics for one relation
- *
- * Update the whole-relation statistics that are kept in its pg_class
- * row. There are additional stats that will be updated if we are
- * doing ANALYZE, but we always update these stats. This routine works
- * for both index and heap relation entries in pg_class.
- *
- * We violate no-overwrite semantics here by storing new values for the
- * statistics columns directly into the pg_class tuple that's already on
- * the page. The reason for this is that if we updated these tuples in
- * the usual way, vacuuming pg_class itself wouldn't work very well ---
- * by the time we got done with a vacuum cycle, most of the tuples in
- * pg_class would've been obsoleted. Of course, this only works for
- * fixed-size never-null columns, but these are.
- *
- * This routine is shared by full VACUUM, lazy VACUUM, and stand-alone
- * ANALYZE.
- */
-void
-vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
- bool hasindex)
-{
- Relation rd;
- HeapTupleData rtup;
- HeapTuple ctup;
- Form_pg_class pgcform;
- Buffer buffer;
-
- /*
- * update number of tuples and number of pages in pg_class
- */
- rd = heap_openr(RelationRelationName, RowExclusiveLock);
-
- /* use the syscache copy only to learn the tuple's TID... */
- ctup = SearchSysCache(RELOID,
- ObjectIdGetDatum(relid),
- 0, 0, 0);
- if (!HeapTupleIsValid(ctup))
- elog(ERROR, "pg_class entry for relid %u vanished during vacuuming",
- relid);
-
- /* get the buffer cache tuple */
- rtup.t_self = ctup->t_self;
- ReleaseSysCache(ctup);
- /* ...then fetch the tuple in place so we can overwrite it on-page */
- if (!heap_fetch(rd, SnapshotNow, &rtup, &buffer, false, NULL))
- elog(ERROR, "pg_class entry for relid %u vanished during vacuuming",
- relid);
-
- /* overwrite the existing statistics in the tuple */
- pgcform = (Form_pg_class) GETSTRUCT(&rtup);
- pgcform->relpages = (int32) num_pages;
- pgcform->reltuples = num_tuples;
- pgcform->relhasindex = hasindex;
-
- /*
- * If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
- */
- if (!hasindex)
- pgcform->relhaspkey = false;
-
- /*
- * Invalidate the tuple in the catcaches; this also arranges to flush
- * the relation's relcache entry. (If we fail to commit for some reason,
- * no flush will occur, but no great harm is done since there are no
- * noncritical state updates here.)
- */
- CacheInvalidateHeapTuple(rd, &rtup);
-
- /* Write the buffer */
- WriteBuffer(buffer);
-
- heap_close(rd, RowExclusiveLock);
-}
-
-
-/*
- * vac_update_dbstats() -- update statistics for one database
- *
- * Update the whole-database statistics that are kept in its pg_database
- * row.
- *
- * We violate no-overwrite semantics here by storing new values for the
- * statistics columns directly into the tuple that's already on the page.
- * As with vac_update_relstats, this avoids leaving dead tuples behind
- * after a VACUUM; which is good since GetRawDatabaseInfo
- * can get confused by finding dead tuples in pg_database.
- *
- * This routine is shared by full and lazy VACUUM. Note that it is only
- * applied after a database-wide VACUUM operation.
- */
-static void
-vac_update_dbstats(Oid dbid,
- TransactionId vacuumXID,
- TransactionId frozenXID)
-{
- Relation relation;
- ScanKeyData entry[1];
- HeapScanDesc scan;
- HeapTuple tuple;
- Form_pg_database dbform;
-
- relation = heap_openr(DatabaseRelationName, RowExclusiveLock);
-
- /* Must use a heap scan, since there's no syscache for pg_database */
- ScanKeyEntryInitialize(&entry[0], 0x0,
- ObjectIdAttributeNumber, F_OIDEQ,
- ObjectIdGetDatum(dbid));
-
- scan = heap_beginscan(relation, SnapshotNow, 1, entry);
-
- /* OID is unique, so the first match is the only one */
- tuple = heap_getnext(scan, ForwardScanDirection);
-
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "database %u does not exist", dbid);
-
- dbform = (Form_pg_database) GETSTRUCT(tuple);
-
- /* overwrite the existing statistics in the tuple */
- dbform->datvacuumxid = vacuumXID;
- dbform->datfrozenxid = frozenXID;
-
- /* invalidate the tuple in the cache and write the buffer */
- CacheInvalidateHeapTuple(relation, tuple);
- WriteNoReleaseBuffer(scan->rs_cbuf);
-
- heap_endscan(scan);
-
- heap_close(relation, RowExclusiveLock);
-}
-
-
-/*
- * vac_truncate_clog() -- attempt to truncate the commit log
- *
- * Scan pg_database to determine the system-wide oldest datvacuumxid,
- * and use it to truncate the transaction commit log (pg_clog).
- * Also generate a warning if the system-wide oldest datfrozenxid
- * seems to be in danger of wrapping around.
- *
- * The passed XIDs are simply the ones I just wrote into my pg_database
- * entry. They're used to initialize the "min" calculations.
- *
- * This routine is shared by full and lazy VACUUM. Note that it is only
- * applied after a database-wide VACUUM operation.
- */
-static void
-vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
-{
- TransactionId myXID;
- Relation relation;
- HeapScanDesc scan;
- HeapTuple tuple;
- int32 age;
- bool vacuumAlreadyWrapped = false;
- bool frozenAlreadyWrapped = false;
-
- myXID = GetCurrentTransactionId();
-
- relation = heap_openr(DatabaseRelationName, AccessShareLock);
-
- scan = heap_beginscan(relation, SnapshotNow, 0, NULL);
-
- /* take the minimum of datvacuumxid/datfrozenxid over all databases */
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
- {
- Form_pg_database dbform = (Form_pg_database) GETSTRUCT(tuple);
-
- /* Ignore non-connectable databases (eg, template0) */
- /* It's assumed that these have been frozen correctly */
- if (!dbform->datallowconn)
- continue;
-
- if (TransactionIdIsNormal(dbform->datvacuumxid))
- {
- /* an XID "in the future" means wraparound has already occurred */
- if (TransactionIdPrecedes(myXID, dbform->datvacuumxid))
- vacuumAlreadyWrapped = true;
- else if (TransactionIdPrecedes(dbform->datvacuumxid, vacuumXID))
- vacuumXID = dbform->datvacuumxid;
- }
- if (TransactionIdIsNormal(dbform->datfrozenxid))
- {
- if (TransactionIdPrecedes(myXID, dbform->datfrozenxid))
- frozenAlreadyWrapped = true;
- else if (TransactionIdPrecedes(dbform->datfrozenxid, frozenXID))
- frozenXID = dbform->datfrozenxid;
- }
- }
-
- heap_endscan(scan);
-
- heap_close(relation, AccessShareLock);
-
- /*
- * Do not truncate CLOG if we seem to have suffered wraparound already;
- * the computed minimum XID might be bogus.
- */
- if (vacuumAlreadyWrapped)
- {
- elog(WARNING, "Some databases have not been vacuumed in over 2 billion transactions."
- "\n\tYou may have already suffered transaction-wraparound data loss.");
- return;
- }
-
- /* Truncate CLOG to the oldest vacuumxid */
- TruncateCLOG(vacuumXID);
-
- /* Give warning about impending wraparound problems */
- if (frozenAlreadyWrapped)
- {
- elog(WARNING, "Some databases have not been vacuumed in over 1 billion transactions."
- "\n\tBetter vacuum them soon, or you may have a wraparound failure.");
- }
- else
- {
- /* warn once more than 3/8ths of the XID space has elapsed */
- age = (int32) (myXID - frozenXID);
- if (age > (int32) ((MaxTransactionId >> 3) * 3))
- elog(WARNING, "Some databases have not been vacuumed in %d transactions."
- "\n\tBetter vacuum them within %d transactions,"
- "\n\tor you may have a wraparound failure.",
- age, (int32) (MaxTransactionId >> 1) - age);
- }
-}
-
-
-/****************************************************************************
- * *
- * Code common to both flavors of VACUUM *
- * *
- ****************************************************************************
- */
-
-
-/*
- * vacuum_rel() -- vacuum one heap relation
- *
- * Doing one heap at a time incurs extra overhead, since we need to
- * check that the heap exists again just before we vacuum it. The
- * reason that we do this is so that vacuuming can be spread across
- * many small transactions. Otherwise, two-phase locking would require
- * us to lock the entire database during one pass of the vacuum cleaner.
- *
- * At entry and exit, we are not inside a transaction.
- *
- * expected_relkind guards against the relation having changed kind
- * since getrels(); the routine recurses once with RELKIND_TOASTVALUE
- * to process the relation's TOAST table, if any.
- */
-static void
-vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
-{
- LOCKMODE lmode;
- Relation onerel;
- LockRelId onerelid;
- Oid toast_relid;
-
- /* Begin a transaction for vacuuming this relation */
- StartTransactionCommand();
-
- /*
- * Check for user-requested abort. Note we want this to be inside a
- * transaction, so xact.c doesn't issue useless WARNING.
- */
- CHECK_FOR_INTERRUPTS();
-
- /*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to vacuum it.
- */
- if (!SearchSysCacheExists(RELOID,
- ObjectIdGetDatum(relid),
- 0, 0, 0))
- {
- CommitTransactionCommand();
- return;
- }
-
- /*
- * Determine the type of lock we want --- hard exclusive lock for a
- * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
- * vacuum. Either way, we can be sure that no other backend is
- * vacuuming the same table.
- */
- lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
-
- /*
- * Open the class, get an appropriate lock on it, and check
- * permissions.
- *
- * We allow the user to vacuum a table if he is superuser, the table
- * owner, or the database owner (but in the latter case, only if it's
- * not a shared relation). pg_class_ownercheck includes the superuser case.
- *
- * Note we choose to treat permissions failure as a WARNING and keep
- * trying to vacuum the rest of the DB --- is this appropriate?
- */
- onerel = relation_open(relid, lmode);
-
- if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
- (is_dbadmin(MyDatabaseId) && !onerel->rd_rel->relisshared)))
- {
- elog(WARNING, "Skipping \"%s\" --- only table or database owner can VACUUM it",
- RelationGetRelationName(onerel));
- relation_close(onerel, lmode);
- CommitTransactionCommand();
- return;
- }
-
- /*
- * Check that it's a plain table; we used to do this in getrels() but
- * seems safer to check after we've locked the relation.
- */
- if (onerel->rd_rel->relkind != expected_relkind)
- {
- elog(WARNING, "Skipping \"%s\" --- can not process indexes, views or special system tables",
- RelationGetRelationName(onerel));
- relation_close(onerel, lmode);
- CommitTransactionCommand();
- return;
- }
-
- /*
- * Get a session-level lock too. This will protect our access to the
- * relation across multiple transactions, so that we can vacuum the
- * relation's TOAST table (if any) secure in the knowledge that no one
- * is deleting the parent relation.
- *
- * NOTE: this cannot block, even if someone else is waiting for access,
- * because the lock manager knows that both lock requests are from the
- * same process.
- */
- onerelid = onerel->rd_lockInfo.lockRelId;
- LockRelationForSession(&onerelid, lmode);
-
- /*
- * Remember the relation's TOAST relation for later
- */
- toast_relid = onerel->rd_rel->reltoastrelid;
-
- /*
- * Do the actual work --- either FULL or "lazy" vacuum
- */
- if (vacstmt->full)
- full_vacuum_rel(onerel, vacstmt);
- else
- lazy_vacuum_rel(onerel, vacstmt);
-
- /* all done with this class, but hold lock until commit */
- relation_close(onerel, NoLock);
-
- /*
- * Complete the transaction and free all temporary memory used.
- */
- CommitTransactionCommand();
-
- /*
- * If the relation has a secondary toast rel, vacuum that too while we
- * still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good,
- * because the toaster always uses hardcoded index access and
- * statistics are totally unimportant for toast relations.
- */
- if (toast_relid != InvalidOid)
- vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE);
-
- /*
- * Now release the session-level lock on the master table.
- */
- UnlockRelationForSession(&onerelid, lmode);
-}
-
-
-/****************************************************************************
- * *
- * Code for VACUUM FULL (only) *
- * *
- ****************************************************************************
- */
-
-
-/*
- * full_vacuum_rel() -- perform FULL VACUUM for one heap relation
- *
- * This routine vacuums a single heap, cleans out its indexes, and
- * updates its num_pages and num_tuples statistics.
- *
- * At entry, we have already established a transaction and opened
- * and locked the relation.
- *
- * Overall flow: scan the heap collecting vacuum_pages (pages needing
- * cleanup) and fraged_pages (pages with reusable space), clean or scan
- * the indexes, optionally shrink the heap via repair_frag, then refresh
- * the free space map and pg_class statistics.
- */
-static void
-full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
-{
- VacPageListData vacuum_pages; /* List of pages to vacuum and/or
- * clean indexes */
- VacPageListData fraged_pages; /* List of pages with space enough
- * for re-using */
- Relation *Irel;
- int nindexes,
- i;
- VRelStats *vacrelstats;
- bool reindex = false;
-
- if (IsIgnoringSystemIndexes() &&
- IsSystemRelation(onerel))
- reindex = true;
-
- /* sets the module-global OldestXmin and FreezeLimit for this table */
- vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
- &OldestXmin, &FreezeLimit);
-
- /*
- * Set up statistics-gathering machinery.
- */
- vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
- vacrelstats->rel_pages = 0;
- vacrelstats->rel_tuples = 0;
- vacrelstats->hasindex = false;
-
- /* scan the heap */
- vacuum_pages.num_pages = fraged_pages.num_pages = 0;
- scan_heap(vacrelstats, onerel, &vacuum_pages, &fraged_pages);
-
- /* Now open all indexes of the relation */
- vac_open_indexes(onerel, &nindexes, &Irel);
- if (!Irel)
- reindex = false;
- else if (!RelationGetForm(onerel)->relhasindex)
- reindex = true;
- if (nindexes > 0)
- vacrelstats->hasindex = true;
-
-#ifdef NOT_USED
-
- /*
- * reindex in VACUUM is dangerous under WAL. ifdef out until it
- * becomes safe.
- */
- if (reindex)
- {
- vac_close_indexes(nindexes, Irel);
- Irel = (Relation *) NULL;
- activate_indexes_of_a_table(RelationGetRelid(onerel), false);
- }
-#endif /* NOT_USED */
-
- /* Clean/scan index relation(s) */
- if (Irel != (Relation *) NULL)
- {
- if (vacuum_pages.num_pages > 0)
- {
- for (i = 0; i < nindexes; i++)
- vacuum_index(&vacuum_pages, Irel[i],
- vacrelstats->rel_tuples, 0);
- }
- else
- {
- /* just scan indexes to update statistic */
- for (i = 0; i < nindexes; i++)
- scan_index(Irel[i], vacrelstats->rel_tuples);
- }
- }
-
- if (fraged_pages.num_pages > 0)
- {
- /* Try to shrink heap */
- repair_frag(vacrelstats, onerel, &vacuum_pages, &fraged_pages,
- nindexes, Irel);
- vac_close_indexes(nindexes, Irel);
- }
- else
- {
- vac_close_indexes(nindexes, Irel);
- if (vacuum_pages.num_pages > 0)
- {
- /* Clean pages from vacuum_pages list */
- vacuum_heap(vacrelstats, onerel, &vacuum_pages);
- }
- else
- {
- /*
- * Flush dirty pages out to disk. We must do this even if we
- * didn't do anything else, because we want to ensure that all
- * tuples have correct on-row commit status on disk (see
- * bufmgr.c's comments for FlushRelationBuffers()).
- */
- i = FlushRelationBuffers(onerel, vacrelstats->rel_pages);
- if (i < 0)
- elog(ERROR, "VACUUM (full_vacuum_rel): FlushRelationBuffers returned %d",
- i);
- }
- }
-
-#ifdef NOT_USED
- if (reindex)
- activate_indexes_of_a_table(RelationGetRelid(onerel), true);
-#endif /* NOT_USED */
-
- /* update shared free space map with final free space info */
- vac_update_fsm(onerel, &fraged_pages, vacrelstats->rel_pages);
-
- /* update statistics in pg_class */
- vac_update_relstats(RelationGetRelid(onerel), vacrelstats->rel_pages,
- vacrelstats->rel_tuples, vacrelstats->hasindex);
-}
-
-
-/*
- * scan_heap() -- scan an open heap relation
- *
- * This routine sets commit status bits, constructs vacuum_pages (list
- * of pages we need to compact free space on and/or clean indexes of
- * deleted tuples), constructs fraged_pages (list of pages with free
- * space that tuples could be moved into), and calculates statistics
- * on the number of live tuples in the heap.
- */
-static void
-scan_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages)
-{
- BlockNumber nblocks,
- blkno;
- ItemId itemid;
- Buffer buf;
- HeapTupleData tuple;
- OffsetNumber offnum,
- maxoff;
- bool pgchanged,
- tupgone,
- notup;
- char *relname;
- VacPage vacpage,
- vacpagecopy;
- BlockNumber empty_pages,
- new_pages,
- changed_pages,
- empty_end_pages;
- double num_tuples,
- tups_vacuumed,
- nkeep,
- nunused;
- double free_size,
- usable_free_size;
- Size min_tlen = MaxTupleSize;
- Size max_tlen = 0;
- int i;
- bool do_shrinking = true;
- VTupleLink vtlinks = (VTupleLink) palloc(100 * sizeof(VTupleLinkData));
- int num_vtlinks = 0;
- int free_vtlinks = 100;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- relname = RelationGetRelationName(onerel);
- elog(elevel, "--Relation %s.%s--",
- get_namespace_name(RelationGetNamespace(onerel)),
- relname);
-
- empty_pages = new_pages = changed_pages = empty_end_pages = 0;
- num_tuples = tups_vacuumed = nkeep = nunused = 0;
- free_size = 0;
-
- nblocks = RelationGetNumberOfBlocks(onerel);
-
- /*
- * We initially create each VacPage item in a maximal-sized workspace,
- * then copy the workspace into a just-large-enough copy.
- */
- vacpage = (VacPage) palloc(sizeof(VacPageData) + MaxOffsetNumber * sizeof(OffsetNumber));
-
- for (blkno = 0; blkno < nblocks; blkno++)
- {
- Page page,
- tempPage = NULL;
- bool do_reap,
- do_frag;
-
- CHECK_FOR_INTERRUPTS();
-
- buf = ReadBuffer(onerel, blkno);
- page = BufferGetPage(buf);
-
- vacpage->blkno = blkno;
- vacpage->offsets_used = 0;
- vacpage->offsets_free = 0;
-
- if (PageIsNew(page))
- {
- elog(WARNING, "Rel %s: Uninitialized page %u - fixing",
- relname, blkno);
- PageInit(page, BufferGetPageSize(buf), 0);
- vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
- free_size += (vacpage->free - sizeof(ItemIdData));
- new_pages++;
- empty_end_pages++;
- vacpagecopy = copy_vac_page(vacpage);
- vpage_insert(vacuum_pages, vacpagecopy);
- vpage_insert(fraged_pages, vacpagecopy);
- WriteBuffer(buf);
- continue;
- }
-
- if (PageIsEmpty(page))
- {
- vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
- free_size += (vacpage->free - sizeof(ItemIdData));
- empty_pages++;
- empty_end_pages++;
- vacpagecopy = copy_vac_page(vacpage);
- vpage_insert(vacuum_pages, vacpagecopy);
- vpage_insert(fraged_pages, vacpagecopy);
- ReleaseBuffer(buf);
- continue;
- }
-
- pgchanged = false;
- notup = true;
- maxoff = PageGetMaxOffsetNumber(page);
- for (offnum = FirstOffsetNumber;
- offnum <= maxoff;
- offnum = OffsetNumberNext(offnum))
- {
- uint16 sv_infomask;
-
- itemid = PageGetItemId(page, offnum);
-
- /*
- * Collect un-used items too - it's possible to have indexes
- * pointing here after crash.
- */
- if (!ItemIdIsUsed(itemid))
- {
- vacpage->offsets[vacpage->offsets_free++] = offnum;
- nunused += 1;
- continue;
- }
-
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- tuple.t_len = ItemIdGetLength(itemid);
- ItemPointerSet(&(tuple.t_self), blkno, offnum);
-
- tupgone = false;
- sv_infomask = tuple.t_data->t_infomask;
-
- switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin))
- {
- case HEAPTUPLE_DEAD:
- tupgone = true; /* we can delete the tuple */
- break;
- case HEAPTUPLE_LIVE:
-
- /*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
- */
- if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- FreezeLimit))
- {
- HeapTupleHeaderSetXmin(tuple.t_data, FrozenTransactionId);
- /* infomask should be okay already */
- Assert(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED);
- pgchanged = true;
- }
- break;
- case HEAPTUPLE_RECENTLY_DEAD:
-
- /*
- * If tuple is recently deleted then we must not
- * remove it from relation.
- */
- nkeep += 1;
-
- /*
- * If we do shrinking and this tuple is updated one
- * then remember it to construct updated tuple
- * dependencies.
- */
- if (do_shrinking &&
- !(ItemPointerEquals(&(tuple.t_self),
- &(tuple.t_data->t_ctid))))
- {
- if (free_vtlinks == 0)
- {
- free_vtlinks = 1000;
- vtlinks = (VTupleLink) repalloc(vtlinks,
- (free_vtlinks + num_vtlinks) *
- sizeof(VTupleLinkData));
- }
- vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
- vtlinks[num_vtlinks].this_tid = tuple.t_self;
- free_vtlinks--;
- num_vtlinks++;
- }
- break;
- case HEAPTUPLE_INSERT_IN_PROGRESS:
-
- /*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- */
- elog(WARNING, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
- relname, blkno, offnum, HeapTupleHeaderGetXmin(tuple.t_data));
- do_shrinking = false;
- break;
- case HEAPTUPLE_DELETE_IN_PROGRESS:
-
- /*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- */
- elog(WARNING, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
- relname, blkno, offnum, HeapTupleHeaderGetXmax(tuple.t_data));
- do_shrinking = false;
- break;
- default:
- elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
- break;
- }
-
- /* check for hint-bit update by HeapTupleSatisfiesVacuum */
- if (sv_infomask != tuple.t_data->t_infomask)
- pgchanged = true;
-
- /*
- * Other checks...
- */
- if (!OidIsValid(tuple.t_data->t_oid) &&
- onerel->rd_rel->relhasoids)
- elog(WARNING, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
- relname, blkno, offnum, (int) tupgone);
-
- if (tupgone)
- {
- ItemId lpp;
-
- /*
- * Here we are building a temporary copy of the page with
- * dead tuples removed. Below we will apply
- * PageRepairFragmentation to the copy, so that we can
- * determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
- * the real page yet...
- */
- if (tempPage == (Page) NULL)
- {
- Size pageSize;
-
- pageSize = PageGetPageSize(page);
- tempPage = (Page) palloc(pageSize);
- memcpy(tempPage, page, pageSize);
- }
-
- /* mark it unused on the temp page */
- lpp = PageGetItemId(tempPage, offnum);
- lpp->lp_flags &= ~LP_USED;
-
- vacpage->offsets[vacpage->offsets_free++] = offnum;
- tups_vacuumed += 1;
- }
- else
- {
- num_tuples += 1;
- notup = false;
- if (tuple.t_len < min_tlen)
- min_tlen = tuple.t_len;
- if (tuple.t_len > max_tlen)
- max_tlen = tuple.t_len;
- }
- } /* scan along page */
-
- if (tempPage != (Page) NULL)
- {
- /* Some tuples are removable; figure free space after removal */
- PageRepairFragmentation(tempPage, NULL);
- vacpage->free = ((PageHeader) tempPage)->pd_upper - ((PageHeader) tempPage)->pd_lower;
- pfree(tempPage);
- do_reap = true;
- }
- else
- {
- /* Just use current available space */
- vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
- /* Need to reap the page if it has ~LP_USED line pointers */
- do_reap = (vacpage->offsets_free > 0);
- }
-
- free_size += vacpage->free;
-
- /*
- * Add the page to fraged_pages if it has a useful amount of free
- * space. "Useful" means enough for a minimal-sized tuple. But we
- * don't know that accurately near the start of the relation, so
- * add pages unconditionally if they have >= BLCKSZ/10 free space.
- */
- do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
-
- if (do_reap || do_frag)
- {
- vacpagecopy = copy_vac_page(vacpage);
- if (do_reap)
- vpage_insert(vacuum_pages, vacpagecopy);
- if (do_frag)
- vpage_insert(fraged_pages, vacpagecopy);
- }
-
- if (notup)
- empty_end_pages++;
- else
- empty_end_pages = 0;
-
- if (pgchanged)
- {
- WriteBuffer(buf);
- changed_pages++;
- }
- else
- ReleaseBuffer(buf);
- }
-
- pfree(vacpage);
-
- /* save stats in the rel list for use later */
- vacrelstats->rel_tuples = num_tuples;
- vacrelstats->rel_pages = nblocks;
- if (num_tuples == 0)
- min_tlen = max_tlen = 0;
- vacrelstats->min_tlen = min_tlen;
- vacrelstats->max_tlen = max_tlen;
-
- vacuum_pages->empty_end_pages = empty_end_pages;
- fraged_pages->empty_end_pages = empty_end_pages;
-
- /*
- * Clear the fraged_pages list if we found we couldn't shrink. Else,
- * remove any "empty" end-pages from the list, and compute usable free
- * space = free space in remaining pages.
- */
- if (do_shrinking)
- {
- Assert((BlockNumber) fraged_pages->num_pages >= empty_end_pages);
- fraged_pages->num_pages -= empty_end_pages;
- usable_free_size = 0;
- for (i = 0; i < fraged_pages->num_pages; i++)
- usable_free_size += fraged_pages->pagedesc[i]->free;
- }
- else
- {
- fraged_pages->num_pages = 0;
- usable_free_size = 0;
- }
-
- if (usable_free_size > 0 && num_vtlinks > 0)
- {
- qsort((char *) vtlinks, num_vtlinks, sizeof(VTupleLinkData),
- vac_cmp_vtlinks);
- vacrelstats->vtlinks = vtlinks;
- vacrelstats->num_vtlinks = num_vtlinks;
- }
- else
- {
- vacrelstats->vtlinks = NULL;
- vacrelstats->num_vtlinks = 0;
- pfree(vtlinks);
- }
-
- elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
-Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \
-Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s",
- nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
- new_pages, num_tuples, tups_vacuumed,
- nkeep, vacrelstats->num_vtlinks,
- nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
- free_size, usable_free_size,
- empty_end_pages, fraged_pages->num_pages,
- vac_show_rusage(&ru0));
-
-}
-
-
-/*
- * repair_frag() -- try to repair relation's fragmentation
- *
- * This routine marks dead tuples as unused and tries to re-use dead
- * space by moving tuples (and inserting index entries if needed). It
- * constructs the Nvacpagelist list of freed pages (whose tuples were
- * moved) and cleans the indexes for them after committing the current
- * transaction (in a hackish manner - without releasing locks or
- * freeing memory!). It truncates the relation if some end-blocks
- * have become entirely empty.
- */
-static void
-repair_frag(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages,
- int nindexes, Relation *Irel)
-{
- TransactionId myXID;
- CommandId myCID;
- Buffer buf,
- cur_buffer;
- BlockNumber nblocks,
- blkno;
- BlockNumber last_move_dest_block = 0,
- last_vacuum_block;
- Page page,
- ToPage = NULL;
- OffsetNumber offnum,
- maxoff,
- newoff,
- max_offset;
- ItemId itemid,
- newitemid;
- HeapTupleData tuple,
- newtup;
- TupleDesc tupdesc;
- ResultRelInfo *resultRelInfo;
- EState *estate;
- TupleTable tupleTable;
- TupleTableSlot *slot;
- VacPageListData Nvacpagelist;
- VacPage cur_page = NULL,
- last_vacuum_page,
- vacpage,
- *curpage;
- int cur_item = 0;
- int i;
- Size tuple_len;
- int num_moved,
- num_fraged_pages,
- vacuumed_pages;
- int checked_moved,
- num_tuples,
- keep_tuples = 0;
- bool isempty,
- dowrite,
- chain_tuple_moved;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- myXID = GetCurrentTransactionId();
- myCID = GetCurrentCommandId();
-
- tupdesc = RelationGetDescr(onerel);
-
- /*
- * We need a ResultRelInfo and an EState so we can use the regular
- * executor's index-entry-making machinery.
- */
- resultRelInfo = makeNode(ResultRelInfo);
- resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
- resultRelInfo->ri_RelationDesc = onerel;
- resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
-
- ExecOpenIndices(resultRelInfo);
-
- estate = CreateExecutorState();
- estate->es_result_relations = resultRelInfo;
- estate->es_num_result_relations = 1;
- estate->es_result_relation_info = resultRelInfo;
-
- /* Set up a dummy tuple table too */
- tupleTable = ExecCreateTupleTable(1);
- slot = ExecAllocTableSlot(tupleTable);
- ExecSetSlotDescriptor(slot, tupdesc, false);
-
- Nvacpagelist.num_pages = 0;
- num_fraged_pages = fraged_pages->num_pages;
- Assert((BlockNumber) vacuum_pages->num_pages >= vacuum_pages->empty_end_pages);
- vacuumed_pages = vacuum_pages->num_pages - vacuum_pages->empty_end_pages;
- if (vacuumed_pages > 0)
- {
- /* get last reaped page from vacuum_pages */
- last_vacuum_page = vacuum_pages->pagedesc[vacuumed_pages - 1];
- last_vacuum_block = last_vacuum_page->blkno;
- }
- else
- {
- last_vacuum_page = NULL;
- last_vacuum_block = InvalidBlockNumber;
- }
- cur_buffer = InvalidBuffer;
- num_moved = 0;
-
- vacpage = (VacPage) palloc(sizeof(VacPageData) + MaxOffsetNumber * sizeof(OffsetNumber));
- vacpage->offsets_used = vacpage->offsets_free = 0;
-
- /*
- * Scan pages backwards from the last nonempty page, trying to move
- * tuples down to lower pages. Quit when we reach a page that we have
- * moved any tuples onto, or the first page if we haven't moved
- * anything, or when we find a page we cannot completely empty (this
- * last condition is handled by "break" statements within the loop).
- *
- * NB: this code depends on the vacuum_pages and fraged_pages lists being
- * in order by blkno.
- */
- nblocks = vacrelstats->rel_pages;
- for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
- blkno > last_move_dest_block;
- blkno--)
- {
- CHECK_FOR_INTERRUPTS();
-
- /*
- * Forget fraged_pages pages at or after this one; they're no
- * longer useful as move targets, since we only want to move down.
- * Note that since we stop the outer loop at last_move_dest_block,
- * pages removed here cannot have had anything moved onto them
- * already.
- *
- * Also note that we don't change the stored fraged_pages list, only
- * our local variable num_fraged_pages; so the forgotten pages are
- * still available to be loaded into the free space map later.
- */
- while (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
- {
- Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
- --num_fraged_pages;
- }
-
- /*
- * Process this page of relation.
- */
- buf = ReadBuffer(onerel, blkno);
- page = BufferGetPage(buf);
-
- vacpage->offsets_free = 0;
-
- isempty = PageIsEmpty(page);
-
- dowrite = false;
-
- /* Is the page in the vacuum_pages list? */
- if (blkno == last_vacuum_block)
- {
- if (last_vacuum_page->offsets_free > 0)
- {
- /* there are dead tuples on this page - clean them */
- Assert(!isempty);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- vacuum_page(onerel, buf, last_vacuum_page);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- dowrite = true;
- }
- else
- Assert(isempty);
- --vacuumed_pages;
- if (vacuumed_pages > 0)
- {
- /* get prev reaped page from vacuum_pages */
- last_vacuum_page = vacuum_pages->pagedesc[vacuumed_pages - 1];
- last_vacuum_block = last_vacuum_page->blkno;
- }
- else
- {
- last_vacuum_page = NULL;
- last_vacuum_block = InvalidBlockNumber;
- }
- if (isempty)
- {
- ReleaseBuffer(buf);
- continue;
- }
- }
- else
- Assert(!isempty);
-
- chain_tuple_moved = false; /* no one chain-tuple was moved
- * off this page, yet */
- vacpage->blkno = blkno;
- maxoff = PageGetMaxOffsetNumber(page);
- for (offnum = FirstOffsetNumber;
- offnum <= maxoff;
- offnum = OffsetNumberNext(offnum))
- {
- itemid = PageGetItemId(page, offnum);
-
- if (!ItemIdIsUsed(itemid))
- continue;
-
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- tuple_len = tuple.t_len = ItemIdGetLength(itemid);
- ItemPointerSet(&(tuple.t_self), blkno, offnum);
-
- if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
- {
- if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header");
- if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
- elog(ERROR, "HEAP_MOVED_IN was not expected");
-
- /*
- * If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it
- * moved while cleaning this page or some previous one.
- */
- if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
- {
- if (keep_tuples == 0)
- continue;
- if (chain_tuple_moved) /* some chains was moved
- * while */
- { /* cleaning this page */
- Assert(vacpage->offsets_free > 0);
- for (i = 0; i < vacpage->offsets_free; i++)
- {
- if (vacpage->offsets[i] == offnum)
- break;
- }
- if (i >= vacpage->offsets_free) /* not found */
- {
- vacpage->offsets[vacpage->offsets_free++] = offnum;
- keep_tuples--;
- }
- }
- else
- {
- vacpage->offsets[vacpage->offsets_free++] = offnum;
- keep_tuples--;
- }
- continue;
- }
- elog(ERROR, "HEAP_MOVED_OFF was expected");
- }
-
- /*
- * If this tuple is in the chain of tuples created in updates
- * by "recent" transactions then we have to move all chain of
- * tuples to another places.
- */
- if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
- !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- OldestXmin)) ||
- (!(tuple.t_data->t_infomask & HEAP_XMAX_INVALID) &&
- !(ItemPointerEquals(&(tuple.t_self),
- &(tuple.t_data->t_ctid)))))
- {
- Buffer Cbuf = buf;
- Page Cpage;
- ItemId Citemid;
- ItemPointerData Ctid;
- HeapTupleData tp = tuple;
- Size tlen = tuple_len;
- VTupleMove vtmove = (VTupleMove)
- palloc(100 * sizeof(VTupleMoveData));
- int num_vtmove = 0;
- int free_vtmove = 100;
- VacPage to_vacpage = NULL;
- int to_item = 0;
- bool freeCbuf = false;
- int ti;
-
- if (vacrelstats->vtlinks == NULL)
- elog(ERROR, "No one parent tuple was found");
- if (cur_buffer != InvalidBuffer)
- {
- WriteBuffer(cur_buffer);
- cur_buffer = InvalidBuffer;
- }
-
- /*
- * If this tuple is in the begin/middle of the chain then
- * we have to move to the end of chain.
- */
- while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
- !(ItemPointerEquals(&(tp.t_self),
- &(tp.t_data->t_ctid))))
- {
- Ctid = tp.t_data->t_ctid;
- if (freeCbuf)
- ReleaseBuffer(Cbuf);
- freeCbuf = true;
- Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&Ctid));
- Cpage = BufferGetPage(Cbuf);
- Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&Ctid));
- if (!ItemIdIsUsed(Citemid))
- {
- /*
- * This means that in the middle of chain there
- * was tuple updated by older (than OldestXmin)
- * xaction and this tuple is already deleted by
- * me. Actually, upper part of chain should be
- * removed and seems that this should be handled
- * in scan_heap(), but it's not implemented at the
- * moment and so we just stop shrinking here.
- */
- ReleaseBuffer(Cbuf);
- pfree(vtmove);
- vtmove = NULL;
- elog(WARNING, "Child itemid in update-chain marked as unused - can't continue repair_frag");
- break;
- }
- tp.t_datamcxt = NULL;
- tp.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
- tp.t_self = Ctid;
- tlen = tp.t_len = ItemIdGetLength(Citemid);
- }
- if (vtmove == NULL)
- break;
- /* first, can chain be moved ? */
- for (;;)
- {
- if (to_vacpage == NULL ||
- !enough_space(to_vacpage, tlen))
- {
- for (i = 0; i < num_fraged_pages; i++)
- {
- if (enough_space(fraged_pages->pagedesc[i], tlen))
- break;
- }
-
- if (i == num_fraged_pages)
- {
- /* can't move item anywhere */
- for (i = 0; i < num_vtmove; i++)
- {
- Assert(vtmove[i].vacpage->offsets_used > 0);
- (vtmove[i].vacpage->offsets_used)--;
- }
- num_vtmove = 0;
- break;
- }
- to_item = i;
- to_vacpage = fraged_pages->pagedesc[to_item];
- }
- to_vacpage->free -= MAXALIGN(tlen);
- if (to_vacpage->offsets_used >= to_vacpage->offsets_free)
- to_vacpage->free -= MAXALIGN(sizeof(ItemIdData));
- (to_vacpage->offsets_used)++;
- if (free_vtmove == 0)
- {
- free_vtmove = 1000;
- vtmove = (VTupleMove) repalloc(vtmove,
- (free_vtmove + num_vtmove) *
- sizeof(VTupleMoveData));
- }
- vtmove[num_vtmove].tid = tp.t_self;
- vtmove[num_vtmove].vacpage = to_vacpage;
- if (to_vacpage->offsets_used == 1)
- vtmove[num_vtmove].cleanVpd = true;
- else
- vtmove[num_vtmove].cleanVpd = false;
- free_vtmove--;
- num_vtmove++;
-
- /* All done ? */
- if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
- OldestXmin))
- break;
-
- /* Well, try to find tuple with old row version */
- for (;;)
- {
- Buffer Pbuf;
- Page Ppage;
- ItemId Pitemid;
- HeapTupleData Ptp;
- VTupleLinkData vtld,
- *vtlp;
-
- vtld.new_tid = tp.t_self;
- vtlp = (VTupleLink)
- vac_bsearch((void *) &vtld,
- (void *) (vacrelstats->vtlinks),
- vacrelstats->num_vtlinks,
- sizeof(VTupleLinkData),
- vac_cmp_vtlinks);
- if (vtlp == NULL)
- elog(ERROR, "Parent tuple was not found");
- tp.t_self = vtlp->this_tid;
- Pbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tp.t_self)));
- Ppage = BufferGetPage(Pbuf);
- Pitemid = PageGetItemId(Ppage,
- ItemPointerGetOffsetNumber(&(tp.t_self)));
- if (!ItemIdIsUsed(Pitemid))
- elog(ERROR, "Parent itemid marked as unused");
- Ptp.t_datamcxt = NULL;
- Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
- Assert(ItemPointerEquals(&(vtld.new_tid),
- &(Ptp.t_data->t_ctid)));
-
- /*
- * Read above about cases when
- * !ItemIdIsUsed(Citemid) (child item is
- * removed)... Due to the fact that at the moment
- * we don't remove unuseful part of update-chain,
- * it's possible to get too old parent row here.
- * Like as in the case which caused this problem,
- * we stop shrinking here. I could try to find
- * real parent row but want not to do it because
- * of real solution will be implemented anyway,
- * latter, and we are too close to 6.5 release. -
- * vadim 06/11/99
- */
- if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
- HeapTupleHeaderGetXmin(tp.t_data))))
- {
- if (freeCbuf)
- ReleaseBuffer(Cbuf);
- freeCbuf = false;
- ReleaseBuffer(Pbuf);
- for (i = 0; i < num_vtmove; i++)
- {
- Assert(vtmove[i].vacpage->offsets_used > 0);
- (vtmove[i].vacpage->offsets_used)--;
- }
- num_vtmove = 0;
- elog(WARNING, "Too old parent tuple found - can't continue repair_frag");
- break;
- }
-#ifdef NOT_USED /* I'm not sure that this will wotk
- * properly... */
-
- /*
- * If this tuple is updated version of row and it
- * was created by the same transaction then no one
- * is interested in this tuple - mark it as
- * removed.
- */
- if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
- TransactionIdEquals(HeapTupleHeaderGetXmin(Ptp.t_data),
- HeapTupleHeaderGetXmax(Ptp.t_data)))
- {
- Ptp.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
- Ptp.t_data->t_infomask |= HEAP_MOVED_OFF;
- HeapTupleHeaderSetXvac(Ptp.t_data, myXID);
- WriteBuffer(Pbuf);
- continue;
- }
-#endif
- tp.t_datamcxt = Ptp.t_datamcxt;
- tp.t_data = Ptp.t_data;
- tlen = tp.t_len = ItemIdGetLength(Pitemid);
- if (freeCbuf)
- ReleaseBuffer(Cbuf);
- Cbuf = Pbuf;
- freeCbuf = true;
- break;
- }
- if (num_vtmove == 0)
- break;
- }
- if (freeCbuf)
- ReleaseBuffer(Cbuf);
- if (num_vtmove == 0) /* chain can't be moved */
- {
- pfree(vtmove);
- break;
- }
- ItemPointerSetInvalid(&Ctid);
- for (ti = 0; ti < num_vtmove; ti++)
- {
- VacPage destvacpage = vtmove[ti].vacpage;
-
- /* Get page to move from */
- tuple.t_self = vtmove[ti].tid;
- Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tuple.t_self)));
-
- /* Get page to move to */
- cur_buffer = ReadBuffer(onerel, destvacpage->blkno);
-
- LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
- if (cur_buffer != Cbuf)
- LockBuffer(Cbuf, BUFFER_LOCK_EXCLUSIVE);
-
- ToPage = BufferGetPage(cur_buffer);
- Cpage = BufferGetPage(Cbuf);
-
- Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&(tuple.t_self)));
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
- tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
-
- /*
- * make a copy of the source tuple, and then mark the
- * source tuple MOVED_OFF.
- */
- heap_copytuple_with_tuple(&tuple, &newtup);
-
- /*
- * register invalidation of source tuple in catcaches.
- */
- CacheInvalidateHeapTuple(onerel, &tuple);
-
- /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
- START_CRIT_SECTION();
-
- tuple.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
- tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
- HeapTupleHeaderSetXvac(tuple.t_data, myXID);
-
- /*
- * If this page was not used before - clean it.
- *
- * NOTE: a nasty bug used to lurk here. It is possible
- * for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page
- * lower than the one we're currently processing in
- * the outer loop). If that's true, then after
- * vacuum_page() the source tuple will have been
- * moved, and tuple.t_data will be pointing at
- * garbage. Therefore we must do everything that uses
- * tuple.t_data BEFORE this step!!
- *
- * This path is different from the other callers of
- * vacuum_page, because we have already incremented
- * the vacpage's offsets_used field to account for the
- * tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong.
- * But since that's a good debugging check for all
- * other callers, we work around it here rather than
- * remove it.
- */
- if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
- {
- int sv_offsets_used = destvacpage->offsets_used;
-
- destvacpage->offsets_used = 0;
- vacuum_page(onerel, cur_buffer, destvacpage);
- destvacpage->offsets_used = sv_offsets_used;
- }
-
- /*
- * Update the state of the copied tuple, and store it
- * on the destination page.
- */
- newtup.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
- newtup.t_data->t_infomask |= HEAP_MOVED_IN;
- HeapTupleHeaderSetXvac(newtup.t_data, myXID);
- newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
- InvalidOffsetNumber, LP_USED);
- if (newoff == InvalidOffsetNumber)
- {
- elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
- (unsigned long) tuple_len, destvacpage->blkno);
- }
- newitemid = PageGetItemId(ToPage, newoff);
- pfree(newtup.t_data);
- newtup.t_datamcxt = NULL;
- newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
- ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);
-
- {
- XLogRecPtr recptr =
- log_heap_move(onerel, Cbuf, tuple.t_self,
- cur_buffer, &newtup);
-
- if (Cbuf != cur_buffer)
- {
- PageSetLSN(Cpage, recptr);
- PageSetSUI(Cpage, ThisStartUpID);
- }
- PageSetLSN(ToPage, recptr);
- PageSetSUI(ToPage, ThisStartUpID);
- }
- END_CRIT_SECTION();
-
- if (destvacpage->blkno > last_move_dest_block)
- last_move_dest_block = destvacpage->blkno;
-
- /*
- * Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain
- * otherwise.
- */
- if (!ItemPointerIsValid(&Ctid))
- newtup.t_data->t_ctid = newtup.t_self;
- else
- newtup.t_data->t_ctid = Ctid;
- Ctid = newtup.t_self;
-
- num_moved++;
-
- /*
- * Remember that we moved tuple from the current page
- * (corresponding index tuple will be cleaned).
- */
- if (Cbuf == buf)
- vacpage->offsets[vacpage->offsets_free++] =
- ItemPointerGetOffsetNumber(&(tuple.t_self));
- else
- keep_tuples++;
-
- LockBuffer(cur_buffer, BUFFER_LOCK_UNLOCK);
- if (cur_buffer != Cbuf)
- LockBuffer(Cbuf, BUFFER_LOCK_UNLOCK);
-
- /* Create index entries for the moved tuple */
- if (resultRelInfo->ri_NumIndices > 0)
- {
- ExecStoreTuple(&newtup, slot, InvalidBuffer, false);
- ExecInsertIndexTuples(slot, &(newtup.t_self),
- estate, true);
- }
-
- WriteBuffer(cur_buffer);
- WriteBuffer(Cbuf);
- }
- cur_buffer = InvalidBuffer;
- pfree(vtmove);
- chain_tuple_moved = true;
- continue;
- }
-
- /* try to find new page for this tuple */
- if (cur_buffer == InvalidBuffer ||
- !enough_space(cur_page, tuple_len))
- {
- if (cur_buffer != InvalidBuffer)
- {
- WriteBuffer(cur_buffer);
- cur_buffer = InvalidBuffer;
- }
- for (i = 0; i < num_fraged_pages; i++)
- {
- if (enough_space(fraged_pages->pagedesc[i], tuple_len))
- break;
- }
- if (i == num_fraged_pages)
- break; /* can't move item anywhere */
- cur_item = i;
- cur_page = fraged_pages->pagedesc[cur_item];
- cur_buffer = ReadBuffer(onerel, cur_page->blkno);
- LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
- ToPage = BufferGetPage(cur_buffer);
- /* if this page was not used before - clean it */
- if (!PageIsEmpty(ToPage) && cur_page->offsets_used == 0)
- vacuum_page(onerel, cur_buffer, cur_page);
- }
- else
- LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
-
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-
- /* copy tuple */
- heap_copytuple_with_tuple(&tuple, &newtup);
-
- /*
- * register invalidation of source tuple in catcaches.
- *
- * (Note: we do not need to register the copied tuple,
- * because we are not changing the tuple contents and
- * so there cannot be any need to flush negative
- * catcache entries.)
- */
- CacheInvalidateHeapTuple(onerel, &tuple);
-
- /* NO ELOG(ERROR) TILL CHANGES ARE LOGGED */
- START_CRIT_SECTION();
-
- /*
- * Mark new tuple as moved_in by vacuum and store vacuum XID
- * in t_cmin !!!
- */
- newtup.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
- newtup.t_data->t_infomask |= HEAP_MOVED_IN;
- HeapTupleHeaderSetXvac(newtup.t_data, myXID);
-
- /* add tuple to the page */
- newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
- InvalidOffsetNumber, LP_USED);
- if (newoff == InvalidOffsetNumber)
- {
- elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
- (unsigned long) tuple_len,
- cur_page->blkno, (unsigned long) cur_page->free,
- cur_page->offsets_used, cur_page->offsets_free);
- }
- newitemid = PageGetItemId(ToPage, newoff);
- pfree(newtup.t_data);
- newtup.t_datamcxt = NULL;
- newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
- ItemPointerSet(&(newtup.t_data->t_ctid), cur_page->blkno, newoff);
- newtup.t_self = newtup.t_data->t_ctid;
-
- /*
- * Mark old tuple as moved_off by vacuum and store vacuum XID
- * in t_cmin !!!
- */
- tuple.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
- tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
- HeapTupleHeaderSetXvac(tuple.t_data, myXID);
-
- {
- XLogRecPtr recptr =
- log_heap_move(onerel, buf, tuple.t_self,
- cur_buffer, &newtup);
-
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- PageSetLSN(ToPage, recptr);
- PageSetSUI(ToPage, ThisStartUpID);
- }
- END_CRIT_SECTION();
-
- cur_page->offsets_used++;
- num_moved++;
- cur_page->free = ((PageHeader) ToPage)->pd_upper - ((PageHeader) ToPage)->pd_lower;
- if (cur_page->blkno > last_move_dest_block)
- last_move_dest_block = cur_page->blkno;
-
- vacpage->offsets[vacpage->offsets_free++] = offnum;
-
- LockBuffer(cur_buffer, BUFFER_LOCK_UNLOCK);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- /* insert index' tuples if needed */
- if (resultRelInfo->ri_NumIndices > 0)
- {
- ExecStoreTuple(&newtup, slot, InvalidBuffer, false);
- ExecInsertIndexTuples(slot, &(newtup.t_self), estate, true);
- }
- } /* walk along page */
-
- if (offnum < maxoff && keep_tuples > 0)
- {
- OffsetNumber off;
-
- for (off = OffsetNumberNext(offnum);
- off <= maxoff;
- off = OffsetNumberNext(off))
- {
- itemid = PageGetItemId(page, off);
- if (!ItemIdIsUsed(itemid))
- continue;
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
- continue;
- if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (4)");
- if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
- elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
- if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
- {
- /* some chains was moved while */
- if (chain_tuple_moved)
- { /* cleaning this page */
- Assert(vacpage->offsets_free > 0);
- for (i = 0; i < vacpage->offsets_free; i++)
- {
- if (vacpage->offsets[i] == off)
- break;
- }
- if (i >= vacpage->offsets_free) /* not found */
- {
- vacpage->offsets[vacpage->offsets_free++] = off;
- Assert(keep_tuples > 0);
- keep_tuples--;
- }
- }
- else
- {
- vacpage->offsets[vacpage->offsets_free++] = off;
- Assert(keep_tuples > 0);
- keep_tuples--;
- }
- }
- }
- }
-
- if (vacpage->offsets_free > 0) /* some tuples were moved */
- {
- if (chain_tuple_moved) /* else - they are ordered */
- {
- qsort((char *) (vacpage->offsets), vacpage->offsets_free,
- sizeof(OffsetNumber), vac_cmp_offno);
- }
- vpage_insert(&Nvacpagelist, copy_vac_page(vacpage));
- WriteBuffer(buf);
- }
- else if (dowrite)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
-
- if (offnum <= maxoff)
- break; /* some item(s) left */
-
- } /* walk along relation */
-
- blkno++; /* new number of blocks */
-
- if (cur_buffer != InvalidBuffer)
- {
- Assert(num_moved > 0);
- WriteBuffer(cur_buffer);
- }
-
- if (num_moved > 0)
- {
- /*
- * We have to commit our tuple movings before we truncate the
- * relation. Ideally we should do Commit/StartTransactionCommand
- * here, relying on the session-level table lock to protect our
- * exclusive access to the relation. However, that would require
- * a lot of extra code to close and re-open the relation, indexes,
- * etc. For now, a quick hack: record status of current
- * transaction as committed, and continue.
- */
- RecordTransactionCommit();
- }
-
- /*
- * We are not going to move any more tuples across pages, but we still
- * need to apply vacuum_page to compact free space in the remaining
- * pages in vacuum_pages list. Note that some of these pages may also
- * be in the fraged_pages list, and may have had tuples moved onto
- * them; if so, we already did vacuum_page and needn't do it again.
- */
- for (i = 0, curpage = vacuum_pages->pagedesc;
- i < vacuumed_pages;
- i++, curpage++)
- {
- CHECK_FOR_INTERRUPTS();
- Assert((*curpage)->blkno < blkno);
- if ((*curpage)->offsets_used == 0)
- {
- /* this page was not used as a move target, so must clean it */
- buf = ReadBuffer(onerel, (*curpage)->blkno);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(buf);
- if (!PageIsEmpty(page))
- vacuum_page(onerel, buf, *curpage);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- }
- }
-
- /*
- * Now scan all the pages that we moved tuples onto and update tuple
- * status bits. This is not really necessary, but will save time for
- * future transactions examining these tuples.
- *
- * XXX WARNING that this code fails to clear HEAP_MOVED_OFF tuples from
- * pages that were move source pages but not move dest pages. One
- * also wonders whether it wouldn't be better to skip this step and
- * let the tuple status updates happen someplace that's not holding an
- * exclusive lock on the relation.
- */
- checked_moved = 0;
- for (i = 0, curpage = fraged_pages->pagedesc;
- i < num_fraged_pages;
- i++, curpage++)
- {
- CHECK_FOR_INTERRUPTS();
- Assert((*curpage)->blkno < blkno);
- if ((*curpage)->blkno > last_move_dest_block)
- break; /* no need to scan any further */
- if ((*curpage)->offsets_used == 0)
- continue; /* this page was never used as a move dest */
- buf = ReadBuffer(onerel, (*curpage)->blkno);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(buf);
- num_tuples = 0;
- max_offset = PageGetMaxOffsetNumber(page);
- for (newoff = FirstOffsetNumber;
- newoff <= max_offset;
- newoff = OffsetNumberNext(newoff))
- {
- itemid = PageGetItemId(page, newoff);
- if (!ItemIdIsUsed(itemid))
- continue;
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
- {
- if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (2)");
- if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
- {
- tuple.t_data->t_infomask |= HEAP_XMIN_COMMITTED;
- num_tuples++;
- }
- else if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
- tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
- else
- elog(ERROR, "HEAP_MOVED_OFF/HEAP_MOVED_IN was expected");
- }
- }
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- Assert((*curpage)->offsets_used == num_tuples);
- checked_moved += num_tuples;
- }
- Assert(num_moved == checked_moved);
-
- elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
- RelationGetRelationName(onerel),
- nblocks, blkno, num_moved,
- vac_show_rusage(&ru0));
-
- /*
- * Reflect the motion of system tuples to catalog cache here.
- */
- CommandCounterIncrement();
-
- if (Nvacpagelist.num_pages > 0)
- {
- /* vacuum indexes again if needed */
- if (Irel != (Relation *) NULL)
- {
- VacPage *vpleft,
- *vpright,
- vpsave;
-
- /* re-sort Nvacpagelist.pagedesc */
- for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
- vpleft < vpright; vpleft++, vpright--)
- {
- vpsave = *vpleft;
- *vpleft = *vpright;
- *vpright = vpsave;
- }
- Assert(keep_tuples >= 0);
- for (i = 0; i < nindexes; i++)
- vacuum_index(&Nvacpagelist, Irel[i],
- vacrelstats->rel_tuples, keep_tuples);
- }
-
- /* clean moved tuples from last page in Nvacpagelist list */
- if (vacpage->blkno == (blkno - 1) &&
- vacpage->offsets_free > 0)
- {
- OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
- OffsetNumber *unused = unbuf;
- int uncnt;
-
- buf = ReadBuffer(onerel, vacpage->blkno);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(buf);
- num_tuples = 0;
- maxoff = PageGetMaxOffsetNumber(page);
- for (offnum = FirstOffsetNumber;
- offnum <= maxoff;
- offnum = OffsetNumberNext(offnum))
- {
- itemid = PageGetItemId(page, offnum);
- if (!ItemIdIsUsed(itemid))
- continue;
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
-
- if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
- {
- if (HeapTupleHeaderGetXvac(tuple.t_data) != myXID)
- elog(ERROR, "Invalid XVAC in tuple header (3)");
- if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
- {
- itemid->lp_flags &= ~LP_USED;
- num_tuples++;
- }
- else
- elog(ERROR, "HEAP_MOVED_OFF was expected (2)");
- }
-
- }
- Assert(vacpage->offsets_free == num_tuples);
- START_CRIT_SECTION();
- uncnt = PageRepairFragmentation(page, unused);
- {
- XLogRecPtr recptr;
-
- recptr = log_heap_clean(onerel, buf, (char *) unused,
- (char *) (&(unused[uncnt])) - (char *) unused);
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
- END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- }
-
- /* now - free new list of reaped pages */
- curpage = Nvacpagelist.pagedesc;
- for (i = 0; i < Nvacpagelist.num_pages; i++, curpage++)
- pfree(*curpage);
- pfree(Nvacpagelist.pagedesc);
- }
-
- /*
- * Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all
- * tuples have correct on-row commit status on disk (see bufmgr.c's
- * comments for FlushRelationBuffers()).
- */
- i = FlushRelationBuffers(onerel, blkno);
- if (i < 0)
- elog(ERROR, "VACUUM (repair_frag): FlushRelationBuffers returned %d",
- i);
-
- /* truncate relation, if needed */
- if (blkno < nblocks)
- {
- blkno = smgrtruncate(DEFAULT_SMGR, onerel, blkno);
- onerel->rd_nblocks = blkno; /* update relcache immediately */
- onerel->rd_targblock = InvalidBlockNumber;
- vacrelstats->rel_pages = blkno; /* set new number of blocks */
- }
-
- /* clean up */
- pfree(vacpage);
- if (vacrelstats->vtlinks != NULL)
- pfree(vacrelstats->vtlinks);
-
- ExecDropTupleTable(tupleTable, true);
-
- ExecCloseIndices(resultRelInfo);
-}
-
-/*
- * vacuum_heap() -- free dead tuples
- *
- * This routine marks dead tuples as unused and truncates relation
- * if there are "empty" end-blocks.
- */
-static void
-vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
-{
- Buffer buf;
- VacPage *vacpage;
- BlockNumber relblocks;
- int nblocks;
- int i;
-
- nblocks = vacuum_pages->num_pages;
- nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
-
- for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
- {
- CHECK_FOR_INTERRUPTS();
- if ((*vacpage)->offsets_free > 0)
- {
- buf = ReadBuffer(onerel, (*vacpage)->blkno);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- vacuum_page(onerel, buf, *vacpage);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- }
- }
-
- /*
- * Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all
- * tuples have correct on-row commit status on disk (see bufmgr.c's
- * comments for FlushRelationBuffers()).
- */
- Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages);
- relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages;
-
- i = FlushRelationBuffers(onerel, relblocks);
- if (i < 0)
- elog(ERROR, "VACUUM (vacuum_heap): FlushRelationBuffers returned %d",
- i);
-
- /* truncate relation if there are some empty end-pages */
- if (vacuum_pages->empty_end_pages > 0)
- {
- elog(elevel, "Rel %s: Pages: %u --> %u.",
- RelationGetRelationName(onerel),
- vacrelstats->rel_pages, relblocks);
- relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
- onerel->rd_nblocks = relblocks; /* update relcache immediately */
- onerel->rd_targblock = InvalidBlockNumber;
- vacrelstats->rel_pages = relblocks; /* set new number of
- * blocks */
- }
-}
-
-/*
- * vacuum_page() -- free dead tuples on a page
- * and repair its fragmentation.
- */
-static void
-vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
-{
- OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
- OffsetNumber *unused = unbuf;
- int uncnt;
- Page page = BufferGetPage(buffer);
- ItemId itemid;
- int i;
-
- /* There shouldn't be any tuples moved onto the page yet! */
- Assert(vacpage->offsets_used == 0);
-
- START_CRIT_SECTION();
- for (i = 0; i < vacpage->offsets_free; i++)
- {
- itemid = PageGetItemId(page, vacpage->offsets[i]);
- itemid->lp_flags &= ~LP_USED;
- }
- uncnt = PageRepairFragmentation(page, unused);
- {
- XLogRecPtr recptr;
-
- recptr = log_heap_clean(onerel, buffer, (char *) unused,
- (char *) (&(unused[uncnt])) - (char *) unused);
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
- END_CRIT_SECTION();
-}
-
-/*
- * scan_index() -- scan one index relation to update statistic.
- *
- * We use this when we have no deletions to do.
- */
-static void
-scan_index(Relation indrel, double num_tuples)
-{
- IndexBulkDeleteResult *stats;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- /*
- * Even though we're not planning to delete anything, use the
- * ambulkdelete call, so that the scan happens within the index AM for
- * more speed.
- */
- stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
-
- if (!stats)
- return;
-
- /* now update statistics in pg_class */
- vac_update_relstats(RelationGetRelid(indrel),
- stats->num_pages, stats->num_index_tuples,
- false);
-
- elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
- RelationGetRelationName(indrel),
- stats->num_pages, stats->num_index_tuples,
- vac_show_rusage(&ru0));
-
- /*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
- */
- if (stats->num_index_tuples != num_tuples)
- {
- if (stats->num_index_tuples > num_tuples ||
- !vac_is_partial_index(indrel))
- elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
- RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples);
- }
-
- pfree(stats);
-}
-
-/*
- * vacuum_index() -- vacuum one index relation.
- *
- * Vpl is the VacPageList of the heap we're currently vacuuming.
- * It's locked. Indrel is an index relation on the vacuumed heap.
- *
- * We don't bother to set locks on the index relation here, since
- * the parent table is exclusive-locked already.
- *
- * Finally, we arrange to update the index relation's statistics in
- * pg_class.
- */
-static void
-vacuum_index(VacPageList vacpagelist, Relation indrel,
- double num_tuples, int keep_tuples)
-{
- IndexBulkDeleteResult *stats;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- /* Do bulk deletion */
- stats = index_bulk_delete(indrel, tid_reaped, (void *) vacpagelist);
-
- if (!stats)
- return;
-
- /* now update statistics in pg_class */
- vac_update_relstats(RelationGetRelid(indrel),
- stats->num_pages, stats->num_index_tuples,
- false);
-
- elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
- RelationGetRelationName(indrel), stats->num_pages,
- stats->num_index_tuples - keep_tuples, stats->tuples_removed,
- vac_show_rusage(&ru0));
-
- /*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
- */
- if (stats->num_index_tuples != num_tuples + keep_tuples)
- {
- if (stats->num_index_tuples > num_tuples + keep_tuples ||
- !vac_is_partial_index(indrel))
- elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
- RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples);
- }
-
- pfree(stats);
-}
-
-/*
- * tid_reaped() -- is a particular tid reaped?
- *
- * This has the right signature to be an IndexBulkDeleteCallback.
- *
- * vacpagelist->VacPage_array is sorted in right order.
- */
-static bool
-tid_reaped(ItemPointer itemptr, void *state)
-{
- VacPageList vacpagelist = (VacPageList) state;
- OffsetNumber ioffno;
- OffsetNumber *voff;
- VacPage vp,
- *vpp;
- VacPageData vacpage;
-
- vacpage.blkno = ItemPointerGetBlockNumber(itemptr);
- ioffno = ItemPointerGetOffsetNumber(itemptr);
-
- vp = &vacpage;
- vpp = (VacPage *) vac_bsearch((void *) &vp,
- (void *) (vacpagelist->pagedesc),
- vacpagelist->num_pages,
- sizeof(VacPage),
- vac_cmp_blk);
-
- if (vpp == NULL)
- return false;
-
- /* ok - we are on a partially or fully reaped page */
- vp = *vpp;
-
- if (vp->offsets_free == 0)
- {
- /* this is EmptyPage, so claim all tuples on it are reaped!!! */
- return true;
- }
-
- voff = (OffsetNumber *) vac_bsearch((void *) &ioffno,
- (void *) (vp->offsets),
- vp->offsets_free,
- sizeof(OffsetNumber),
- vac_cmp_offno);
-
- if (voff == NULL)
- return false;
-
- /* tid is reaped */
- return true;
-}
-
-/*
- * Dummy version for scan_index.
- */
-static bool
-dummy_tid_reaped(ItemPointer itemptr, void *state)
-{
- return false;
-}
-
-/*
- * Update the shared Free Space Map with the info we now have about
- * free space in the relation, discarding any old info the map may have.
- */
-static void
-vac_update_fsm(Relation onerel, VacPageList fraged_pages,
- BlockNumber rel_pages)
-{
- int nPages = fraged_pages->num_pages;
- int i;
- BlockNumber *pages;
- Size *spaceAvail;
-
- /* +1 to avoid palloc(0) */
- pages = (BlockNumber *) palloc((nPages + 1) * sizeof(BlockNumber));
- spaceAvail = (Size *) palloc((nPages + 1) * sizeof(Size));
-
- for (i = 0; i < nPages; i++)
- {
- pages[i] = fraged_pages->pagedesc[i]->blkno;
- spaceAvail[i] = fraged_pages->pagedesc[i]->free;
-
- /*
- * fraged_pages may contain entries for pages that we later
- * decided to truncate from the relation; don't enter them into
- * the map!
- */
- if (pages[i] >= rel_pages)
- {
- nPages = i;
- break;
- }
- }
-
- MultiRecordFreeSpace(&onerel->rd_node,
- 0, MaxBlockNumber,
- nPages, pages, spaceAvail);
- pfree(pages);
- pfree(spaceAvail);
-}
-
-/* Copy a VacPage structure */
-static VacPage
-copy_vac_page(VacPage vacpage)
-{
- VacPage newvacpage;
-
- /* allocate a VacPageData entry */
- newvacpage = (VacPage) palloc(sizeof(VacPageData) +
- vacpage->offsets_free * sizeof(OffsetNumber));
-
- /* fill it in */
- if (vacpage->offsets_free > 0)
- memcpy(newvacpage->offsets, vacpage->offsets,
- vacpage->offsets_free * sizeof(OffsetNumber));
- newvacpage->blkno = vacpage->blkno;
- newvacpage->free = vacpage->free;
- newvacpage->offsets_used = vacpage->offsets_used;
- newvacpage->offsets_free = vacpage->offsets_free;
-
- return newvacpage;
-}
-
-/*
- * Add a VacPage pointer to a VacPageList.
- *
- * As a side effect of the way that scan_heap works,
- * higher pages come after lower pages in the array
- * (and highest tid on a page is last).
- */
-static void
-vpage_insert(VacPageList vacpagelist, VacPage vpnew)
-{
-#define PG_NPAGEDESC 1024
-
- /* allocate a VacPage entry if needed */
- if (vacpagelist->num_pages == 0)
- {
- vacpagelist->pagedesc = (VacPage *) palloc(PG_NPAGEDESC * sizeof(VacPage));
- vacpagelist->num_allocated_pages = PG_NPAGEDESC;
- }
- else if (vacpagelist->num_pages >= vacpagelist->num_allocated_pages)
- {
- vacpagelist->num_allocated_pages *= 2;
- vacpagelist->pagedesc = (VacPage *) repalloc(vacpagelist->pagedesc, vacpagelist->num_allocated_pages * sizeof(VacPage));
- }
- vacpagelist->pagedesc[vacpagelist->num_pages] = vpnew;
- (vacpagelist->num_pages)++;
-}
-
-/*
- * vac_bsearch: just like standard C library routine bsearch(),
- * except that we first test to see whether the target key is outside
- * the range of the table entries. This case is handled relatively slowly
- * by the normal binary search algorithm (ie, no faster than any other key)
- * but it occurs often enough in VACUUM to be worth optimizing.
- */
-static void *
-vac_bsearch(const void *key, const void *base,
- size_t nelem, size_t size,
- int (*compar) (const void *, const void *))
-{
- int res;
- const void *last;
-
- if (nelem == 0)
- return NULL;
- res = compar(key, base);
- if (res < 0)
- return NULL;
- if (res == 0)
- return (void *) base;
- if (nelem > 1)
- {
- last = (const void *) ((const char *) base + (nelem - 1) * size);
- res = compar(key, last);
- if (res > 0)
- return NULL;
- if (res == 0)
- return (void *) last;
- }
- if (nelem <= 2)
- return NULL; /* already checked 'em all */
- return bsearch(key, base, nelem, size, compar);
-}
-
-/*
- * Comparator routines for use with qsort() and bsearch().
- */
-static int
-vac_cmp_blk(const void *left, const void *right)
-{
- BlockNumber lblk,
- rblk;
-
- lblk = (*((VacPage *) left))->blkno;
- rblk = (*((VacPage *) right))->blkno;
-
- if (lblk < rblk)
- return -1;
- if (lblk == rblk)
- return 0;
- return 1;
-}
-
-static int
-vac_cmp_offno(const void *left, const void *right)
-{
- if (*(OffsetNumber *) left < *(OffsetNumber *) right)
- return -1;
- if (*(OffsetNumber *) left == *(OffsetNumber *) right)
- return 0;
- return 1;
-}
-
-static int
-vac_cmp_vtlinks(const void *left, const void *right)
-{
- if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi <
- ((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
- return -1;
- if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi >
- ((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
- return 1;
- /* bi_hi-es are equal */
- if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo <
- ((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
- return -1;
- if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo >
- ((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
- return 1;
- /* bi_lo-es are equal */
- if (((VTupleLink) left)->new_tid.ip_posid <
- ((VTupleLink) right)->new_tid.ip_posid)
- return -1;
- if (((VTupleLink) left)->new_tid.ip_posid >
- ((VTupleLink) right)->new_tid.ip_posid)
- return 1;
- return 0;
-}
-
-
-void
-vac_open_indexes(Relation relation, int *nindexes, Relation **Irel)
-{
- List *indexoidlist,
- *indexoidscan;
- int i;
-
- indexoidlist = RelationGetIndexList(relation);
-
- *nindexes = length(indexoidlist);
-
- if (*nindexes > 0)
- *Irel = (Relation *) palloc(*nindexes * sizeof(Relation));
- else
- *Irel = NULL;
-
- i = 0;
- foreach(indexoidscan, indexoidlist)
- {
- Oid indexoid = lfirsti(indexoidscan);
-
- (*Irel)[i] = index_open(indexoid);
- i++;
- }
-
- freeList(indexoidlist);
-}
-
-
-void
-vac_close_indexes(int nindexes, Relation *Irel)
-{
- if (Irel == (Relation *) NULL)
- return;
-
- while (nindexes--)
- index_close(Irel[nindexes]);
- pfree(Irel);
-}
-
-
-/*
- * Is an index partial (ie, could it contain fewer tuples than the heap?)
- */
-bool
-vac_is_partial_index(Relation indrel)
-{
- /*
- * If the index's AM doesn't support nulls, it's partial for our
- * purposes
- */
- if (!indrel->rd_am->amindexnulls)
- return true;
-
- /* Otherwise, look to see if there's a partial-index predicate */
- return (VARSIZE(&indrel->rd_index->indpred) > VARHDRSZ);
-}
-
-
-static bool
-enough_space(VacPage vacpage, Size len)
-{
- len = MAXALIGN(len);
-
- if (len > vacpage->free)
- return false;
-
- /* if there are free itemid(s) and len <= free_space... */
- if (vacpage->offsets_used < vacpage->offsets_free)
- return true;
-
- /* noff_used >= noff_free and so we'll have to allocate new itemid */
- if (len + sizeof(ItemIdData) <= vacpage->free)
- return true;
-
- return false;
-}
-
-
-/*
- * Initialize usage snapshot.
- */
-void
-vac_init_rusage(VacRUsage *ru0)
-{
- struct timezone tz;
-
- getrusage(RUSAGE_SELF, &ru0->ru);
- gettimeofday(&ru0->tv, &tz);
-}
-
-/*
- * Compute elapsed time since ru0 usage snapshot, and format into
- * a displayable string. Result is in a static string, which is
- * tacky, but no one ever claimed that the Postgres backend is
- * threadable...
- */
-const char *
-vac_show_rusage(VacRUsage *ru0)
-{
- static char result[100];
- VacRUsage ru1;
-
- vac_init_rusage(&ru1);
-
- if (ru1.tv.tv_usec < ru0->tv.tv_usec)
- {
- ru1.tv.tv_sec--;
- ru1.tv.tv_usec += 1000000;
- }
- if (ru1.ru.ru_stime.tv_usec < ru0->ru.ru_stime.tv_usec)
- {
- ru1.ru.ru_stime.tv_sec--;
- ru1.ru.ru_stime.tv_usec += 1000000;
- }
- if (ru1.ru.ru_utime.tv_usec < ru0->ru.ru_utime.tv_usec)
- {
- ru1.ru.ru_utime.tv_sec--;
- ru1.ru.ru_utime.tv_usec += 1000000;
- }
-
- snprintf(result, sizeof(result),
- "CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec.",
- (int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
- (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
- (int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
- (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
- (int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
- (int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
-
- return result;
-}
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
deleted file mode 100644
index 7e3b82fdd74..00000000000
--- a/src/backend/commands/vacuumlazy.c
+++ /dev/null
@@ -1,1120 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * vacuumlazy.c
- * Concurrent ("lazy") vacuuming.
- *
- *
- * The major space usage for LAZY VACUUM is storage for the array of dead
- * tuple TIDs, with the next biggest need being storage for per-disk-page
- * free space info. We want to ensure we can vacuum even the very largest
- * relations with finite memory space usage. To do that, we set upper bounds
- * on the number of tuples and pages we will keep track of at once.
- *
- * We are willing to use at most VacuumMem memory space to keep track of
- * dead tuples. We initially allocate an array of TIDs of that size.
- * If the array threatens to overflow, we suspend the heap scan phase
- * and perform a pass of index cleanup and page compaction, then resume
- * the heap scan with an empty TID array.
- *
- * We can limit the storage for page free space to MaxFSMPages entries,
- * since that's the most the free space map will be willing to remember
- * anyway. If the relation has fewer than that many pages with free space,
- * life is easy: just build an array of per-page info. If it has more,
- * we store the free space info as a heap ordered by amount of free space,
- * so that we can discard the pages with least free space to ensure we never
- * have more than MaxFSMPages entries in all. The surviving page entries
- * are passed to the free space map at conclusion of the scan.
- *
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.16 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/genam.h"
-#include "access/heapam.h"
-#include "access/xlog.h"
-#include "commands/vacuum.h"
-#include "miscadmin.h"
-#include "storage/freespace.h"
-#include "storage/sinval.h"
-#include "storage/smgr.h"
-#include "utils/lsyscache.h"
-
-
-/*
- * Space/time tradeoff parameters: do these need to be user-tunable?
- *
- * A page with less than PAGE_SPACE_THRESHOLD free space will be forgotten
- * immediately, and not even passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular reduces
- * the amount of time we spend holding the FSM lock when we finally call
- * MultiRecordFreeSpace. Since the FSM will ignore pages below its own
- * runtime threshold anyway, there's no point in making this really small.
- * XXX Is it worth trying to measure average tuple size, and using that to
- * set the threshold? Problem is we don't know average tuple size very
- * accurately for the first few pages...
- *
- * To consider truncating the relation, we want there to be at least
- * relsize / REL_TRUNCATE_FRACTION potentially-freeable pages.
- */
-#define PAGE_SPACE_THRESHOLD ((Size) (BLCKSZ / 32))
-
-#define REL_TRUNCATE_FRACTION 16
-
-/* MAX_TUPLES_PER_PAGE can be a conservative upper limit */
-#define MAX_TUPLES_PER_PAGE ((int) (BLCKSZ / sizeof(HeapTupleHeaderData)))
-
-
-typedef struct LVRelStats
-{
- /* Overall statistics about rel */
- BlockNumber rel_pages;
- double rel_tuples;
- BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
- /* List of TIDs of tuples we intend to delete */
- /* NB: this list is ordered by TID address */
- int num_dead_tuples; /* current # of entries */
- int max_dead_tuples; /* # slots allocated in array */
- ItemPointer dead_tuples; /* array of ItemPointerData */
- /* Array or heap of per-page info about free space */
- /* We use a simple array until it fills up, then convert to heap */
- bool fs_is_heap; /* are we using heap organization? */
- int num_free_pages; /* current # of entries */
- int max_free_pages; /* # slots allocated in arrays */
- BlockNumber *free_pages; /* array or heap of block numbers */
- Size *free_spaceavail; /* array or heap of available space */
-} LVRelStats;
-
-
-static int elevel = -1;
-
-static TransactionId OldestXmin;
-static TransactionId FreezeLimit;
-
-
-/* non-export function prototypes */
-static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
- Relation *Irel, int nindexes);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
-static void lazy_scan_index(Relation indrel, LVRelStats *vacrelstats);
-static void lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats);
-static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
- int tupindex, LVRelStats *vacrelstats);
-static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
-static BlockNumber count_nondeletable_pages(Relation onerel,
- LVRelStats *vacrelstats);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
-static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
- ItemPointer itemptr);
-static void lazy_record_free_space(LVRelStats *vacrelstats,
- BlockNumber page, Size avail);
-static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
-static bool dummy_tid_reaped(ItemPointer itemptr, void *state);
-static void lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats);
-static int vac_cmp_itemptr(const void *left, const void *right);
-
-
-/*
- * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation
- *
- * This routine vacuums a single heap, cleans out its indexes, and
- * updates its num_pages and num_tuples statistics.
- *
- * At entry, we have already established a transaction and opened
- * and locked the relation.
- */
-void
-lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
-{
- LVRelStats *vacrelstats;
- Relation *Irel;
- int nindexes;
- bool hasindex;
- BlockNumber possibly_freeable;
-
- if (vacstmt->verbose)
- elevel = INFO;
- else
- elevel = DEBUG1;
-
- vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
- &OldestXmin, &FreezeLimit);
-
- vacrelstats = (LVRelStats *) palloc(sizeof(LVRelStats));
- MemSet(vacrelstats, 0, sizeof(LVRelStats));
-
- /* Open all indexes of the relation */
- vac_open_indexes(onerel, &nindexes, &Irel);
- hasindex = (nindexes > 0);
-
- /* Do the vacuuming */
- lazy_scan_heap(onerel, vacrelstats, Irel, nindexes);
-
- /* Done with indexes */
- vac_close_indexes(nindexes, Irel);
-
- /*
- * Optionally truncate the relation.
- *
- * Don't even think about it unless we have a shot at releasing a goodly
- * number of pages. Otherwise, the time taken isn't worth it.
- */
- possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
- if (possibly_freeable > vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
- lazy_truncate_heap(onerel, vacrelstats);
-
- /* Update shared free space map with final free space info */
- lazy_update_fsm(onerel, vacrelstats);
-
- /* Update statistics in pg_class */
- vac_update_relstats(RelationGetRelid(onerel), vacrelstats->rel_pages,
- vacrelstats->rel_tuples, hasindex);
-}
-
-
-/*
- * lazy_scan_heap() -- scan an open heap relation
- *
- * This routine sets commit status bits, builds lists of dead tuples
- * and pages with free space, and calculates statistics on the number
- * of live tuples in the heap. When done, or when we run low on space
- * for dead-tuple TIDs, invoke vacuuming of indexes and heap.
- */
-static void
-lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
- Relation *Irel, int nindexes)
-{
- BlockNumber nblocks,
- blkno;
- HeapTupleData tuple;
- char *relname;
- BlockNumber empty_pages,
- changed_pages;
- double num_tuples,
- tups_vacuumed,
- nkeep,
- nunused;
- bool did_vacuum_index = false;
- int i;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- relname = RelationGetRelationName(onerel);
- elog(elevel, "--Relation %s.%s--",
- get_namespace_name(RelationGetNamespace(onerel)),
- relname);
-
- empty_pages = changed_pages = 0;
- num_tuples = tups_vacuumed = nkeep = nunused = 0;
-
- nblocks = RelationGetNumberOfBlocks(onerel);
- vacrelstats->rel_pages = nblocks;
- vacrelstats->nonempty_pages = 0;
-
- lazy_space_alloc(vacrelstats, nblocks);
-
- for (blkno = 0; blkno < nblocks; blkno++)
- {
- Buffer buf;
- Page page;
- OffsetNumber offnum,
- maxoff;
- bool pgchanged,
- tupgone,
- hastup;
- int prev_dead_count;
-
- CHECK_FOR_INTERRUPTS();
-
- /*
- * If we are close to overrunning the available space for
- * dead-tuple TIDs, pause and do a cycle of vacuuming before we
- * tackle this page.
- */
- if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MAX_TUPLES_PER_PAGE &&
- vacrelstats->num_dead_tuples > 0)
- {
- /* Remove index entries */
- for (i = 0; i < nindexes; i++)
- lazy_vacuum_index(Irel[i], vacrelstats);
- did_vacuum_index = true;
- /* Remove tuples from heap */
- lazy_vacuum_heap(onerel, vacrelstats);
- /* Forget the now-vacuumed tuples, and press on */
- vacrelstats->num_dead_tuples = 0;
- }
-
- buf = ReadBuffer(onerel, blkno);
-
- /* In this phase we only need shared access to the buffer */
- LockBuffer(buf, BUFFER_LOCK_SHARE);
-
- page = BufferGetPage(buf);
-
- if (PageIsNew(page))
- {
- /* Not sure we still need to handle this case, but... */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- if (PageIsNew(page))
- {
- elog(WARNING, "Rel %s: Uninitialized page %u - fixing",
- relname, blkno);
- PageInit(page, BufferGetPageSize(buf), 0);
- lazy_record_free_space(vacrelstats, blkno,
- PageGetFreeSpace(page));
- }
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- continue;
- }
-
- if (PageIsEmpty(page))
- {
- empty_pages++;
- lazy_record_free_space(vacrelstats, blkno,
- PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
- continue;
- }
-
- pgchanged = false;
- hastup = false;
- prev_dead_count = vacrelstats->num_dead_tuples;
- maxoff = PageGetMaxOffsetNumber(page);
- for (offnum = FirstOffsetNumber;
- offnum <= maxoff;
- offnum = OffsetNumberNext(offnum))
- {
- ItemId itemid;
- uint16 sv_infomask;
-
- itemid = PageGetItemId(page, offnum);
-
- if (!ItemIdIsUsed(itemid))
- {
- nunused += 1;
- continue;
- }
-
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- tuple.t_len = ItemIdGetLength(itemid);
- ItemPointerSet(&(tuple.t_self), blkno, offnum);
-
- tupgone = false;
- sv_infomask = tuple.t_data->t_infomask;
-
- switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin))
- {
- case HEAPTUPLE_DEAD:
- tupgone = true; /* we can delete the tuple */
- break;
- case HEAPTUPLE_LIVE:
-
- /*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
- *
- * NB: Since we hold only a shared buffer lock here, we
- * are assuming that TransactionId read/write is
- * atomic. This is not the only place that makes such
- * an assumption. It'd be possible to avoid the
- * assumption by momentarily acquiring exclusive lock,
- * but for the moment I see no need to.
- */
- if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- FreezeLimit))
- {
- HeapTupleHeaderSetXmin(tuple.t_data, FrozenTransactionId);
- /* infomask should be okay already */
- Assert(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED);
- pgchanged = true;
- }
- break;
- case HEAPTUPLE_RECENTLY_DEAD:
-
- /*
- * If tuple is recently deleted then we must not
- * remove it from relation.
- */
- nkeep += 1;
- break;
- case HEAPTUPLE_INSERT_IN_PROGRESS:
- /* This is an expected case during concurrent vacuum */
- break;
- case HEAPTUPLE_DELETE_IN_PROGRESS:
- /* This is an expected case during concurrent vacuum */
- break;
- default:
- elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
- break;
- }
-
- /* check for hint-bit update by HeapTupleSatisfiesVacuum */
- if (sv_infomask != tuple.t_data->t_infomask)
- pgchanged = true;
-
- /*
- * Other checks...
- */
- if (!OidIsValid(tuple.t_data->t_oid) &&
- onerel->rd_rel->relhasoids)
- elog(WARNING, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
- relname, blkno, offnum, (int) tupgone);
-
- if (tupgone)
- {
- lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
- tups_vacuumed += 1;
- }
- else
- {
- num_tuples += 1;
- hastup = true;
- }
- } /* scan along page */
-
- /*
- * If we remembered any tuples for deletion, then the page will be
- * visited again by lazy_vacuum_heap, which will compute and
- * record its post-compaction free space. If not, then we're done
- * with this page, so remember its free space as-is.
- */
- if (vacrelstats->num_dead_tuples == prev_dead_count)
- {
- lazy_record_free_space(vacrelstats, blkno,
- PageGetFreeSpace(page));
- }
-
- /* Remember the location of the last page with nonremovable tuples */
- if (hastup)
- vacrelstats->nonempty_pages = blkno + 1;
-
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- if (pgchanged)
- {
- SetBufferCommitInfoNeedsSave(buf);
- changed_pages++;
- }
-
- ReleaseBuffer(buf);
- }
-
- /* save stats for use later */
- vacrelstats->rel_tuples = num_tuples;
-
- /* If any tuples need to be deleted, perform final vacuum cycle */
- /* XXX put a threshold on min nuber of tuples here? */
- if (vacrelstats->num_dead_tuples > 0)
- {
- /* Remove index entries */
- for (i = 0; i < nindexes; i++)
- lazy_vacuum_index(Irel[i], vacrelstats);
- /* Remove tuples from heap */
- lazy_vacuum_heap(onerel, vacrelstats);
- }
- else if (!did_vacuum_index)
- {
- /* Scan indexes just to update pg_class statistics about them */
- for (i = 0; i < nindexes; i++)
- lazy_scan_index(Irel[i], vacrelstats);
- }
-
- elog(elevel, "Pages %u: Changed %u, Empty %u; \
-Tup %.0f: Vac %.0f, Keep %.0f, UnUsed %.0f.\n\tTotal %s",
- nblocks, changed_pages, empty_pages,
- num_tuples, tups_vacuumed, nkeep, nunused,
- vac_show_rusage(&ru0));
-}
-
-
-/*
- * lazy_vacuum_heap() -- second pass over the heap
- *
- * This routine marks dead tuples as unused and compacts out free
- * space on their pages. Pages not having dead tuples recorded from
- * lazy_scan_heap are not visited at all.
- *
- * Note: the reason for doing this as a second pass is we cannot remove
- * the tuples until we've removed their index entries, and we want to
- * process index entry removal in batches as large as possible.
- */
-static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
-{
- int tupindex;
- int npages;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
- npages = 0;
-
- tupindex = 0;
- while (tupindex < vacrelstats->num_dead_tuples)
- {
- BlockNumber tblk;
- Buffer buf;
- Page page;
-
- CHECK_FOR_INTERRUPTS();
-
- tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
- buf = ReadBuffer(onerel, tblk);
- LockBufferForCleanup(buf);
- tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
- /* Now that we've compacted the page, record its available space */
- page = BufferGetPage(buf);
- lazy_record_free_space(vacrelstats, tblk,
- PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
- npages++;
- }
-
- elog(elevel, "Removed %d tuples in %d pages.\n\t%s", tupindex, npages,
- vac_show_rusage(&ru0));
-}
-
-/*
- * lazy_vacuum_page() -- free dead tuples on a page
- * and repair its fragmentation.
- *
- * Caller is expected to handle reading, locking, and writing the buffer.
- *
- * tupindex is the index in vacrelstats->dead_tuples of the first dead
- * tuple for this page. We assume the rest follow sequentially.
- * The return value is the first tupindex after the tuples of this page.
- */
-static int
-lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
- int tupindex, LVRelStats *vacrelstats)
-{
- OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
- OffsetNumber *unused = unbuf;
- int uncnt;
- Page page = BufferGetPage(buffer);
- ItemId itemid;
-
- START_CRIT_SECTION();
- for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
- {
- BlockNumber tblk;
- OffsetNumber toff;
-
- tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
- if (tblk != blkno)
- break; /* past end of tuples for this block */
- toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]);
- itemid = PageGetItemId(page, toff);
- itemid->lp_flags &= ~LP_USED;
- }
-
- uncnt = PageRepairFragmentation(page, unused);
-
- {
- XLogRecPtr recptr;
-
- recptr = log_heap_clean(onerel, buffer, (char *) unused,
- (char *) (&(unused[uncnt])) - (char *) unused);
- PageSetLSN(page, recptr);
- PageSetSUI(page, ThisStartUpID);
- }
- END_CRIT_SECTION();
-
- return tupindex;
-}
-
-/*
- * lazy_scan_index() -- scan one index relation to update pg_class statistic.
- *
- * We use this when we have no deletions to do.
- */
-static void
-lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
-{
- IndexBulkDeleteResult *stats;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- /*
- * If the index is not partial, skip the scan, and just assume it has
- * the same number of tuples as the heap.
- */
- if (!vac_is_partial_index(indrel))
- {
- vac_update_relstats(RelationGetRelid(indrel),
- RelationGetNumberOfBlocks(indrel),
- vacrelstats->rel_tuples,
- false);
- return;
- }
-
- /*
- * If index is unsafe for concurrent access, must lock it; but a
- * shared lock should be sufficient.
- */
- if (!indrel->rd_am->amconcurrent)
- LockRelation(indrel, AccessShareLock);
-
- /*
- * Even though we're not planning to delete anything, use the
- * ambulkdelete call, so that the scan happens within the index AM for
- * more speed.
- */
- stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
-
- /*
- * Release lock acquired above.
- */
- if (!indrel->rd_am->amconcurrent)
- UnlockRelation(indrel, AccessShareLock);
-
- if (!stats)
- return;
-
- /* now update statistics in pg_class */
- vac_update_relstats(RelationGetRelid(indrel),
- stats->num_pages, stats->num_index_tuples,
- false);
-
- elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
- RelationGetRelationName(indrel),
- stats->num_pages, stats->num_index_tuples,
- vac_show_rusage(&ru0));
-
- pfree(stats);
-}
-
-/*
- * lazy_vacuum_index() -- vacuum one index relation.
- *
- * Delete all the index entries pointing to tuples listed in
- * vacrelstats->dead_tuples.
- *
- * Finally, we arrange to update the index relation's statistics in
- * pg_class.
- */
-static void
-lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
-{
- IndexBulkDeleteResult *stats;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- /*
- * If index is unsafe for concurrent access, must lock it.
- */
- if (!indrel->rd_am->amconcurrent)
- LockRelation(indrel, AccessExclusiveLock);
-
- /* Do bulk deletion */
- stats = index_bulk_delete(indrel, lazy_tid_reaped, (void *) vacrelstats);
-
- /*
- * Release lock acquired above.
- */
- if (!indrel->rd_am->amconcurrent)
- UnlockRelation(indrel, AccessExclusiveLock);
-
- /* now update statistics in pg_class */
- if (stats)
- {
- vac_update_relstats(RelationGetRelid(indrel),
- stats->num_pages, stats->num_index_tuples,
- false);
-
- elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
- RelationGetRelationName(indrel), stats->num_pages,
- stats->num_index_tuples, stats->tuples_removed,
- vac_show_rusage(&ru0));
-
- pfree(stats);
- }
-}
-
-/*
- * lazy_truncate_heap - try to truncate off any empty pages at the end
- */
-static void
-lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
-{
- BlockNumber old_rel_pages = vacrelstats->rel_pages;
- BlockNumber new_rel_pages;
- BlockNumber *pages;
- Size *spaceavail;
- int n;
- int i,
- j;
- VacRUsage ru0;
-
- vac_init_rusage(&ru0);
-
- /*
- * We need full exclusive lock on the relation in order to do
- * truncation. If we can't get it, give up rather than waiting --- we
- * don't want to block other backends, and we don't want to deadlock
- * (which is quite possible considering we already hold a lower-grade
- * lock).
- */
- if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
- return;
-
- /*
- * Now that we have exclusive lock, look to see if the rel has grown
- * whilst we were vacuuming with non-exclusive lock. If so, give up;
- * the newly added pages presumably contain non-deletable tuples.
- */
- new_rel_pages = RelationGetNumberOfBlocks(onerel);
- if (new_rel_pages != old_rel_pages)
- {
- /* might as well use the latest news when we update pg_class stats */
- vacrelstats->rel_pages = new_rel_pages;
- UnlockRelation(onerel, AccessExclusiveLock);
- return;
- }
-
- /*
- * Scan backwards from the end to verify that the end pages actually
- * contain nothing we need to keep. This is *necessary*, not
- * optional, because other backends could have added tuples to these
- * pages whilst we were vacuuming.
- */
- new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
-
- if (new_rel_pages >= old_rel_pages)
- {
- /* can't do anything after all */
- UnlockRelation(onerel, AccessExclusiveLock);
- return;
- }
-
- /*
- * Okay to truncate.
- *
- * First, flush any shared buffers for the blocks we intend to delete.
- * FlushRelationBuffers is a bit more than we need for this, since it
- * will also write out dirty buffers for blocks we aren't deleting,
- * but it's the closest thing in bufmgr's API.
- */
- i = FlushRelationBuffers(onerel, new_rel_pages);
- if (i < 0)
- elog(ERROR, "VACUUM (lazy_truncate_heap): FlushRelationBuffers returned %d",
- i);
-
- /*
- * Do the physical truncation.
- */
- new_rel_pages = smgrtruncate(DEFAULT_SMGR, onerel, new_rel_pages);
- onerel->rd_nblocks = new_rel_pages; /* update relcache immediately */
- onerel->rd_targblock = InvalidBlockNumber;
- vacrelstats->rel_pages = new_rel_pages; /* save new number of
- * blocks */
-
- /*
- * Drop free-space info for removed blocks; these must not get entered
- * into the FSM!
- */
- pages = vacrelstats->free_pages;
- spaceavail = vacrelstats->free_spaceavail;
- n = vacrelstats->num_free_pages;
- j = 0;
- for (i = 0; i < n; i++)
- {
- if (pages[i] < new_rel_pages)
- {
- pages[j] = pages[i];
- spaceavail[j] = spaceavail[i];
- j++;
- }
- }
- vacrelstats->num_free_pages = j;
-
- /*
- * We keep the exclusive lock until commit (perhaps not necessary)?
- */
-
- elog(elevel, "Truncated %u --> %u pages.\n\t%s", old_rel_pages,
- new_rel_pages, vac_show_rusage(&ru0));
-}
-
-/*
- * Rescan end pages to verify that they are (still) empty of needed tuples.
- *
- * Returns number of nondeletable pages (last nonempty page + 1).
- */
-static BlockNumber
-count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
-{
- BlockNumber blkno;
- HeapTupleData tuple;
-
- /* Strange coding of loop control is needed because blkno is unsigned */
- blkno = vacrelstats->rel_pages;
- while (blkno > vacrelstats->nonempty_pages)
- {
- Buffer buf;
- Page page;
- OffsetNumber offnum,
- maxoff;
- bool pgchanged,
- tupgone,
- hastup;
-
- CHECK_FOR_INTERRUPTS();
-
- blkno--;
-
- buf = ReadBuffer(onerel, blkno);
-
- /* In this phase we only need shared access to the buffer */
- LockBuffer(buf, BUFFER_LOCK_SHARE);
-
- page = BufferGetPage(buf);
-
- if (PageIsNew(page) || PageIsEmpty(page))
- {
- /* PageIsNew robably shouldn't happen... */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
- continue;
- }
-
- pgchanged = false;
- hastup = false;
- maxoff = PageGetMaxOffsetNumber(page);
- for (offnum = FirstOffsetNumber;
- offnum <= maxoff;
- offnum = OffsetNumberNext(offnum))
- {
- ItemId itemid;
- uint16 sv_infomask;
-
- itemid = PageGetItemId(page, offnum);
-
- if (!ItemIdIsUsed(itemid))
- continue;
-
- tuple.t_datamcxt = NULL;
- tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
- tuple.t_len = ItemIdGetLength(itemid);
- ItemPointerSet(&(tuple.t_self), blkno, offnum);
-
- tupgone = false;
- sv_infomask = tuple.t_data->t_infomask;
-
- switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin))
- {
- case HEAPTUPLE_DEAD:
- tupgone = true; /* we can delete the tuple */
- break;
- case HEAPTUPLE_LIVE:
- /* Shouldn't be necessary to re-freeze anything */
- break;
- case HEAPTUPLE_RECENTLY_DEAD:
-
- /*
- * If tuple is recently deleted then we must not
- * remove it from relation.
- */
- break;
- case HEAPTUPLE_INSERT_IN_PROGRESS:
- /* This is an expected case during concurrent vacuum */
- break;
- case HEAPTUPLE_DELETE_IN_PROGRESS:
- /* This is an expected case during concurrent vacuum */
- break;
- default:
- elog(ERROR, "Unexpected HeapTupleSatisfiesVacuum result");
- break;
- }
-
- /* check for hint-bit update by HeapTupleSatisfiesVacuum */
- if (sv_infomask != tuple.t_data->t_infomask)
- pgchanged = true;
-
- if (!tupgone)
- {
- hastup = true;
- break; /* can stop scanning */
- }
- } /* scan along page */
-
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- if (pgchanged)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
-
- /* Done scanning if we found a tuple here */
- if (hastup)
- return blkno + 1;
- }
-
- /*
- * If we fall out of the loop, all the previously-thought-to-be-empty
- * pages really are; we need not bother to look at the last
- * known-nonempty page.
- */
- return vacrelstats->nonempty_pages;
-}
-
-/*
- * lazy_space_alloc - space allocation decisions for lazy vacuum
- *
- * See the comments at the head of this file for rationale.
- */
-static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
-{
- int maxtuples;
- int maxpages;
-
- maxtuples = (int) ((VacuumMem * 1024L) / sizeof(ItemPointerData));
- /* stay sane if small VacuumMem */
- if (maxtuples < MAX_TUPLES_PER_PAGE)
- maxtuples = MAX_TUPLES_PER_PAGE;
-
- vacrelstats->num_dead_tuples = 0;
- vacrelstats->max_dead_tuples = maxtuples;
- vacrelstats->dead_tuples = (ItemPointer)
- palloc(maxtuples * sizeof(ItemPointerData));
-
- maxpages = MaxFSMPages;
- /* No need to allocate more pages than the relation has blocks */
- if (relblocks < (BlockNumber) maxpages)
- maxpages = (int) relblocks;
- /* avoid palloc(0) */
- if (maxpages < 1)
- maxpages = 1;
-
- vacrelstats->fs_is_heap = false;
- vacrelstats->num_free_pages = 0;
- vacrelstats->max_free_pages = maxpages;
- vacrelstats->free_pages = (BlockNumber *)
- palloc(maxpages * sizeof(BlockNumber));
- vacrelstats->free_spaceavail = (Size *)
- palloc(maxpages * sizeof(Size));
-}
-
-/*
- * lazy_record_dead_tuple - remember one deletable tuple
- */
-static void
-lazy_record_dead_tuple(LVRelStats *vacrelstats,
- ItemPointer itemptr)
-{
- /*
- * The array shouldn't overflow under normal behavior, but perhaps it
- * could if we are given a really small VacuumMem. In that case, just
- * forget the last few tuples.
- */
- if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
- {
- vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
- vacrelstats->num_dead_tuples++;
- }
-}
-
-/*
- * lazy_record_free_space - remember free space on one page
- */
-static void
-lazy_record_free_space(LVRelStats *vacrelstats,
- BlockNumber page,
- Size avail)
-{
- BlockNumber *pages;
- Size *spaceavail;
- int n;
-
- /* Ignore pages with little free space */
- if (avail < PAGE_SPACE_THRESHOLD)
- return;
-
- /* Copy pointers to local variables for notational simplicity */
- pages = vacrelstats->free_pages;
- spaceavail = vacrelstats->free_spaceavail;
- n = vacrelstats->max_free_pages;
-
- /* If we haven't filled the array yet, just keep adding entries */
- if (vacrelstats->num_free_pages < n)
- {
- pages[vacrelstats->num_free_pages] = page;
- spaceavail[vacrelstats->num_free_pages] = avail;
- vacrelstats->num_free_pages++;
- return;
- }
-
- /*----------
- * The rest of this routine works with "heap" organization of the
- * free space arrays, wherein we maintain the heap property
- * spaceavail[(j-1) div 2] <= spaceavail[j] for 0 < j < n.
- * In particular, the zero'th element always has the smallest available
- * space and can be discarded to make room for a new page with more space.
- * See Knuth's discussion of heap-based priority queues, sec 5.2.3;
- * but note he uses 1-origin array subscripts, not 0-origin.
- *----------
- */
-
- /* If we haven't yet converted the array to heap organization, do it */
- if (!vacrelstats->fs_is_heap)
- {
- /*
- * Scan backwards through the array, "sift-up" each value into its
- * correct position. We can start the scan at n/2-1 since each
- * entry above that position has no children to worry about.
- */
- int l = n / 2;
-
- while (--l >= 0)
- {
- BlockNumber R = pages[l];
- Size K = spaceavail[l];
- int i; /* i is where the "hole" is */
-
- i = l;
- for (;;)
- {
- int j = 2 * i + 1;
-
- if (j >= n)
- break;
- if (j + 1 < n && spaceavail[j] > spaceavail[j + 1])
- j++;
- if (K <= spaceavail[j])
- break;
- pages[i] = pages[j];
- spaceavail[i] = spaceavail[j];
- i = j;
- }
- pages[i] = R;
- spaceavail[i] = K;
- }
-
- vacrelstats->fs_is_heap = true;
- }
-
- /* If new page has more than zero'th entry, insert it into heap */
- if (avail > spaceavail[0])
- {
- /*
- * Notionally, we replace the zero'th entry with the new data, and
- * then sift-up to maintain the heap property. Physically, the
- * new data doesn't get stored into the arrays until we find the
- * right location for it.
- */
- int i = 0; /* i is where the "hole" is */
-
- for (;;)
- {
- int j = 2 * i + 1;
-
- if (j >= n)
- break;
- if (j + 1 < n && spaceavail[j] > spaceavail[j + 1])
- j++;
- if (avail <= spaceavail[j])
- break;
- pages[i] = pages[j];
- spaceavail[i] = spaceavail[j];
- i = j;
- }
- pages[i] = page;
- spaceavail[i] = avail;
- }
-}
-
-/*
- * lazy_tid_reaped() -- is a particular tid deletable?
- *
- * This has the right signature to be an IndexBulkDeleteCallback.
- *
- * Assumes dead_tuples array is in sorted order.
- */
-static bool
-lazy_tid_reaped(ItemPointer itemptr, void *state)
-{
- LVRelStats *vacrelstats = (LVRelStats *) state;
- ItemPointer res;
-
- res = (ItemPointer) bsearch((void *) itemptr,
- (void *) vacrelstats->dead_tuples,
- vacrelstats->num_dead_tuples,
- sizeof(ItemPointerData),
- vac_cmp_itemptr);
-
- return (res != NULL);
-}
-
-/*
- * Dummy version for lazy_scan_index.
- */
-static bool
-dummy_tid_reaped(ItemPointer itemptr, void *state)
-{
- return false;
-}
-
-/*
- * Update the shared Free Space Map with the info we now have about
- * free space in the relation, discarding any old info the map may have.
- */
-static void
-lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats)
-{
- /*
- * Since MultiRecordFreeSpace doesn't currently impose any
- * restrictions on the ordering of the input, we can just pass it the
- * arrays as-is, whether they are in heap or linear order.
- */
- MultiRecordFreeSpace(&onerel->rd_node,
- 0, MaxBlockNumber,
- vacrelstats->num_free_pages,
- vacrelstats->free_pages,
- vacrelstats->free_spaceavail);
-}
-
-/*
- * Comparator routines for use with qsort() and bsearch().
- */
-static int
-vac_cmp_itemptr(const void *left, const void *right)
-{
- BlockNumber lblk,
- rblk;
- OffsetNumber loff,
- roff;
-
- lblk = ItemPointerGetBlockNumber((ItemPointer) left);
- rblk = ItemPointerGetBlockNumber((ItemPointer) right);
-
- if (lblk < rblk)
- return -1;
- if (lblk > rblk)
- return 1;
-
- loff = ItemPointerGetOffsetNumber((ItemPointer) left);
- roff = ItemPointerGetOffsetNumber((ItemPointer) right);
-
- if (loff < roff)
- return -1;
- if (loff > roff)
- return 1;
-
- return 0;
-}
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
deleted file mode 100644
index 70e7e88d60c..00000000000
--- a/src/backend/commands/variable.c
+++ /dev/null
@@ -1,587 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * variable.c
- * Routines for handling specialized SET variables.
- *
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.69 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include <ctype.h>
-#include <time.h>
-
-#include "access/xact.h"
-#include "catalog/pg_shadow.h"
-#include "commands/variable.h"
-#include "miscadmin.h"
-#include "utils/builtins.h"
-#include "utils/guc.h"
-#include "utils/syscache.h"
-#include "utils/tqual.h"
-
-#ifdef MULTIBYTE
-#include "mb/pg_wchar.h"
-#else
-/* Grand unified hard-coded badness */
-#define pg_get_client_encoding_name() "SQL_ASCII"
-#define GetDatabaseEncodingName() "SQL_ASCII"
-#endif
-
-
-/*
- * DATESTYLE
- */
-
-/*
- * assign_datestyle: GUC assign_hook for datestyle
- */
-const char *
-assign_datestyle(const char *value, bool doit, bool interactive)
-{
- int newDateStyle = DateStyle;
- bool newEuroDates = EuroDates;
- bool ok = true;
- int dcnt = 0,
- ecnt = 0;
- char *rawstring;
- char *result;
- List *elemlist;
- List *l;
-
- /* Need a modifiable copy of string */
- rawstring = pstrdup(value);
-
- /* Parse string into list of identifiers */
- if (!SplitIdentifierString(rawstring, ',', &elemlist))
- {
- /* syntax error in list */
- pfree(rawstring);
- freeList(elemlist);
- if (interactive)
- elog(ERROR, "SET DATESTYLE: invalid list syntax");
- return NULL;
- }
-
- foreach(l, elemlist)
- {
- char *tok = (char *) lfirst(l);
-
- /* Ugh. Somebody ought to write a table driven version -- mjl */
-
- if (strcasecmp(tok, "ISO") == 0)
- {
- newDateStyle = USE_ISO_DATES;
- dcnt++;
- }
- else if (strcasecmp(tok, "SQL") == 0)
- {
- newDateStyle = USE_SQL_DATES;
- dcnt++;
- }
- else if (strncasecmp(tok, "POSTGRESQL", 8) == 0)
- {
- newDateStyle = USE_POSTGRES_DATES;
- dcnt++;
- }
- else if (strcasecmp(tok, "GERMAN") == 0)
- {
- newDateStyle = USE_GERMAN_DATES;
- dcnt++;
- if ((ecnt > 0) && (!newEuroDates))
- ok = false;
- newEuroDates = TRUE;
- }
- else if (strncasecmp(tok, "EURO", 4) == 0)
- {
- newEuroDates = TRUE;
- ecnt++;
- }
- else if (strcasecmp(tok, "US") == 0
- || strncasecmp(tok, "NONEURO", 7) == 0)
- {
- newEuroDates = FALSE;
- ecnt++;
- if ((dcnt > 0) && (newDateStyle == USE_GERMAN_DATES))
- ok = false;
- }
- else if (strcasecmp(tok, "DEFAULT") == 0)
- {
- /*
- * Easiest way to get the current DEFAULT state is to fetch
- * the DEFAULT string from guc.c and recursively parse it.
- *
- * We can't simply "return assign_datestyle(...)" because we
- * need to handle constructs like "DEFAULT, ISO".
- */
- int saveDateStyle = DateStyle;
- bool saveEuroDates = EuroDates;
- const char *subval;
-
- subval = assign_datestyle(GetConfigOptionResetString("datestyle"),
- true, interactive);
- newDateStyle = DateStyle;
- newEuroDates = EuroDates;
- DateStyle = saveDateStyle;
- EuroDates = saveEuroDates;
- if (!subval)
- {
- ok = false;
- break;
- }
- /* Here we know that our own return value is always malloc'd */
- /* when doit is true */
- free((char *) subval);
- dcnt++;
- ecnt++;
- }
- else
- {
- if (interactive)
- elog(ERROR, "SET DATESTYLE: unrecognized keyword %s", tok);
- ok = false;
- break;
- }
- }
-
- if (dcnt > 1 || ecnt > 1)
- ok = false;
-
- pfree(rawstring);
- freeList(elemlist);
-
- if (!ok)
- {
- if (interactive)
- elog(ERROR, "SET DATESTYLE: conflicting specifications");
- return NULL;
- }
-
- /*
- * If we aren't going to do the assignment, just return OK indicator.
- */
- if (!doit)
- return value;
-
- /*
- * Prepare the canonical string to return. GUC wants it malloc'd.
- */
- result = (char *) malloc(32);
- if (!result)
- return NULL;
-
- switch (newDateStyle)
- {
- case USE_ISO_DATES:
- strcpy(result, "ISO");
- break;
- case USE_SQL_DATES:
- strcpy(result, "SQL");
- break;
- case USE_GERMAN_DATES:
- strcpy(result, "GERMAN");
- break;
- default:
- strcpy(result, "POSTGRESQL");
- break;
- }
- strcat(result, newEuroDates ? ", EURO" : ", US");
-
- /*
- * Finally, it's safe to assign to the global variables;
- * the assignment cannot fail now.
- */
- DateStyle = newDateStyle;
- EuroDates = newEuroDates;
-
- return result;
-}
-
-/*
- * show_datestyle: GUC show_hook for datestyle
- */
-const char *
-show_datestyle(void)
-{
- static char buf[64];
-
- switch (DateStyle)
- {
- case USE_ISO_DATES:
- strcpy(buf, "ISO");
- break;
- case USE_SQL_DATES:
- strcpy(buf, "SQL");
- break;
- case USE_GERMAN_DATES:
- strcpy(buf, "German");
- break;
- default:
- strcpy(buf, "Postgres");
- break;
- };
- strcat(buf, " with ");
- strcat(buf, ((EuroDates) ? "European" : "US (NonEuropean)"));
- strcat(buf, " conventions");
-
- return buf;
-}
-
-
-/*
- * TIMEZONE
- */
-
-/*
- * Storage for TZ env var is allocated with an arbitrary size of 64 bytes.
- */
-static char tzbuf[64];
-
-/*
- * assign_timezone: GUC assign_hook for timezone
- */
-const char *
-assign_timezone(const char *value, bool doit, bool interactive)
-{
- char *result;
- char *endptr;
- double hours;
-
- /*
- * Check for INTERVAL 'foo'
- */
- if (strncasecmp(value, "interval", 8) == 0)
- {
- const char *valueptr = value;
- char *val;
- Interval *interval;
-
- valueptr += 8;
- while (isspace((unsigned char) *valueptr))
- valueptr++;
- if (*valueptr++ != '\'')
- return NULL;
- val = pstrdup(valueptr);
- /* Check and remove trailing quote */
- endptr = strchr(val, '\'');
- if (!endptr || endptr[1] != '\0')
- {
- pfree(val);
- return NULL;
- }
- *endptr = '\0';
- /*
- * Try to parse it. XXX an invalid interval format will result in
- * elog, which is not desirable for GUC. We did what we could to
- * guard against this in flatten_set_variable_args, but a string
- * coming in from postgresql.conf might contain anything.
- */
- interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
- pfree(val);
- if (interval->month != 0)
- {
- if (interactive)
- elog(ERROR, "SET TIME ZONE: illegal INTERVAL; month not allowed");
- pfree(interval);
- return NULL;
- }
- if (doit)
- {
- CTimeZone = interval->time;
- HasCTZSet = true;
- }
- pfree(interval);
- }
- else
- {
- /*
- * Try it as a numeric number of hours (possibly fractional).
- */
- hours = strtod(value, &endptr);
- if (endptr != value && *endptr == '\0')
- {
- if (doit)
- {
- CTimeZone = hours * 3600;
- HasCTZSet = true;
- }
- }
- else if (strcasecmp(value, "UNKNOWN") == 0)
- {
- /*
- * Clear any TZ value we may have established.
- *
- * unsetenv() works fine, but is BSD, not POSIX, and is not
- * available under Solaris, among others. Apparently putenv()
- * called as below clears the process-specific environment
- * variables. Other reasonable arguments to putenv() (e.g.
- * "TZ=", "TZ", "") result in a core dump (under Linux anyway).
- * - thomas 1998-01-26
- */
- if (doit)
- {
- if (tzbuf[0] == 'T')
- {
- strcpy(tzbuf, "=");
- if (putenv(tzbuf) != 0)
- elog(ERROR, "Unable to clear TZ environment variable");
- tzset();
- }
- HasCTZSet = false;
- }
- }
- else
- {
- /*
- * Otherwise assume it is a timezone name.
- *
- * XXX unfortunately we have no reasonable way to check whether a
- * timezone name is good, so we have to just assume that it is.
- */
- if (doit)
- {
- strcpy(tzbuf, "TZ=");
- strncat(tzbuf, value, sizeof(tzbuf)-4);
- if (putenv(tzbuf) != 0) /* shouldn't happen? */
- elog(LOG, "assign_timezone: putenv failed");
- tzset();
- HasCTZSet = false;
- }
- }
- }
-
- /*
- * If we aren't going to do the assignment, just return OK indicator.
- */
- if (!doit)
- return value;
-
- /*
- * Prepare the canonical string to return. GUC wants it malloc'd.
- */
- result = (char *) malloc(sizeof(tzbuf));
- if (!result)
- return NULL;
-
- if (HasCTZSet)
- {
- snprintf(result, sizeof(tzbuf), "%.5f",
- (double) CTimeZone / 3600.0);
- }
- else if (tzbuf[0] == 'T')
- {
- strcpy(result, tzbuf + 3);
- }
- else
- {
- strcpy(result, "UNKNOWN");
- }
-
- return result;
-}
-
-/*
- * show_timezone: GUC show_hook for timezone
- */
-const char *
-show_timezone(void)
-{
- char *tzn;
-
- if (HasCTZSet)
- {
- Interval interval;
-
- interval.month = 0;
- interval.time = CTimeZone;
-
- tzn = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(&interval)));
- }
- else
- tzn = getenv("TZ");
-
- if (tzn != NULL)
- return tzn;
-
- return "unknown";
-}
-
-
-/*
- * SET TRANSACTION ISOLATION LEVEL
- */
-
-const char *
-assign_XactIsoLevel(const char *value, bool doit, bool interactive)
-{
- if (doit && interactive && SerializableSnapshot != NULL)
- elog(ERROR, "SET TRANSACTION ISOLATION LEVEL must be called before any query");
-
- if (strcmp(value, "serializable") == 0)
- { if (doit) XactIsoLevel = XACT_SERIALIZABLE; }
- else if (strcmp(value, "read committed") == 0)
- { if (doit) XactIsoLevel = XACT_READ_COMMITTED; }
- else if (strcmp(value, "default") == 0)
- { if (doit) XactIsoLevel = DefaultXactIsoLevel; }
- else
- return NULL;
-
- return value;
-}
-
-const char *
-show_XactIsoLevel(void)
-{
- if (XactIsoLevel == XACT_SERIALIZABLE)
- return "SERIALIZABLE";
- else
- return "READ COMMITTED";
-}
-
-
-/*
- * Random number seed
- */
-
-bool
-assign_random_seed(double value, bool doit, bool interactive)
-{
- /* Can't really roll back on error, so ignore non-interactive setting */
- if (doit && interactive)
- DirectFunctionCall1(setseed, Float8GetDatum(value));
- return true;
-}
-
-const char *
-show_random_seed(void)
-{
- return "unavailable";
-}
-
-
-/*
- * MULTIBYTE-related functions
- *
- * If MULTIBYTE support was not compiled, we still allow these variables
- * to exist, but you can't set them to anything but "SQL_ASCII". This
- * minimizes interoperability problems between non-MB servers and MB-enabled
- * clients.
- */
-
-const char *
-assign_client_encoding(const char *value, bool doit, bool interactive)
-{
-#ifdef MULTIBYTE
- int encoding;
- int old_encoding = 0;
-
- encoding = pg_valid_client_encoding(value);
- if (encoding < 0)
- return NULL;
- /*
- * Ugly API here ... can't test validity without setting new encoding...
- */
- if (!doit)
- old_encoding = pg_get_client_encoding();
- if (pg_set_client_encoding(encoding) < 0)
- {
- if (interactive)
- elog(ERROR, "Conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName());
- return NULL;
- }
- if (!doit)
- pg_set_client_encoding(old_encoding);
-#else
- if (strcasecmp(value, pg_get_client_encoding_name()) != 0)
- return NULL;
-#endif
-
- return value;
-}
-
-
-const char *
-assign_server_encoding(const char *value, bool doit, bool interactive)
-{
- if (interactive)
- elog(ERROR, "SET SERVER_ENCODING is not supported");
- /* Pretend never to fail in noninteractive case */
- return value;
-}
-
-const char *
-show_server_encoding(void)
-{
- return GetDatabaseEncodingName();
-}
-
-
-/*
- * SET SESSION AUTHORIZATION
- *
- * Note: when resetting session auth after an error, we can't expect to do
- * catalog lookups. Hence, the stored form of the value is always a numeric
- * userid that can be re-used directly.
- */
-const char *
-assign_session_authorization(const char *value, bool doit, bool interactive)
-{
- Oid usesysid;
- char *endptr;
- char *result;
-
- usesysid = (Oid) strtoul(value, &endptr, 10);
-
- if (endptr != value && *endptr == '\0' && OidIsValid(usesysid))
- {
- /* use the numeric user ID */
- }
- else
- {
- HeapTuple userTup;
-
- userTup = SearchSysCache(SHADOWNAME,
- PointerGetDatum(value),
- 0, 0, 0);
- if (!HeapTupleIsValid(userTup))
- {
- if (interactive)
- elog(ERROR, "user \"%s\" does not exist", value);
- return NULL;
- }
-
- usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
-
- ReleaseSysCache(userTup);
- }
-
- if (doit)
- SetSessionAuthorization(usesysid);
-
- result = (char *) malloc(32);
- if (!result)
- return NULL;
-
- snprintf(result, 32, "%lu", (unsigned long) usesysid);
-
- return result;
-}
-
-const char *
-show_session_authorization(void)
-{
- return GetUserNameFromId(GetSessionUserId());
-}
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
deleted file mode 100644
index e21b72a87a5..00000000000
--- a/src/backend/commands/view.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * view.c
- * use rewrite rules to construct views
- *
- * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * $Id: view.c,v 1.64 2002/06/20 20:29:27 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "access/xact.h"
-#include "catalog/heap.h"
-#include "catalog/namespace.h"
-#include "commands/tablecmds.h"
-#include "commands/view.h"
-#include "miscadmin.h"
-#include "nodes/makefuncs.h"
-#include "parser/parse_relation.h"
-#include "parser/parse_type.h"
-#include "rewrite/rewriteDefine.h"
-#include "rewrite/rewriteManip.h"
-#include "rewrite/rewriteRemove.h"
-#include "rewrite/rewriteSupport.h"
-#include "utils/syscache.h"
-
-
-/*---------------------------------------------------------------------
- * DefineVirtualRelation
- *
- * Create the "view" relation.
- * `DefineRelation' does all the work, we just provide the correct
- * arguments!
- *
- * If the relation already exists, then 'DefineRelation' will abort
- * the xact...
- *---------------------------------------------------------------------
- */
-static Oid
-DefineVirtualRelation(const RangeVar *relation, List *tlist)
-{
- CreateStmt *createStmt = makeNode(CreateStmt);
- List *attrList,
- *t;
-
- /*
- * create a list of ColumnDef nodes based on the names and types of
- * the (non-junk) targetlist items from the view's SELECT list.
- */
- attrList = NIL;
- foreach(t, tlist)
- {
- TargetEntry *entry = lfirst(t);
- Resdom *res = entry->resdom;
-
- if (!res->resjunk)
- {
- ColumnDef *def = makeNode(ColumnDef);
- TypeName *typename = makeNode(TypeName);
-
- def->colname = pstrdup(res->resname);
-
- typename->typeid = res->restype;
- typename->typmod = res->restypmod;
- def->typename = typename;
-
- def->is_not_null = false;
- def->raw_default = NULL;
- def->cooked_default = NULL;
- def->constraints = NIL;
-
- attrList = lappend(attrList, def);
- }
- }
-
- if (attrList == NIL)
- elog(ERROR, "attempted to define virtual relation with no attrs");
-
- /*
- * now create the parameters for keys/inheritance etc. All of them are
- * nil...
- */
- createStmt->relation = (RangeVar *) relation;
- createStmt->tableElts = attrList;
- createStmt->inhRelations = NIL;
- createStmt->constraints = NIL;
- createStmt->hasoids = false;
-
- /*
- * finally create the relation...
- */
- return DefineRelation(createStmt, RELKIND_VIEW);
-}
-
-static RuleStmt *
-FormViewRetrieveRule(const RangeVar *view, Query *viewParse)
-{
- RuleStmt *rule;
-
- /*
- * Create a RuleStmt that corresponds to the suitable rewrite rule
- * args for DefineQueryRewrite();
- */
- rule = makeNode(RuleStmt);
- rule->relation = copyObject((RangeVar *) view);
- rule->rulename = pstrdup(ViewSelectRuleName);
- rule->whereClause = NULL;
- rule->event = CMD_SELECT;
- rule->instead = true;
- rule->actions = makeList1(viewParse);
-
- return rule;
-}
-
-static void
-DefineViewRules(const RangeVar *view, Query *viewParse)
-{
- RuleStmt *retrieve_rule;
-
-#ifdef NOTYET
- RuleStmt *replace_rule;
- RuleStmt *append_rule;
- RuleStmt *delete_rule;
-#endif
-
- retrieve_rule = FormViewRetrieveRule(view, viewParse);
-
-#ifdef NOTYET
-
- replace_rule = FormViewReplaceRule(view, viewParse);
- append_rule = FormViewAppendRule(view, viewParse);
- delete_rule = FormViewDeleteRule(view, viewParse);
-#endif
-
- DefineQueryRewrite(retrieve_rule);
-
-#ifdef NOTYET
- DefineQueryRewrite(replace_rule);
- DefineQueryRewrite(append_rule);
- DefineQueryRewrite(delete_rule);
-#endif
-
-}
-
-/*---------------------------------------------------------------
- * UpdateRangeTableOfViewParse
- *
- * Update the range table of the given parsetree.
- * This update consists of adding two new entries IN THE BEGINNING
- * of the range table (otherwise the rule system will die a slow,
- * horrible and painful death, and we do not want that now, do we?)
- * one for the OLD relation and one for the NEW one (both of
- * them refer in fact to the "view" relation).
- *
- * Of course we must also increase the 'varnos' of all the Var nodes
- * by 2...
- *
- * These extra RT entries are not actually used in the query,
- * except for run-time permission checking.
- *---------------------------------------------------------------
- */
-static Query *
-UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
-{
- List *new_rt;
- RangeTblEntry *rt_entry1,
- *rt_entry2;
-
- /*
- * Make a copy of the given parsetree. It's not so much that we don't
- * want to scribble on our input, it's that the parser has a bad habit
- * of outputting multiple links to the same subtree for constructs
- * like BETWEEN, and we mustn't have OffsetVarNodes increment the
- * varno of a Var node twice. copyObject will expand any
- * multiply-referenced subtree into multiple copies.
- */
- viewParse = (Query *) copyObject(viewParse);
-
- /*
- * Create the 2 new range table entries and form the new range
- * table... OLD first, then NEW....
- */
- rt_entry1 = addRangeTableEntryForRelation(NULL, viewOid,
- makeAlias("*OLD*", NIL),
- false, false);
- rt_entry2 = addRangeTableEntryForRelation(NULL, viewOid,
- makeAlias("*NEW*", NIL),
- false, false);
- /* Must override addRangeTableEntry's default access-check flags */
- rt_entry1->checkForRead = false;
- rt_entry2->checkForRead = false;
-
- new_rt = lcons(rt_entry1, lcons(rt_entry2, viewParse->rtable));
-
- viewParse->rtable = new_rt;
-
- /*
- * Now offset all var nodes by 2, and jointree RT indexes too.
- */
- OffsetVarNodes((Node *) viewParse, 2, 0);
-
- return viewParse;
-}
-
-/*-------------------------------------------------------------------
- * DefineView
- *
- * - takes a "viewname", "parsetree" pair and then
- * 1) construct the "virtual" relation
- * 2) commit the command but NOT the transaction,
- * so that the relation exists
- * before the rules are defined.
- * 2) define the "n" rules specified in the PRS2 paper
- * over the "virtual" relation
- *-------------------------------------------------------------------
- */
-void
-DefineView(const RangeVar *view, Query *viewParse)
-{
- Oid viewOid;
-
- /*
- * Create the view relation
- *
- * NOTE: if it already exists, the xact will be aborted.
- */
- viewOid = DefineVirtualRelation(view, viewParse->targetList);
-
- /*
- * The relation we have just created is not visible to any other
- * commands running with the same transaction & command id. So,
- * increment the command id counter (but do NOT pfree any memory!!!!)
- */
- CommandCounterIncrement();
-
- /*
- * The range table of 'viewParse' does not contain entries for the
- * "OLD" and "NEW" relations. So... add them!
- */
- viewParse = UpdateRangeTableOfViewParse(viewOid, viewParse);
-
- /*
- * Now create the rules associated with the view.
- */
- DefineViewRules(view, viewParse);
-}
-
-/*------------------------------------------------------------------
- * RemoveView
- *
- * Remove a view given its name
- *------------------------------------------------------------------
- */
-void
-RemoveView(const RangeVar *view)
-{
- Oid viewOid;
-
- viewOid = RangeVarGetRelid(view, false);
- /*
- * We just have to drop the relation; the associated rules will be
- * cleaned up automatically.
- */
- heap_drop_with_catalog(viewOid, allowSystemTableMods);
-}