summaryrefslogtreecommitdiff
path: root/src/backend
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend')
-rw-r--r--src/backend/access/common/heaptuple.c188
-rw-r--r--src/backend/access/common/indextuple.c37
-rw-r--r--src/backend/access/common/printtup.c21
-rw-r--r--src/backend/access/common/tupdesc.c52
-rw-r--r--src/backend/access/gist/gistproc.c4
-rw-r--r--src/backend/access/hash/hash.c63
-rw-r--r--src/backend/access/hash/hashfunc.c31
-rw-r--r--src/backend/access/hash/hashinsert.c29
-rw-r--r--src/backend/access/hash/hashovfl.c58
-rw-r--r--src/backend/access/hash/hashpage.c134
-rw-r--r--src/backend/access/hash/hashscan.c8
-rw-r--r--src/backend/access/hash/hashsearch.c38
-rw-r--r--src/backend/access/heap/heapam.c428
-rw-r--r--src/backend/access/heap/hio.c91
-rw-r--r--src/backend/access/heap/tuptoaster.c98
-rw-r--r--src/backend/access/index/genam.c12
-rw-r--r--src/backend/access/index/indexam.c85
-rw-r--r--src/backend/access/nbtree/nbtinsert.c268
-rw-r--r--src/backend/access/nbtree/nbtpage.c194
-rw-r--r--src/backend/access/nbtree/nbtree.c180
-rw-r--r--src/backend/access/nbtree/nbtsearch.c279
-rw-r--r--src/backend/access/nbtree/nbtsort.c138
-rw-r--r--src/backend/access/nbtree/nbtutils.c86
-rw-r--r--src/backend/access/nbtree/nbtxlog.c20
-rw-r--r--src/backend/access/rtree/rtget.c40
-rw-r--r--src/backend/access/rtree/rtproc.c6
-rw-r--r--src/backend/access/rtree/rtree.c184
-rw-r--r--src/backend/access/rtree/rtscan.c21
-rw-r--r--src/backend/access/transam/clog.c22
-rw-r--r--src/backend/access/transam/multixact.c380
-rw-r--r--src/backend/access/transam/slru.c132
-rw-r--r--src/backend/access/transam/subtrans.c21
-rw-r--r--src/backend/access/transam/transam.c40
-rw-r--r--src/backend/access/transam/twophase.c310
-rw-r--r--src/backend/access/transam/twophase_rmgr.c38
-rw-r--r--src/backend/access/transam/varsup.c138
-rw-r--r--src/backend/access/transam/xact.c639
-rw-r--r--src/backend/access/transam/xlog.c1129
-rw-r--r--src/backend/access/transam/xlogutils.c28
-rw-r--r--src/backend/bootstrap/bootstrap.c49
-rw-r--r--src/backend/catalog/aclchk.c279
-rw-r--r--src/backend/catalog/catalog.c20
-rw-r--r--src/backend/catalog/dependency.c179
-rw-r--r--src/backend/catalog/heap.c258
-rw-r--r--src/backend/catalog/index.c326
-rw-r--r--src/backend/catalog/indexing.c10
-rw-r--r--src/backend/catalog/namespace.c258
-rw-r--r--src/backend/catalog/pg_aggregate.c80
-rw-r--r--src/backend/catalog/pg_constraint.c51
-rw-r--r--src/backend/catalog/pg_conversion.c19
-rw-r--r--src/backend/catalog/pg_depend.c35
-rw-r--r--src/backend/catalog/pg_operator.c62
-rw-r--r--src/backend/catalog/pg_proc.c90
-rw-r--r--src/backend/catalog/pg_shdepend.c219
-rw-r--r--src/backend/catalog/pg_type.c41
-rw-r--r--src/backend/commands/aggregatecmds.c53
-rw-r--r--src/backend/commands/alter.c18
-rw-r--r--src/backend/commands/analyze.c305
-rw-r--r--src/backend/commands/async.c213
-rw-r--r--src/backend/commands/cluster.c184
-rw-r--r--src/backend/commands/comment.c62
-rw-r--r--src/backend/commands/conversioncmds.c21
-rw-r--r--src/backend/commands/copy.c539
-rw-r--r--src/backend/commands/dbcommands.c186
-rw-r--r--src/backend/commands/define.c8
-rw-r--r--src/backend/commands/explain.c90
-rw-r--r--src/backend/commands/functioncmds.c114
-rw-r--r--src/backend/commands/indexcmds.c166
-rw-r--r--src/backend/commands/lockcmds.c10
-rw-r--r--src/backend/commands/opclasscmds.c76
-rw-r--r--src/backend/commands/operatorcmds.c27
-rw-r--r--src/backend/commands/portalcmds.c77
-rw-r--r--src/backend/commands/prepare.c82
-rw-r--r--src/backend/commands/proclang.c52
-rw-r--r--src/backend/commands/schemacmds.c57
-rw-r--r--src/backend/commands/sequence.c86
-rw-r--r--src/backend/commands/tablecmds.c954
-rw-r--r--src/backend/commands/tablespace.c139
-rw-r--r--src/backend/commands/trigger.c386
-rw-r--r--src/backend/commands/typecmds.c255
-rw-r--r--src/backend/commands/user.c174
-rw-r--r--src/backend/commands/vacuum.c605
-rw-r--r--src/backend/commands/vacuumlazy.c190
-rw-r--r--src/backend/commands/variable.c110
-rw-r--r--src/backend/commands/view.c77
-rw-r--r--src/backend/executor/execAmi.c21
-rw-r--r--src/backend/executor/execGrouping.c34
-rw-r--r--src/backend/executor/execJunk.c32
-rw-r--r--src/backend/executor/execMain.c341
-rw-r--r--src/backend/executor/execProcnode.c20
-rw-r--r--src/backend/executor/execQual.c443
-rw-r--r--src/backend/executor/execScan.c43
-rw-r--r--src/backend/executor/execTuples.c58
-rw-r--r--src/backend/executor/execUtils.c106
-rw-r--r--src/backend/executor/functions.c204
-rw-r--r--src/backend/executor/instrument.c6
-rw-r--r--src/backend/executor/nodeAgg.c255
-rw-r--r--src/backend/executor/nodeAppend.c51
-rw-r--r--src/backend/executor/nodeBitmapAnd.c18
-rw-r--r--src/backend/executor/nodeBitmapHeapscan.c69
-rw-r--r--src/backend/executor/nodeBitmapIndexscan.c48
-rw-r--r--src/backend/executor/nodeBitmapOr.c14
-rw-r--r--src/backend/executor/nodeFunctionscan.c31
-rw-r--r--src/backend/executor/nodeGroup.c31
-rw-r--r--src/backend/executor/nodeHash.c84
-rw-r--r--src/backend/executor/nodeHashjoin.c162
-rw-r--r--src/backend/executor/nodeIndexscan.c155
-rw-r--r--src/backend/executor/nodeLimit.c36
-rw-r--r--src/backend/executor/nodeMaterial.c46
-rw-r--r--src/backend/executor/nodeMergejoin.c289
-rw-r--r--src/backend/executor/nodeNestloop.c61
-rw-r--r--src/backend/executor/nodeResult.c32
-rw-r--r--src/backend/executor/nodeSeqscan.c60
-rw-r--r--src/backend/executor/nodeSetOp.c24
-rw-r--r--src/backend/executor/nodeSort.c25
-rw-r--r--src/backend/executor/nodeSubplan.c243
-rw-r--r--src/backend/executor/nodeSubqueryscan.c26
-rw-r--r--src/backend/executor/nodeTidscan.c31
-rw-r--r--src/backend/executor/nodeUnique.c34
-rw-r--r--src/backend/executor/spi.c118
-rw-r--r--src/backend/lib/dllist.c6
-rw-r--r--src/backend/lib/stringinfo.c39
-rw-r--r--src/backend/libpq/auth.c93
-rw-r--r--src/backend/libpq/be-fsstubs.c45
-rw-r--r--src/backend/libpq/be-secure.c70
-rw-r--r--src/backend/libpq/crypt.c8
-rw-r--r--src/backend/libpq/hba.c106
-rw-r--r--src/backend/libpq/md5.c6
-rw-r--r--src/backend/libpq/pqcomm.c112
-rw-r--r--src/backend/libpq/pqformat.c11
-rw-r--r--src/backend/libpq/pqsignal.c11
-rw-r--r--src/backend/main/main.c114
-rw-r--r--src/backend/nodes/bitmapset.c8
-rw-r--r--src/backend/nodes/copyfuncs.c19
-rw-r--r--src/backend/nodes/equalfuncs.c61
-rw-r--r--src/backend/nodes/list.c24
-rw-r--r--src/backend/nodes/makefuncs.c15
-rw-r--r--src/backend/nodes/outfuncs.c27
-rw-r--r--src/backend/nodes/print.c6
-rw-r--r--src/backend/nodes/read.c21
-rw-r--r--src/backend/nodes/readfuncs.c50
-rw-r--r--src/backend/nodes/tidbitmap.c108
-rw-r--r--src/backend/optimizer/geqo/geqo_erx.c45
-rw-r--r--src/backend/optimizer/geqo/geqo_eval.c91
-rw-r--r--src/backend/optimizer/geqo/geqo_main.c9
-rw-r--r--src/backend/optimizer/geqo/geqo_misc.c10
-rw-r--r--src/backend/optimizer/geqo/geqo_pool.c20
-rw-r--r--src/backend/optimizer/geqo/geqo_recombination.c10
-rw-r--r--src/backend/optimizer/geqo/geqo_selection.c15
-rw-r--r--src/backend/optimizer/path/allpaths.c127
-rw-r--r--src/backend/optimizer/path/clausesel.c88
-rw-r--r--src/backend/optimizer/path/costsize.c470
-rw-r--r--src/backend/optimizer/path/indxpath.c371
-rw-r--r--src/backend/optimizer/path/joinpath.c171
-rw-r--r--src/backend/optimizer/path/joinrels.c135
-rw-r--r--src/backend/optimizer/path/orindxpath.c34
-rw-r--r--src/backend/optimizer/path/pathkeys.c291
-rw-r--r--src/backend/optimizer/path/tidpath.c6
-rw-r--r--src/backend/optimizer/plan/createplan.c401
-rw-r--r--src/backend/optimizer/plan/initsplan.c231
-rw-r--r--src/backend/optimizer/plan/planagg.c126
-rw-r--r--src/backend/optimizer/plan/planmain.c124
-rw-r--r--src/backend/optimizer/plan/planner.c354
-rw-r--r--src/backend/optimizer/plan/setrefs.c158
-rw-r--r--src/backend/optimizer/plan/subselect.c264
-rw-r--r--src/backend/optimizer/prep/prepjointree.c164
-rw-r--r--src/backend/optimizer/prep/prepqual.c61
-rw-r--r--src/backend/optimizer/prep/preptlist.c105
-rw-r--r--src/backend/optimizer/prep/prepunion.c189
-rw-r--r--src/backend/optimizer/util/clauses.c367
-rw-r--r--src/backend/optimizer/util/pathnode.c175
-rw-r--r--src/backend/optimizer/util/plancat.c132
-rw-r--r--src/backend/optimizer/util/predtest.c122
-rw-r--r--src/backend/optimizer/util/relnode.c84
-rw-r--r--src/backend/optimizer/util/restrictinfo.c67
-rw-r--r--src/backend/optimizer/util/tlist.c4
-rw-r--r--src/backend/optimizer/util/var.c38
-rw-r--r--src/backend/parser/analyze.c534
-rw-r--r--src/backend/parser/keywords.c6
-rw-r--r--src/backend/parser/parse_agg.c79
-rw-r--r--src/backend/parser/parse_clause.c257
-rw-r--r--src/backend/parser/parse_coerce.c246
-rw-r--r--src/backend/parser/parse_expr.c176
-rw-r--r--src/backend/parser/parse_func.c241
-rw-r--r--src/backend/parser/parse_node.c38
-rw-r--r--src/backend/parser/parse_oper.c108
-rw-r--r--src/backend/parser/parse_relation.c156
-rw-r--r--src/backend/parser/parse_target.c113
-rw-r--r--src/backend/parser/parse_type.c18
-rw-r--r--src/backend/parser/scansup.c24
-rw-r--r--src/backend/port/beos/sem.c22
-rw-r--r--src/backend/port/beos/shm.c11
-rw-r--r--src/backend/port/beos/support.c18
-rw-r--r--src/backend/port/dynloader/aix.c48
-rw-r--r--src/backend/port/dynloader/aix.h4
-rw-r--r--src/backend/port/dynloader/bsdi.c11
-rw-r--r--src/backend/port/dynloader/bsdi.h3
-rw-r--r--src/backend/port/dynloader/hpux.c4
-rw-r--r--src/backend/port/dynloader/linux.c11
-rw-r--r--src/backend/port/dynloader/ultrix4.c10
-rw-r--r--src/backend/port/dynloader/win32.c48
-rw-r--r--src/backend/port/ipc_test.c6
-rw-r--r--src/backend/port/posix_sema.c75
-rw-r--r--src/backend/port/qnx4/sem.c7
-rw-r--r--src/backend/port/qnx4/shm.c14
-rw-r--r--src/backend/port/sysv_sema.c126
-rw-r--r--src/backend/port/sysv_shmem.c96
-rw-r--r--src/backend/port/win32/error.c4
-rw-r--r--src/backend/port/win32/security.c38
-rw-r--r--src/backend/port/win32/sema.c6
-rw-r--r--src/backend/port/win32/shmem.c4
-rw-r--r--src/backend/port/win32/signal.c32
-rw-r--r--src/backend/port/win32/socket.c14
-rw-r--r--src/backend/postmaster/autovacuum.c270
-rw-r--r--src/backend/postmaster/bgwriter.c109
-rw-r--r--src/backend/postmaster/fork_process.c35
-rw-r--r--src/backend/postmaster/pgarch.c53
-rw-r--r--src/backend/postmaster/pgstat.c471
-rw-r--r--src/backend/postmaster/postmaster.c685
-rw-r--r--src/backend/postmaster/syslogger.c120
-rw-r--r--src/backend/regex/regc_color.c6
-rw-r--r--src/backend/regex/regc_cvec.c5
-rw-r--r--src/backend/regex/regc_lex.c11
-rw-r--r--src/backend/regex/regc_locale.c12
-rw-r--r--src/backend/regex/regc_nfa.c19
-rw-r--r--src/backend/regex/regcomp.c37
-rw-r--r--src/backend/regex/rege_dfa.c8
-rw-r--r--src/backend/regex/regexec.c17
-rw-r--r--src/backend/rewrite/rewriteDefine.c124
-rw-r--r--src/backend/rewrite/rewriteHandler.c321
-rw-r--r--src/backend/rewrite/rewriteManip.c104
-rw-r--r--src/backend/rewrite/rewriteRemove.c10
-rw-r--r--src/backend/rewrite/rewriteSupport.c5
-rw-r--r--src/backend/storage/buffer/buf_init.c6
-rw-r--r--src/backend/storage/buffer/buf_table.c8
-rw-r--r--src/backend/storage/buffer/bufmgr.c284
-rw-r--r--src/backend/storage/buffer/freelist.c38
-rw-r--r--src/backend/storage/buffer/localbuf.c24
-rw-r--r--src/backend/storage/file/buffile.c37
-rw-r--r--src/backend/storage/file/fd.c93
-rw-r--r--src/backend/storage/freespace/freespace.c179
-rw-r--r--src/backend/storage/ipc/ipc.c23
-rw-r--r--src/backend/storage/ipc/ipci.c19
-rw-r--r--src/backend/storage/ipc/pmsignal.c8
-rw-r--r--src/backend/storage/ipc/procarray.c129
-rw-r--r--src/backend/storage/ipc/shmem.c51
-rw-r--r--src/backend/storage/ipc/sinval.c99
-rw-r--r--src/backend/storage/ipc/sinvaladt.c22
-rw-r--r--src/backend/storage/large_object/inv_api.c37
-rw-r--r--src/backend/storage/lmgr/deadlock.c99
-rw-r--r--src/backend/storage/lmgr/lmgr.c34
-rw-r--r--src/backend/storage/lmgr/lock.c270
-rw-r--r--src/backend/storage/lmgr/lwlock.c93
-rw-r--r--src/backend/storage/lmgr/proc.c225
-rw-r--r--src/backend/storage/lmgr/s_lock.c120
-rw-r--r--src/backend/storage/lmgr/spin.c11
-rw-r--r--src/backend/storage/page/bufpage.c64
-rw-r--r--src/backend/storage/smgr/md.c171
-rw-r--r--src/backend/storage/smgr/smgr.c128
-rw-r--r--src/backend/tcop/dest.c7
-rw-r--r--src/backend/tcop/fastpath.c43
-rw-r--r--src/backend/tcop/postgres.c644
-rw-r--r--src/backend/tcop/pquery.c220
-rw-r--r--src/backend/tcop/utility.c92
-rw-r--r--src/backend/utils/adt/acl.c266
-rw-r--r--src/backend/utils/adt/array_userfuncs.c43
-rw-r--r--src/backend/utils/adt/arrayfuncs.c342
-rw-r--r--src/backend/utils/adt/ascii.c6
-rw-r--r--src/backend/utils/adt/cash.c24
-rw-r--r--src/backend/utils/adt/char.c8
-rw-r--r--src/backend/utils/adt/date.c195
-rw-r--r--src/backend/utils/adt/datetime.c337
-rw-r--r--src/backend/utils/adt/datum.c11
-rw-r--r--src/backend/utils/adt/dbsize.c142
-rw-r--r--src/backend/utils/adt/encode.c8
-rw-r--r--src/backend/utils/adt/float.c122
-rw-r--r--src/backend/utils/adt/format_type.c42
-rw-r--r--src/backend/utils/adt/formatting.c339
-rw-r--r--src/backend/utils/adt/genfile.c57
-rw-r--r--src/backend/utils/adt/geo_ops.c135
-rw-r--r--src/backend/utils/adt/inet_net_ntop.c20
-rw-r--r--src/backend/utils/adt/inet_net_pton.c5
-rw-r--r--src/backend/utils/adt/int.c148
-rw-r--r--src/backend/utils/adt/int8.c140
-rw-r--r--src/backend/utils/adt/like.c46
-rw-r--r--src/backend/utils/adt/like_match.c42
-rw-r--r--src/backend/utils/adt/lockfuncs.c29
-rw-r--r--src/backend/utils/adt/mac.c6
-rw-r--r--src/backend/utils/adt/misc.c21
-rw-r--r--src/backend/utils/adt/nabstime.c147
-rw-r--r--src/backend/utils/adt/name.c6
-rw-r--r--src/backend/utils/adt/network.c28
-rw-r--r--src/backend/utils/adt/numeric.c328
-rw-r--r--src/backend/utils/adt/numutils.c16
-rw-r--r--src/backend/utils/adt/oid.c28
-rw-r--r--src/backend/utils/adt/oracle_compat.c74
-rw-r--r--src/backend/utils/adt/pg_locale.c27
-rw-r--r--src/backend/utils/adt/pg_lzcompress.c102
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c12
-rw-r--r--src/backend/utils/adt/quote.c6
-rw-r--r--src/backend/utils/adt/regexp.c63
-rw-r--r--src/backend/utils/adt/regproc.c133
-rw-r--r--src/backend/utils/adt/ri_triggers.c504
-rw-r--r--src/backend/utils/adt/rowtypes.c61
-rw-r--r--src/backend/utils/adt/ruleutils.c367
-rw-r--r--src/backend/utils/adt/selfuncs.c748
-rw-r--r--src/backend/utils/adt/timestamp.c509
-rw-r--r--src/backend/utils/adt/varbit.c50
-rw-r--r--src/backend/utils/adt/varchar.c44
-rw-r--r--src/backend/utils/adt/varlena.c214
-rw-r--r--src/backend/utils/cache/catcache.c195
-rw-r--r--src/backend/utils/cache/inval.c93
-rw-r--r--src/backend/utils/cache/lsyscache.c40
-rw-r--r--src/backend/utils/cache/relcache.c408
-rw-r--r--src/backend/utils/cache/syscache.c77
-rw-r--r--src/backend/utils/cache/typcache.c41
-rw-r--r--src/backend/utils/error/assert.c6
-rw-r--r--src/backend/utils/error/elog.c198
-rw-r--r--src/backend/utils/fmgr/dfmgr.c16
-rw-r--r--src/backend/utils/fmgr/fmgr.c99
-rw-r--r--src/backend/utils/fmgr/funcapi.c83
-rw-r--r--src/backend/utils/hash/dynahash.c78
-rw-r--r--src/backend/utils/hash/hashfn.c8
-rw-r--r--src/backend/utils/hash/pg_crc.c6
-rw-r--r--src/backend/utils/init/flatfiles.c193
-rw-r--r--src/backend/utils/init/miscinit.c153
-rw-r--r--src/backend/utils/init/postinit.c115
-rw-r--r--src/backend/utils/mb/conv.c13
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c14
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c35
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c6
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c4
-rw-r--r--src/backend/utils/mb/encnames.c22
-rw-r--r--src/backend/utils/mb/mbutils.c30
-rw-r--r--src/backend/utils/mb/wchar.c123
-rw-r--r--src/backend/utils/misc/guc.c436
-rw-r--r--src/backend/utils/misc/pg_rusage.c6
-rw-r--r--src/backend/utils/misc/ps_status.c48
-rw-r--r--src/backend/utils/misc/superuser.c8
-rw-r--r--src/backend/utils/mmgr/aset.c128
-rw-r--r--src/backend/utils/mmgr/mcxt.c39
-rw-r--r--src/backend/utils/mmgr/portalmem.c107
-rw-r--r--src/backend/utils/resowner/resowner.c68
-rw-r--r--src/backend/utils/sort/logtape.c104
-rw-r--r--src/backend/utils/sort/tuplesort.c326
-rw-r--r--src/backend/utils/sort/tuplestore.c90
-rw-r--r--src/backend/utils/time/tqual.c64
358 files changed, 19583 insertions, 20482 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 8b9714184c2..2ba59ab5e9e 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -6,7 +6,7 @@
*
* NOTE: there is massive duplication of code in this module to
* support both the convention that a null is marked by a bool TRUE,
- * and the convention that a null is marked by a char 'n'. The latter
+ * and the convention that a null is marked by a char 'n'. The latter
* convention is deprecated but it'll probably be a long time before
* we can get rid of it entirely.
*
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.99 2005/03/21 01:23:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.100 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -452,8 +452,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
- * In for(), we test <= and not < because we want to see if we
- * can go past it in initializing offsets.
+ * In for(), we test <= and not < because we want to see if we can
+ * go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@@ -467,10 +467,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
- * If slow is false, and we got here, we know that we have a tuple
- * with no nulls or var-widths before the target attribute. If
- * possible, we also want to initialize the remainder of the attribute
- * cached offset values.
+ * If slow is false, and we got here, we know that we have a tuple with no
+ * nulls or var-widths before the target attribute. If possible, we also
+ * want to initialize the remainder of the attribute cached offset values.
*/
if (!slow)
{
@@ -513,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
- * Note - This loop is a little tricky. For each non-null attribute,
- * we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
- * storage and no alignment padding either. We can use/set attcacheoff
- * until we pass either a null or a var-width attribute.
+ * Note - This loop is a little tricky. For each non-null attribute, we
+ * have to first account for alignment padding before the attr, then
+ * advance over the attr based on its length. Nulls have no storage
+ * and no alignment padding either. We can use/set attcacheoff until
+ * we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)
@@ -597,15 +596,13 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
break;
/*
- * If the attribute number is 0, then we are supposed to
- * return the entire tuple as a row-type Datum. (Using zero
- * for this purpose is unclean since it risks confusion with
- * "invalid attr" result codes, but it's not worth changing
- * now.)
+ * If the attribute number is 0, then we are supposed to return
+ * the entire tuple as a row-type Datum. (Using zero for this
+ * purpose is unclean since it risks confusion with "invalid attr"
+ * result codes, but it's not worth changing now.)
*
- * We have to make a copy of the tuple so we can safely insert
- * the Datum overhead fields, which are not set in on-disk
- * tuples.
+ * We have to make a copy of the tuple so we can safely insert the
+ * Datum overhead fields, which are not set in on-disk tuples.
*/
case InvalidAttrNumber:
{
@@ -708,15 +705,15 @@ heap_form_tuple(TupleDesc tupleDescriptor,
numberOfAttributes, MaxTupleAttributeNumber)));
/*
- * Check for nulls and embedded tuples; expand any toasted attributes
- * in embedded tuples. This preserves the invariant that toasting can
- * only go one level deep.
+ * Check for nulls and embedded tuples; expand any toasted attributes in
+ * embedded tuples. This preserves the invariant that toasting can only
+ * go one level deep.
*
* We can skip calling toast_flatten_tuple_attribute() if the attribute
* couldn't possibly be of composite type. All composite datums are
- * varlena and have alignment 'd'; furthermore they aren't arrays.
- * Also, if an attribute is already toasted, it must have been sent to
- * disk already and so cannot contain toasted attributes.
+ * varlena and have alignment 'd'; furthermore they aren't arrays. Also,
+ * if an attribute is already toasted, it must have been sent to disk
+ * already and so cannot contain toasted attributes.
*/
for (i = 0; i < numberOfAttributes; i++)
{
@@ -757,8 +754,8 @@ heap_form_tuple(TupleDesc tupleDescriptor,
tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
/*
- * And fill in the information. Note we fill the Datum fields even
- * though this tuple may never become a Datum.
+ * And fill in the information. Note we fill the Datum fields even though
+ * this tuple may never become a Datum.
*/
tuple->t_len = len;
ItemPointerSetInvalid(&(tuple->t_self));
@@ -816,15 +813,15 @@ heap_formtuple(TupleDesc tupleDescriptor,
numberOfAttributes, MaxTupleAttributeNumber)));
/*
- * Check for nulls and embedded tuples; expand any toasted attributes
- * in embedded tuples. This preserves the invariant that toasting can
- * only go one level deep.
+ * Check for nulls and embedded tuples; expand any toasted attributes in
+ * embedded tuples. This preserves the invariant that toasting can only
+ * go one level deep.
*
* We can skip calling toast_flatten_tuple_attribute() if the attribute
* couldn't possibly be of composite type. All composite datums are
- * varlena and have alignment 'd'; furthermore they aren't arrays.
- * Also, if an attribute is already toasted, it must have been sent to
- * disk already and so cannot contain toasted attributes.
+ * varlena and have alignment 'd'; furthermore they aren't arrays. Also,
+ * if an attribute is already toasted, it must have been sent to disk
+ * already and so cannot contain toasted attributes.
*/
for (i = 0; i < numberOfAttributes; i++)
{
@@ -865,8 +862,8 @@ heap_formtuple(TupleDesc tupleDescriptor,
tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
/*
- * And fill in the information. Note we fill the Datum fields even
- * though this tuple may never become a Datum.
+ * And fill in the information. Note we fill the Datum fields even though
+ * this tuple may never become a Datum.
*/
tuple->t_len = len;
ItemPointerSetInvalid(&(tuple->t_self));
@@ -917,15 +914,15 @@ heap_modify_tuple(HeapTuple tuple,
HeapTuple newTuple;
/*
- * allocate and fill values and isnull arrays from either the tuple or
- * the repl information, as appropriate.
+ * allocate and fill values and isnull arrays from either the tuple or the
+ * repl information, as appropriate.
*
* NOTE: it's debatable whether to use heap_deform_tuple() here or just
- * heap_getattr() only the non-replaced colums. The latter could win
- * if there are many replaced columns and few non-replaced ones.
- * However, heap_deform_tuple costs only O(N) while the heap_getattr
- * way would cost O(N^2) if there are many non-replaced columns, so it
- * seems better to err on the side of linear cost.
+ * heap_getattr() only the non-replaced colums. The latter could win if
+ * there are many replaced columns and few non-replaced ones. However,
+ * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
+ * O(N^2) if there are many non-replaced columns, so it seems better to
+ * err on the side of linear cost.
*/
values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
@@ -950,8 +947,8 @@ heap_modify_tuple(HeapTuple tuple,
pfree(isnull);
/*
- * copy the identification info of the old tuple: t_ctid, t_self, and
- * OID (if any)
+ * copy the identification info of the old tuple: t_ctid, t_self, and OID
+ * (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;
@@ -986,15 +983,15 @@ heap_modifytuple(HeapTuple tuple,
HeapTuple newTuple;
/*
- * allocate and fill values and nulls arrays from either the tuple or
- * the repl information, as appropriate.
+ * allocate and fill values and nulls arrays from either the tuple or the
+ * repl information, as appropriate.
*
* NOTE: it's debatable whether to use heap_deformtuple() here or just
- * heap_getattr() only the non-replaced colums. The latter could win
- * if there are many replaced columns and few non-replaced ones.
- * However, heap_deformtuple costs only O(N) while the heap_getattr
- * way would cost O(N^2) if there are many non-replaced columns, so it
- * seems better to err on the side of linear cost.
+ * heap_getattr() only the non-replaced colums. The latter could win if
+ * there are many replaced columns and few non-replaced ones. However,
+ * heap_deformtuple costs only O(N) while the heap_getattr way would cost
+ * O(N^2) if there are many non-replaced columns, so it seems better to
+ * err on the side of linear cost.
*/
values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
nulls = (char *) palloc(numberOfAttributes * sizeof(char));
@@ -1022,8 +1019,8 @@ heap_modifytuple(HeapTuple tuple,
pfree(nulls);
/*
- * copy the identification info of the old tuple: t_ctid, t_self, and
- * OID (if any)
+ * copy the identification info of the old tuple: t_ctid, t_self, and OID
+ * (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;
@@ -1068,9 +1065,9 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
natts = tup->t_natts;
/*
- * In inheritance situations, it is possible that the given tuple
- * actually has more fields than the caller is expecting. Don't run
- * off the end of the caller's arrays.
+ * In inheritance situations, it is possible that the given tuple actually
+ * has more fields than the caller is expecting. Don't run off the end of
+ * the caller's arrays.
*/
natts = Min(natts, tdesc_natts);
@@ -1161,9 +1158,9 @@ heap_deformtuple(HeapTuple tuple,
natts = tup->t_natts;
/*
- * In inheritance situations, it is possible that the given tuple
- * actually has more fields than the caller is expecting. Don't run
- * off the end of the caller's arrays.
+ * In inheritance situations, it is possible that the given tuple actually
+ * has more fields than the caller is expecting. Don't run off the end of
+ * the caller's arrays.
*/
natts = Min(natts, tdesc_natts);
@@ -1228,22 +1225,22 @@ heap_deformtuple(HeapTuple tuple,
static void
slot_deform_tuple(TupleTableSlot *slot, int natts)
{
- HeapTuple tuple = slot->tts_tuple;
- TupleDesc tupleDesc = slot->tts_tupleDescriptor;
+ HeapTuple tuple = slot->tts_tuple;
+ TupleDesc tupleDesc = slot->tts_tupleDescriptor;
Datum *values = slot->tts_values;
bool *isnull = slot->tts_isnull;
- HeapTupleHeader tup = tuple->t_data;
+ HeapTupleHeader tup = tuple->t_data;
bool hasnulls = HeapTupleHasNulls(tuple);
Form_pg_attribute *att = tupleDesc->attrs;
int attnum;
- char *tp; /* ptr to tuple data */
- long off; /* offset in tuple data */
- bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
- bool slow; /* can we use/set attcacheoff? */
+ char *tp; /* ptr to tuple data */
+ long off; /* offset in tuple data */
+ bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
+ bool slow; /* can we use/set attcacheoff? */
/*
- * Check whether the first call for this tuple, and initialize or
- * restore loop state.
+ * Check whether the first call for this tuple, and initialize or restore
+ * loop state.
*/
attnum = slot->tts_nvalid;
if (attnum == 0)
@@ -1269,7 +1266,7 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
{
values[attnum] = (Datum) 0;
isnull[attnum] = true;
- slow = true; /* can't use attcacheoff anymore */
+ slow = true; /* can't use attcacheoff anymore */
continue;
}
@@ -1290,7 +1287,7 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
off = att_addlength(off, thisatt->attlen, tp + off);
if (thisatt->attlen <= 0)
- slow = true; /* can't use attcacheoff anymore */
+ slow = true; /* can't use attcacheoff anymore */
}
/*
@@ -1316,9 +1313,9 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
Datum
slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
- HeapTuple tuple = slot->tts_tuple;
- TupleDesc tupleDesc = slot->tts_tupleDescriptor;
- HeapTupleHeader tup;
+ HeapTuple tuple = slot->tts_tuple;
+ TupleDesc tupleDesc = slot->tts_tupleDescriptor;
+ HeapTupleHeader tup;
/*
* system attributes are handled by heap_getsysattr
@@ -1349,18 +1346,18 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
}
/*
- * otherwise we had better have a physical tuple (tts_nvalid should
- * equal natts in all virtual-tuple cases)
+ * otherwise we had better have a physical tuple (tts_nvalid should equal
+ * natts in all virtual-tuple cases)
*/
- if (tuple == NULL) /* internal error */
+ if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract attribute from empty tuple slot");
/*
* return NULL if attnum is out of range according to the tuple
*
- * (We have to check this separately because of various inheritance
- * and table-alteration scenarios: the tuple could be either longer
- * or shorter than the tupdesc.)
+ * (We have to check this separately because of various inheritance and
+ * table-alteration scenarios: the tuple could be either longer or shorter
+ * than the tupdesc.)
*/
tup = tuple->t_data;
if (attnum > tup->t_natts)
@@ -1379,10 +1376,9 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
}
/*
- * If the attribute's column has been dropped, we force a NULL
- * result. This case should not happen in normal use, but it could
- * happen if we are executing a plan cached before the column was
- * dropped.
+ * If the attribute's column has been dropped, we force a NULL result.
+ * This case should not happen in normal use, but it could happen if we
+ * are executing a plan cached before the column was dropped.
*/
if (tupleDesc->attrs[attnum - 1]->attisdropped)
{
@@ -1420,11 +1416,11 @@ slot_getallattrs(TupleTableSlot *slot)
return;
/*
- * otherwise we had better have a physical tuple (tts_nvalid should
- * equal natts in all virtual-tuple cases)
+ * otherwise we had better have a physical tuple (tts_nvalid should equal
+ * natts in all virtual-tuple cases)
*/
tuple = slot->tts_tuple;
- if (tuple == NULL) /* internal error */
+ if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract attribute from empty tuple slot");
/*
@@ -1467,11 +1463,11 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum)
elog(ERROR, "invalid attribute number %d", attnum);
/*
- * otherwise we had better have a physical tuple (tts_nvalid should
- * equal natts in all virtual-tuple cases)
+ * otherwise we had better have a physical tuple (tts_nvalid should equal
+ * natts in all virtual-tuple cases)
*/
tuple = slot->tts_tuple;
- if (tuple == NULL) /* internal error */
+ if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract attribute from empty tuple slot");
/*
@@ -1502,8 +1498,8 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum)
bool
slot_attisnull(TupleTableSlot *slot, int attnum)
{
- HeapTuple tuple = slot->tts_tuple;
- TupleDesc tupleDesc = slot->tts_tupleDescriptor;
+ HeapTuple tuple = slot->tts_tuple;
+ TupleDesc tupleDesc = slot->tts_tupleDescriptor;
/*
* system attributes are handled by heap_attisnull
@@ -1528,10 +1524,10 @@ slot_attisnull(TupleTableSlot *slot, int attnum)
return true;
/*
- * otherwise we had better have a physical tuple (tts_nvalid should
- * equal natts in all virtual-tuple cases)
+ * otherwise we had better have a physical tuple (tts_nvalid should equal
+ * natts in all virtual-tuple cases)
*/
- if (tuple == NULL) /* internal error */
+ if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract attribute from empty tuple slot");
/* and let the tuple tell it */
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e5d19765e79..b3520baa2bc 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.74 2005/03/27 18:38:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.75 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,20 +70,20 @@ index_form_tuple(TupleDesc tupleDescriptor,
continue;
/*
- * If value is stored EXTERNAL, must fetch it so we are not
- * depending on outside storage. This should be improved someday.
+ * If value is stored EXTERNAL, must fetch it so we are not depending
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(values[i]))
{
untoasted_values[i] = PointerGetDatum(
- heap_tuple_fetch_attr(
- (varattrib *) DatumGetPointer(values[i])));
+ heap_tuple_fetch_attr(
+ (varattrib *) DatumGetPointer(values[i])));
untoasted_free[i] = true;
}
/*
- * If value is above size target, and is of a compressible
- * datatype, try to compress it in-line.
+ * If value is above size target, and is of a compressible datatype,
+ * try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_values[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_values[i]) &&
@@ -149,23 +149,23 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* We do this because heap_fill_tuple wants to initialize a "tupmask"
- * which is used for HeapTuples, but we want an indextuple infomask.
- * The only relevant info is the "has variable attributes" field.
- * We have already set the hasnull bit above.
+ * which is used for HeapTuples, but we want an indextuple infomask. The
+ * only relevant info is the "has variable attributes" field. We have
+ * already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARWIDTH)
infomask |= INDEX_VAR_MASK;
/*
- * Here we make sure that the size will fit in the field reserved for
- * it in t_info.
+ * Here we make sure that the size will fit in the field reserved for it
+ * in t_info.
*/
if ((size & INDEX_SIZE_MASK) != size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index row requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
@@ -322,10 +322,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
- * If slow is false, and we got here, we know that we have a tuple
- * with no nulls or var-widths before the target attribute. If
- * possible, we also want to initialize the remainder of the attribute
- * cached offset values.
+ * If slow is false, and we got here, we know that we have a tuple with no
+ * nulls or var-widths before the target attribute. If possible, we also
+ * want to initialize the remainder of the attribute cached offset values.
*/
if (!slow)
{
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 9080d047fc2..96dfafb7cbf 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.91 2005/06/22 17:45:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.92 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,9 +78,9 @@ printtup_create_DR(CommandDest dest, Portal portal)
else
{
/*
- * In protocol 2.0 the Bind message does not exist, so there is no
- * way for the columns to have different print formats; it's
- * sufficient to look at the first one.
+ * In protocol 2.0 the Bind message does not exist, so there is no way
+ * for the columns to have different print formats; it's sufficient to
+ * look at the first one.
*/
if (portal->formats && portal->formats[0] != 0)
self->pub.receiveSlot = printtup_internal_20;
@@ -113,8 +113,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
/*
- * Send portal name to frontend (obsolete cruft, gone in proto
- * 3.0)
+ * Send portal name to frontend (obsolete cruft, gone in proto 3.0)
*
* If portal name not specified, use "blank" portal.
*/
@@ -127,8 +126,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
}
/*
- * If this is a retrieve, and we are supposed to emit row
- * descriptions, then we send back the tuple descriptor of the tuples.
+ * If this is a retrieve, and we are supposed to emit row descriptions,
+ * then we send back the tuple descriptor of the tuples.
*/
if (operation == CMD_SELECT && myState->sendDescrip)
SendRowDescriptionMessage(typeinfo,
@@ -280,7 +279,7 @@ printtup_prepare_info(DR_printtup *myState, TupleDesc typeinfo, int numAttrs)
static void
printtup(TupleTableSlot *slot, DestReceiver *self)
{
- TupleDesc typeinfo = slot->tts_tupleDescriptor;
+ TupleDesc typeinfo = slot->tts_tupleDescriptor;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int natts = typeinfo->natts;
@@ -363,7 +362,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self)
static void
printtup_20(TupleTableSlot *slot, DestReceiver *self)
{
- TupleDesc typeinfo = slot->tts_tupleDescriptor;
+ TupleDesc typeinfo = slot->tts_tupleDescriptor;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int natts = typeinfo->natts;
@@ -566,7 +565,7 @@ debugtup(TupleTableSlot *slot, DestReceiver *self)
static void
printtup_internal_20(TupleTableSlot *slot, DestReceiver *self)
{
- TupleDesc typeinfo = slot->tts_tupleDescriptor;
+ TupleDesc typeinfo = slot->tts_tupleDescriptor;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int natts = typeinfo->natts;
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index fedc7ec4894..cfa455beec9 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.111 2005/04/14 22:34:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -49,10 +49,10 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
- * Note: we assume that sizeof(struct tupleDesc) is a multiple of
- * the struct pointer alignment requirement, and hence we don't need
- * to insert alignment padding between the struct and the array of
- * attribute row pointers.
+ * Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
+ * pointer alignment requirement, and hence we don't need to insert
+ * alignment padding between the struct and the array of attribute row
+ * pointers.
*/
attroffset = sizeof(struct tupleDesc) + natts * sizeof(Form_pg_attribute);
attroffset = MAXALIGN(attroffset);
@@ -273,16 +273,16 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
Form_pg_attribute attr2 = tupdesc2->attrs[i];
/*
- * We do not need to check every single field here: we can
- * disregard attrelid and attnum (which were used to place the row
- * in the attrs array in the first place). It might look like we
- * could dispense with checking attlen/attbyval/attalign, since these
- * are derived from atttypid; but in the case of dropped columns
- * we must check them (since atttypid will be zero for all dropped
- * columns) and in general it seems safer to check them always.
+ * We do not need to check every single field here: we can disregard
+ * attrelid and attnum (which were used to place the row in the attrs
+ * array in the first place). It might look like we could dispense
+ * with checking attlen/attbyval/attalign, since these are derived
+ * from atttypid; but in the case of dropped columns we must check
+ * them (since atttypid will be zero for all dropped columns) and in
+ * general it seems safer to check them always.
*
- * attcacheoff must NOT be checked since it's possibly not set
- * in both copies.
+ * attcacheoff must NOT be checked since it's possibly not set in both
+ * copies.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -332,9 +332,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
AttrDefault *defval2 = constr2->defval;
/*
- * We can't assume that the items are always read from the
- * system catalogs in the same order; so use the adnum field
- * to identify the matching item to compare.
+ * We can't assume that the items are always read from the system
+ * catalogs in the same order; so use the adnum field to identify
+ * the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@@ -355,9 +355,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
- * Similarly, don't assume that the checks are always read in
- * the same order; match them up by name and contents. (The
- * name *should* be unique, but...)
+ * Similarly, don't assume that the checks are always read in the
+ * same order; match them up by name and contents. (The name
+ * *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{
@@ -407,8 +407,8 @@ TupleDescInitEntry(TupleDesc desc,
/*
* Note: attributeName can be NULL, because the planner doesn't always
- * fill in valid resname values in targetlists, particularly for
- * resjunk attributes.
+ * fill in valid resname values in targetlists, particularly for resjunk
+ * attributes.
*/
if (attributeName != NULL)
namestrcpy(&(att->attname), attributeName);
@@ -482,8 +482,8 @@ BuildDescForRelation(List *schema)
ColumnDef *entry = lfirst(l);
/*
- * for each entry in the list, get the name and type information
- * from the list and have TupleDescInitEntry fill in the attribute
+ * for each entry in the list, get the name and type information from
+ * the list and have TupleDescInitEntry fill in the attribute
* information we need.
*/
attnum++;
@@ -508,8 +508,8 @@ BuildDescForRelation(List *schema)
desc->attrs[attnum - 1]->attnotnull = entry->is_not_null;
/*
- * Note we copy only pre-cooked default expressions. Digestion of
- * raw ones is someone else's problem.
+ * Note we copy only pre-cooked default expressions. Digestion of raw
+ * ones is someone else's problem.
*/
if (entry->cooked_default != NULL)
{
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 5978c8af4cc..b9e0469b05b 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.2 2005/09/22 20:44:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.3 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,7 +26,7 @@ typedef struct
{
BOX *key;
int pos;
-} KBsort;
+} KBsort;
static int compare_KB(const void *a, const void *b);
static bool gist_box_leaf_consistent(BOX *key, BOX *query,
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index f8611ce46a0..2cff9509b6a 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.80 2005/06/06 17:01:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.81 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -55,8 +55,8 @@ hashbuild(PG_FUNCTION_ARGS)
HashBuildState buildstate;
/*
- * We expect to be called exactly once for any index relation. If
- * that's not the case, big trouble's what we have.
+ * We expect to be called exactly once for any index relation. If that's
+ * not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
@@ -70,7 +70,7 @@ hashbuild(PG_FUNCTION_ARGS)
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
- hashbuildCallback, (void *) &buildstate);
+ hashbuildCallback, (void *) &buildstate);
/* since we just counted the # of tuples, may as well update stats */
IndexCloseAndUpdateStats(heap, reltuples, index, buildstate.indtuples);
@@ -141,12 +141,12 @@ hashinsert(PG_FUNCTION_ARGS)
/*
* If the single index key is null, we don't insert it into the index.
- * Hash tables support scans on '='. Relational algebra says that A =
- * B returns null if either A or B is null. This means that no
- * qualification used in an index scan could ever return true on a
- * null attribute. It also means that indices can't be used by ISNULL
- * or NOTNULL scans, but that's an artifact of the strategy map
- * architecture chosen in 1986, not of the way nulls are handled here.
+ * Hash tables support scans on '='. Relational algebra says that A = B
+ * returns null if either A or B is null. This means that no
+ * qualification used in an index scan could ever return true on a null
+ * attribute. It also means that indices can't be used by ISNULL or
+ * NOTNULL scans, but that's an artifact of the strategy map architecture
+ * chosen in 1986, not of the way nulls are handled here.
*/
if (IndexTupleHasNulls(itup))
{
@@ -180,16 +180,16 @@ hashgettuple(PG_FUNCTION_ARGS)
bool res;
/*
- * We hold pin but not lock on current buffer while outside the hash
- * AM. Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash AM.
+ * Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
/*
- * If we've already initialized this scan, we can just advance it in
- * the appropriate direction. If we haven't done so yet, we call a
- * routine to get the first item in the scan.
+ * If we've already initialized this scan, we can just advance it in the
+ * appropriate direction. If we haven't done so yet, we call a routine to
+ * get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
{
@@ -199,17 +199,16 @@ hashgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, so mark it by setting the LP_DELETE bit in the item
- * flags.
+ * Yes, so mark it by setting the LP_DELETE bit in the item flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->hashso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
- * Since this can be redone later if needed, it's treated the
- * same as a commit-hint-bit status update for heap tuples: we
- * mark the buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same
+ * as a commit-hint-bit status update for heap tuples: we mark the
+ * buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
}
@@ -256,7 +255,7 @@ Datum
hashgetmulti(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
+ ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
int32 max_tids = PG_GETARG_INT32(2);
int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
HashScanOpaque so = (HashScanOpaque) scan->opaque;
@@ -265,8 +264,8 @@ hashgetmulti(PG_FUNCTION_ARGS)
int32 ntids = 0;
/*
- * We hold pin but not lock on current buffer while outside the hash
- * AM. Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash AM.
+ * Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
@@ -280,6 +279,7 @@ hashgetmulti(PG_FUNCTION_ARGS)
res = _hash_next(scan, ForwardScanDirection);
else
res = _hash_first(scan, ForwardScanDirection);
+
/*
* Skip killed tuples if asked to.
*/
@@ -505,12 +505,12 @@ hashbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples = 0;
/*
- * Read the metapage to fetch original bucket and tuple counts. Also,
- * we keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a
- * bit hokey but perfectly safe, since the interesting entries in the
- * spares array cannot change under us; and it beats rereading the
- * metapage for each bucket.
+ * Read the metapage to fetch original bucket and tuple counts. Also, we
+ * keep a copy of the last-seen metapage so that we can use its
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hokey but perfectly safe, since the interesting entries in the spares
+ * array cannot change under us; and it beats rereading the metapage for
+ * each bucket.
*/
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
metap = (HashMetaPage) BufferGetPage(metabuf);
@@ -569,7 +569,7 @@ loop_top:
ItemPointer htup;
hitem = (HashItem) PageGetItem(page,
- PageGetItemId(page, offno));
+ PageGetItemId(page, offno));
htup = &(hitem->hash_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -641,8 +641,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by
- * dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
*/
if (metap->hashm_ntuples > tuples_removed)
metap->hashm_ntuples -= tuples_removed;
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 05ca3bcdb12..2ffca5efe6a 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.44 2005/05/25 21:40:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.45 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -46,11 +46,11 @@ hashint8(PG_FUNCTION_ARGS)
{
/*
* The idea here is to produce a hash value compatible with the values
- * produced by hashint4 and hashint2 for logically equivalent inputs;
- * this is necessary if we ever hope to support cross-type hash joins
- * across these input types. Since all three types are signed, we can
- * xor the high half of the int8 value if the sign is positive, or the
- * complement of the high half when the sign is negative.
+ * produced by hashint4 and hashint2 for logically equivalent inputs; this
+ * is necessary if we ever hope to support cross-type hash joins across
+ * these input types. Since all three types are signed, we can xor the
+ * high half of the int8 value if the sign is positive, or the complement
+ * of the high half when the sign is negative.
*/
#ifndef INT64_IS_BUSTED
int64 val = PG_GETARG_INT64(0);
@@ -78,9 +78,9 @@ hashfloat4(PG_FUNCTION_ARGS)
float4 key = PG_GETARG_FLOAT4(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit
- * patterns but should compare as equal. We must ensure that they
- * have the same hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit patterns
+ * but should compare as equal. We must ensure that they have the same
+ * hash value, which is most easily done this way:
*/
if (key == (float4) 0)
PG_RETURN_UINT32(0);
@@ -94,9 +94,9 @@ hashfloat8(PG_FUNCTION_ARGS)
float8 key = PG_GETARG_FLOAT8(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit
- * patterns but should compare as equal. We must ensure that they
- * have the same hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit patterns
+ * but should compare as equal. We must ensure that they have the same
+ * hash value, which is most easily done this way:
*/
if (key == (float8) 0)
PG_RETURN_UINT32(0);
@@ -126,8 +126,7 @@ hashname(PG_FUNCTION_ARGS)
char *key = NameStr(*PG_GETARG_NAME(0));
int keylen = strlen(key);
- Assert(keylen < NAMEDATALEN); /* else it's not truncated
- * correctly */
+ Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */
return hash_any((unsigned char *) key, keylen);
}
@@ -139,8 +138,8 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena, but
- * it seems likely that we may need to do something different in non-C
+ * Note: this is currently identical in behavior to hashvarlena, but it
+ * seems likely that we may need to do something different in non-C
* locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 860376cd481..7637c3566cb 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.37 2005/08/10 21:36:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.38 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,8 +50,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
bool isnull;
/*
- * Compute the hash key for the item. We do this first so as not to
- * need to hold any locks while running the hash function.
+ * Compute the hash key for the item. We do this first so as not to need
+ * to hold any locks while running the hash function.
*/
itup = &(hitem->hash_itup);
if (rel->rd_rel->relnatts != 1)
@@ -64,12 +64,12 @@ _hash_doinsert(Relation rel, HashItem hitem)
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
- itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
- * we need to be consistent */
+ itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
+ * need to be consistent */
/*
- * Acquire shared split lock so we can compute the target bucket
- * safely (see README).
+ * Acquire shared split lock so we can compute the target bucket safely
+ * (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
@@ -79,9 +79,9 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check whether the item can fit on a hash page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) Note that at this
- * point, itemsz doesn't include the ItemId.
+ * Check whether the item can fit on a hash page at all. (Eventually, we
+ * ought to try to apply TOAST methods if not.) Note that at this point,
+ * itemsz doesn't include the ItemId.
*/
if (itemsz > HashMaxItemSize((Page) metap))
ereport(ERROR,
@@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
errmsg("index row size %lu exceeds hash maximum %lu",
(unsigned long) itemsz,
(unsigned long) HashMaxItemSize((Page) metap)),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Compute the target bucket number, and convert to block number.
@@ -105,8 +105,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
- * Acquire share lock on target bucket; then we can release split
- * lock.
+ * Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -130,8 +129,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
if (BlockNumberIsValid(nextblkno))
{
/*
- * ovfl page exists; go get it. if it doesn't have room,
- * we'll find out next pass through the loop test above.
+ * ovfl page exists; go get it. if it doesn't have room, we'll
+ * find out next pass through the loop test above.
*/
_hash_relbuf(rel, buf);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 1b8b798b45d..7289d9a0b35 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.46 2005/05/11 01:26:01 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -44,8 +44,8 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
/* loop */ ;
/*
- * Convert to absolute page number by adding the number of bucket
- * pages that exist before this split point.
+ * Convert to absolute page number by adding the number of bucket pages
+ * that exist before this split point.
*/
return (BlockNumber) ((1 << i) + ovflbitnum);
}
@@ -252,10 +252,10 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
/*
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
- * immediately: the bitmap page itself, and the following page
- * which is the one we return to the caller. Both of these are
- * correctly marked "in use". Subsequent pages do not exist yet,
- * but it is convenient to pre-mark them as "in use" too.
+ * immediately: the bitmap page itself, and the following page which
+ * is the one we return to the caller. Both of these are correctly
+ * marked "in use". Subsequent pages do not exist yet, but it is
+ * convenient to pre-mark them as "in use" too.
*/
_hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit));
@@ -265,8 +265,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
else
{
/*
- * Nothing to do here; since the page was past the last used page,
- * we know its bitmap bit was preinitialized to "in use".
+ * Nothing to do here; since the page was past the last used page, we
+ * know its bitmap bit was preinitialized to "in use".
*/
}
@@ -275,8 +275,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
- * changing it if someone moved it while we were searching bitmap
- * pages.
+ * changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
metap->hashm_firstfree = bit + 1;
@@ -305,8 +304,7 @@ found:
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
- * changing it if someone moved it while we were searching bitmap
- * pages.
+ * changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
@@ -394,10 +392,10 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
_hash_wrtbuf(rel, ovflbuf);
/*
- * Fix up the bucket chain. this is a doubly-linked list, so we must
- * fix up the bucket chain members behind and ahead of the overflow
- * page being deleted. No concurrency issues since we hold exclusive
- * lock on the entire bucket.
+ * Fix up the bucket chain. this is a doubly-linked list, so we must fix
+ * up the bucket chain members behind and ahead of the overflow page being
+ * deleted. No concurrency issues since we hold exclusive lock on the
+ * entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
{
@@ -488,12 +486,11 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
/*
* It is okay to write-lock the new bitmap page while holding metapage
- * write lock, because no one else could be contending for the new
- * page.
+ * write lock, because no one else could be contending for the new page.
*
- * There is some loss of concurrency in possibly doing I/O for the new
- * page while holding the metapage lock, but this path is taken so
- * seldom that it's not worth worrying about.
+ * There is some loss of concurrency in possibly doing I/O for the new page
+ * while holding the metapage lock, but this path is taken so seldom that
+ * it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
@@ -586,8 +583,8 @@ _hash_squeezebucket(Relation rel,
}
/*
- * find the last page in the bucket chain by starting at the base
- * bucket page and working forward.
+ * find the last page in the bucket chain by starting at the base bucket
+ * page and working forward.
*/
ropaque = wopaque;
do
@@ -655,22 +652,21 @@ _hash_squeezebucket(Relation rel,
/*
* delete the tuple from the "read" page. PageIndexTupleDelete
- * repacks the ItemId array, so 'roffnum' will be "advanced"
- * to the "next" ItemId.
+ * repacks the ItemId array, so 'roffnum' will be "advanced" to
+ * the "next" ItemId.
*/
PageIndexTupleDelete(rpage, roffnum);
}
/*
- * if the "read" page is now empty because of the deletion (or
- * because it was empty when we got to it), free it.
+ * if the "read" page is now empty because of the deletion (or because
+ * it was empty when we got to it), free it.
*
* Tricky point here: if our read and write pages are adjacent in the
* bucket chain, our write lock on wbuf will conflict with
* _hash_freeovflpage's attempt to update the sibling links of the
- * removed page. However, in that case we are done anyway, so we
- * can simply drop the write lock before calling
- * _hash_freeovflpage.
+ * removed page. However, in that case we are done anyway, so we can
+ * simply drop the write lock before calling _hash_freeovflpage.
*/
if (PageIsEmpty(rpage))
{
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 883f2a73121..b40c20b480b 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.51 2005/06/09 21:01:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52 2005/10/15 02:49:08 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -240,13 +240,13 @@ _hash_metapinit(Relation rel)
RelationGetRelationName(rel));
/*
- * Determine the target fill factor (tuples per bucket) for this
- * index. The idea is to make the fill factor correspond to pages
- * about 3/4ths full. We can compute it exactly if the index datatype
- * is fixed-width, but for var-width there's some guessing involved.
+ * Determine the target fill factor (tuples per bucket) for this index.
+ * The idea is to make the fill factor correspond to pages about 3/4ths
+ * full. We can compute it exactly if the index datatype is fixed-width,
+ * but for var-width there's some guessing involved.
*/
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
- RelationGetDescr(rel)->attrs[0]->atttypmod);
+ RelationGetDescr(rel)->attrs[0]->atttypmod);
item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
ffactor = (BLCKSZ * 3 / 4) / item_width;
@@ -289,9 +289,8 @@ _hash_metapinit(Relation rel)
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
/*
- * We initialize the index with two buckets, 0 and 1, occupying
- * physical blocks 1 and 2. The first freespace bitmap page is in
- * block 3.
+ * We initialize the index with two buckets, 0 and 1, occupying physical
+ * blocks 1 and 2. The first freespace bitmap page is in block 3.
*/
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
@@ -321,8 +320,8 @@ _hash_metapinit(Relation rel)
}
/*
- * Initialize first bitmap page. Can't do this until we create the
- * first two buckets, else smgr will complain.
+ * Initialize first bitmap page. Can't do this until we create the first
+ * two buckets, else smgr will complain.
*/
_hash_initbitmap(rel, metap, 3);
@@ -367,15 +366,14 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Obtain the page-zero lock to assert the right to begin a split (see
* README).
*
- * Note: deadlock should be impossible here. Our own backend could only
- * be holding bucket sharelocks due to stopped indexscans; those will
- * not block other holders of the page-zero lock, who are only
- * interested in acquiring bucket sharelocks themselves. Exclusive
- * bucket locks are only taken here and in hashbulkdelete, and neither
- * of these operations needs any additional locks to complete. (If,
- * due to some flaw in this reasoning, we manage to deadlock anyway,
- * it's okay to error out; the index will be left in a consistent
- * state.)
+ * Note: deadlock should be impossible here. Our own backend could only be
+ * holding bucket sharelocks due to stopped indexscans; those will not
+ * block other holders of the page-zero lock, who are only interested in
+ * acquiring bucket sharelocks themselves. Exclusive bucket locks are
+ * only taken here and in hashbulkdelete, and neither of these operations
+ * needs any additional locks to complete. (If, due to some flaw in this
+ * reasoning, we manage to deadlock anyway, it's okay to error out; the
+ * index will be left in a consistent state.)
*/
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
@@ -386,8 +384,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check to see if split is still needed; someone else might have
- * already done one while we waited for the lock.
+ * Check to see if split is still needed; someone else might have already
+ * done one while we waited for the lock.
*
* Make sure this stays in sync with _hash_doinsert()
*/
@@ -402,11 +400,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if we
- * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
- * isn't correct yet. For simplicity we update the metapage first and
- * then lock. This should be okay because no one else should be
- * trying to lock the new bucket yet...
+ * Ideally we would lock the new bucket too before proceeding, but if we are
+ * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * correct yet. For simplicity we update the metapage first and then
+ * lock. This should be okay because no one else should be trying to lock
+ * the new bucket yet...
*/
new_bucket = metap->hashm_maxbucket + 1;
old_bucket = (new_bucket & metap->hashm_lowmask);
@@ -420,14 +418,13 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
- * Okay to proceed with split. Update the metapage bucket mapping
- * info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
- * Since we are scribbling on the metapage data right in the shared
- * buffer, any failure in this next little bit leaves us with a big
- * problem: the metapage is effectively corrupt but could get written
- * back to disk. We don't really expect any failure, but just to be
- * sure, establish a critical section.
+ * Since we are scribbling on the metapage data right in the shared buffer,
+ * any failure in this next little bit leaves us with a big problem: the
+ * metapage is effectively corrupt but could get written back to disk. We
+ * don't really expect any failure, but just to be sure, establish a
+ * critical section.
*/
START_CRIT_SECTION();
@@ -443,8 +440,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* If the split point is increasing (hashm_maxbucket's log base 2
* increases), we need to adjust the hashm_spares[] array and
- * hashm_ovflpoint so that future overflow pages will be created
- * beyond this new batch of bucket pages.
+ * hashm_ovflpoint so that future overflow pages will be created beyond
+ * this new batch of bucket pages.
*
* XXX should initialize new bucket pages to prevent out-of-order page
* creation? Don't wanna do it right here though.
@@ -471,10 +468,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
- * split lock, other splits could begin, so these values might be out
- * of date before _hash_splitbucket finishes. That's okay, since all
- * it needs is to tell which of these two buckets to map hashkeys
- * into.
+ * split lock, other splits could begin, so these values might be out of
+ * date before _hash_splitbucket finishes. That's okay, since all it
+ * needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
@@ -554,9 +550,9 @@ _hash_splitbucket(Relation rel,
TupleDesc itupdesc = RelationGetDescr(rel);
/*
- * It should be okay to simultaneously write-lock pages from each
- * bucket, since no one else can be trying to acquire buffer lock on
- * pages of either bucket.
+ * It should be okay to simultaneously write-lock pages from each bucket,
+ * since no one else can be trying to acquire buffer lock on pages of
+ * either bucket.
*/
oblkno = start_oblkno;
nblkno = start_nblkno;
@@ -578,17 +574,17 @@ _hash_splitbucket(Relation rel,
nopaque->hasho_filler = HASHO_FILL;
/*
- * Partition the tuples in the old bucket between the old bucket and
- * the new bucket, advancing along the old bucket's overflow bucket
- * chain and adding overflow pages to the new bucket as needed.
+ * Partition the tuples in the old bucket between the old bucket and the
+ * new bucket, advancing along the old bucket's overflow bucket chain and
+ * adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
/*
- * at each iteration through this loop, each of these variables
- * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
+ * at each iteration through this loop, each of these variables should
+ * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
*/
/* check if we're at the end of the page */
@@ -600,8 +596,8 @@ _hash_splitbucket(Relation rel,
break;
/*
- * we ran out of tuples on this particular page, but we have
- * more overflow pages; advance to next page.
+ * we ran out of tuples on this particular page, but we have more
+ * overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
@@ -618,8 +614,7 @@ _hash_splitbucket(Relation rel,
* Re-hash the tuple to determine which bucket it now belongs in.
*
* It is annoying to call the hash function while holding locks, but
- * releasing and relocking the page for each tuple is unappealing
- * too.
+ * releasing and relocking the page for each tuple is unappealing too.
*/
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
itup = &(hitem->hash_itup);
@@ -632,9 +627,9 @@ _hash_splitbucket(Relation rel,
if (bucket == nbucket)
{
/*
- * insert the tuple into the new bucket. if it doesn't fit on
- * the current page in the new bucket, we must allocate a new
- * overflow page and place the tuple on that page instead.
+ * insert the tuple into the new bucket. if it doesn't fit on the
+ * current page in the new bucket, we must allocate a new overflow
+ * page and place the tuple on that page instead.
*/
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
@@ -659,13 +654,13 @@ _hash_splitbucket(Relation rel,
RelationGetRelationName(rel));
/*
- * now delete the tuple from the old bucket. after this
- * section of code, 'ooffnum' will actually point to the
- * ItemId to which we would point if we had advanced it before
- * the deletion (PageIndexTupleDelete repacks the ItemId
- * array). this also means that 'omaxoffnum' is exactly one
- * less than it used to be, so we really can just decrement it
- * instead of calling PageGetMaxOffsetNumber.
+ * now delete the tuple from the old bucket. after this section
+ * of code, 'ooffnum' will actually point to the ItemId to which
+ * we would point if we had advanced it before the deletion
+ * (PageIndexTupleDelete repacks the ItemId array). this also
+ * means that 'omaxoffnum' is exactly one less than it used to be,
+ * so we really can just decrement it instead of calling
+ * PageGetMaxOffsetNumber.
*/
PageIndexTupleDelete(opage, ooffnum);
omaxoffnum = OffsetNumberPrev(omaxoffnum);
@@ -673,9 +668,9 @@ _hash_splitbucket(Relation rel,
else
{
/*
- * the tuple stays on this page. we didn't move anything, so
- * we didn't delete anything and therefore we don't have to
- * change 'omaxoffnum'.
+ * the tuple stays on this page. we didn't move anything, so we
+ * didn't delete anything and therefore we don't have to change
+ * 'omaxoffnum'.
*/
Assert(bucket == obucket);
ooffnum = OffsetNumberNext(ooffnum);
@@ -683,11 +678,10 @@ _hash_splitbucket(Relation rel,
}
/*
- * We're at the end of the old bucket chain, so we're done
- * partitioning the tuples. Before quitting, call _hash_squeezebucket
- * to ensure the tuples remaining in the old bucket (including the
- * overflow pages) are packed as tightly as possible. The new bucket
- * is already tight.
+ * We're at the end of the old bucket chain, so we're done partitioning
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * tuples remaining in the old bucket (including the overflow pages) are
+ * packed as tightly as possible. The new bucket is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c
index 782c087e3bc..213eaf89fcd 100644
--- a/src/backend/access/hash/hashscan.c
+++ b/src/backend/access/hash/hashscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.38 2004/12/31 21:59:13 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.39 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,9 +44,9 @@ ReleaseResources_hash(void)
HashScanList next;
/*
- * Note: this should be a no-op during normal query shutdown. However,
- * in an abort situation ExecutorEnd is not called and so there may be
- * open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However, in
+ * an abort situation ExecutorEnd is not called and so there may be open
+ * index scans to clean up.
*/
prev = NULL;
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 9aaf70b0a9e..fac46d79022 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.39 2005/10/06 02:29:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.40 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -137,33 +137,32 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
ItemPointerSetInvalid(current);
/*
- * We do not support hash scans with no index qualification, because
- * we would have to read the whole index rather than just one bucket.
- * That creates a whole raft of problems, since we haven't got a
- * practical way to lock all the buckets against splits or
- * compactions.
+ * We do not support hash scans with no index qualification, because we
+ * would have to read the whole index rather than just one bucket. That
+ * creates a whole raft of problems, since we haven't got a practical way
+ * to lock all the buckets against splits or compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("hash indexes do not support whole-index scans")));
+ errmsg("hash indexes do not support whole-index scans")));
/*
- * If the constant in the index qual is NULL, assume it cannot match
- * any items in the index.
+ * If the constant in the index qual is NULL, assume it cannot match any
+ * items in the index.
*/
if (scan->keyData[0].sk_flags & SK_ISNULL)
return false;
/*
- * Okay to compute the hash key. We want to do this before acquiring
- * any locks, in case a user-defined hash function happens to be slow.
+ * Okay to compute the hash key. We want to do this before acquiring any
+ * locks, in case a user-defined hash function happens to be slow.
*/
hashkey = _hash_datum2hashkey(rel, scan->keyData[0].sk_argument);
/*
- * Acquire shared split lock so we can compute the target bucket
- * safely (see README).
+ * Acquire shared split lock so we can compute the target bucket safely
+ * (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
@@ -186,8 +185,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
_hash_relbuf(rel, metabuf);
/*
- * Acquire share lock on target bucket; then we can release split
- * lock.
+ * Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -263,9 +261,9 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
bucket = opaque->hasho_bucket;
/*
- * If _hash_step is called from _hash_first, current will not be
- * valid, so we can't dereference it. However, in that case, we
- * presumably want to start at the beginning/end of the page...
+ * If _hash_step is called from _hash_first, current will not be valid, so
+ * we can't dereference it. However, in that case, we presumably want to
+ * start at the beginning/end of the page...
*/
maxoff = PageGetMaxOffsetNumber(page);
if (ItemPointerIsValid(current))
@@ -276,8 +274,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* 'offnum' now points to the last tuple we have seen (if any).
*
- * continue to step through tuples until: 1) we get to the end of the
- * bucket chain or 2) we find a valid tuple.
+ * continue to step through tuples until: 1) we get to the end of the bucket
+ * chain or 2) we find a valid tuple.
*/
do
{
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 185918d03aa..6c669ed62b4 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.199 2005/10/06 02:29:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200 2005/10/15 02:49:08 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -54,7 +54,7 @@
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
- ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+ ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
/* ----------------------------------------------------------------
@@ -272,8 +272,8 @@ heapgettup(Relation relation,
/* 'dir' is now non-zero */
/*
- * calculate line pointer and number of remaining items to check on
- * this page.
+ * calculate line pointer and number of remaining items to check on this
+ * page.
*/
lpp = PageGetItemId(dp, lineoff);
if (dir < 0)
@@ -282,8 +282,8 @@ heapgettup(Relation relation,
linesleft = lines - lineoff;
/*
- * advance the scan until we find a qualifying tuple or run out of
- * stuff to scan
+ * advance the scan until we find a qualifying tuple or run out of stuff
+ * to scan
*/
for (;;)
{
@@ -321,15 +321,14 @@ heapgettup(Relation relation,
}
else
{
- ++lpp; /* move forward in this page's ItemId
- * array */
+ ++lpp; /* move forward in this page's ItemId array */
++lineoff;
}
}
/*
- * if we get here, it means we've exhausted the items on this page
- * and it's time to move to the next.
+ * if we get here, it means we've exhausted the items on this page and
+ * it's time to move to the next.
*/
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
@@ -506,15 +505,15 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
/*
* Check for shared-cache-inval messages before trying to open the
- * relation. This is needed to cover the case where the name
- * identifies a rel that has been dropped and recreated since the
- * start of our transaction: if we don't flush the old syscache entry
- * then we'll latch onto that entry and suffer an error when we do
- * LockRelation. Note that relation_open does not need to do this,
- * since a relation's OID never changes.
+ * relation. This is needed to cover the case where the name identifies a
+ * rel that has been dropped and recreated since the start of our
+ * transaction: if we don't flush the old syscache entry then we'll latch
+ * onto that entry and suffer an error when we do LockRelation. Note that
+ * relation_open does not need to do this, since a relation's OID never
+ * changes.
*
- * We skip this if asked for NoLock, on the assumption that the caller
- * has already ensured some appropriate lock is held.
+ * We skip this if asked for NoLock, on the assumption that the caller has
+ * already ensured some appropriate lock is held.
*/
if (lockmode != NoLock)
AcceptInvalidationMessages();
@@ -633,9 +632,9 @@ heap_beginscan(Relation relation, Snapshot snapshot,
/*
* increment relation ref count while scanning relation
*
- * This is just to make really sure the relcache entry won't go away
- * while the scan has a pointer to it. Caller should be holding the
- * rel open anyway, so this is redundant in all normal scenarios...
+ * This is just to make really sure the relcache entry won't go away while
+ * the scan has a pointer to it. Caller should be holding the rel open
+ * anyway, so this is redundant in all normal scenarios...
*/
RelationIncrementReferenceCount(relation);
@@ -649,8 +648,8 @@ heap_beginscan(Relation relation, Snapshot snapshot,
scan->rs_nkeys = nkeys;
/*
- * we do this here instead of in initscan() because heap_rescan also
- * calls initscan() and we don't want to allocate memory again
+ * we do this here instead of in initscan() because heap_rescan also calls
+ * initscan() and we don't want to allocate memory again
*/
if (nkeys > 0)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@@ -763,8 +762,8 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction)
}
/*
- * if we get here it means we have a new current scan tuple, so point
- * to the proper return buffer and return the tuple.
+ * if we get here it means we have a new current scan tuple, so point to
+ * the proper return buffer and return the tuple.
*/
HEAPDEBUG_3; /* heap_getnext returning tuple */
@@ -859,8 +858,8 @@ heap_release_fetch(Relation relation,
dp = (PageHeader) BufferGetPage(buffer);
/*
- * We'd better check for out-of-range offnum in case of VACUUM since
- * the TID was obtained.
+ * We'd better check for out-of-range offnum in case of VACUUM since the
+ * TID was obtained.
*/
offnum = ItemPointerGetOffsetNumber(tid);
if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
@@ -952,7 +951,7 @@ heap_release_fetch(Relation relation,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -960,7 +959,7 @@ heap_get_latest_tid(Relation relation,
Snapshot snapshot,
ItemPointer tid)
{
- BlockNumber blk;
+ BlockNumber blk;
ItemPointerData ctid;
TransactionId priorXmax;
@@ -969,10 +968,10 @@ heap_get_latest_tid(Relation relation,
return;
/*
- * Since this can be called with user-supplied TID, don't trust the
- * input too much. (RelationGetNumberOfBlocks is an expensive check,
- * so we don't check t_ctid links again this way. Note that it would
- * not do to call it just once and save the result, either.)
+ * Since this can be called with user-supplied TID, don't trust the input
+ * too much. (RelationGetNumberOfBlocks is an expensive check, so we
+ * don't check t_ctid links again this way. Note that it would not do to
+ * call it just once and save the result, either.)
*/
blk = ItemPointerGetBlockNumber(tid);
if (blk >= RelationGetNumberOfBlocks(relation))
@@ -980,9 +979,9 @@ heap_get_latest_tid(Relation relation,
blk, RelationGetRelationName(relation));
/*
- * Loop to chase down t_ctid links. At top of loop, ctid is the
- * tuple we need to examine, and *tid is the TID we will return if
- * ctid turns out to be bogus.
+ * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
+ * need to examine, and *tid is the TID we will return if ctid turns out
+ * to be bogus.
*
* Note that we will loop until we reach the end of the t_ctid chain.
* Depending on the snapshot passed, there might be at most one visible
@@ -1008,8 +1007,8 @@ heap_get_latest_tid(Relation relation,
/*
* Check for bogus item number. This is not treated as an error
- * condition because it can happen while following a t_ctid link.
- * We just assume that the prior tid is OK and return it unchanged.
+ * condition because it can happen while following a t_ctid link. We
+ * just assume that the prior tid is OK and return it unchanged.
*/
offnum = ItemPointerGetOffsetNumber(&ctid);
if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
@@ -1037,7 +1036,7 @@ heap_get_latest_tid(Relation relation,
* tuple. Check for XMIN match.
*/
if (TransactionIdIsValid(priorXmax) &&
- !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -1068,7 +1067,7 @@ heap_get_latest_tid(Relation relation,
priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
- } /* end of loop */
+ } /* end of loop */
}
/*
@@ -1102,13 +1101,12 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
#endif
/*
- * If the object id of this tuple has already been assigned, trust
- * the caller. There are a couple of ways this can happen. At
- * initial db creation, the backend program sets oids for tuples.
- * When we define an index, we set the oid. Finally, in the
- * future, we may allow users to set their own object ids in order
- * to support a persistent object store (objects need to contain
- * pointers to one another).
+ * If the object id of this tuple has already been assigned, trust the
+ * caller. There are a couple of ways this can happen. At initial db
+ * creation, the backend program sets oids for tuples. When we define
+ * an index, we set the oid. Finally, in the future, we may allow
+ * users to set their own object ids in order to support a persistent
+ * object store (objects need to contain pointers to one another).
*/
if (!OidIsValid(HeapTupleGetOid(tup)))
HeapTupleSetOid(tup, GetNewOid(relation));
@@ -1129,8 +1127,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If the new tuple is too big for storage or contains already toasted
- * out-of-line attributes from some other relation, invoke the
- * toaster.
+ * out-of-line attributes from some other relation, invoke the toaster.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1172,9 +1169,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
xlhdr.t_hoff = tup->t_data->t_hoff;
/*
- * note we mark rdata[1] as belonging to buffer; if XLogInsert
- * decides to write the whole page to the xlog, we don't need to
- * store xl_heap_header in the xlog.
+ * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
+ * to write the whole page to the xlog, we don't need to store
+ * xl_heap_header in the xlog.
*/
rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader;
@@ -1190,9 +1187,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
rdata[2].next = NULL;
/*
- * If this is the single and first tuple on page, we can reinit
- * the page instead of restoring the whole thing. Set flag, and
- * hide buffer references from XLogInsert.
+ * If this is the single and first tuple on page, we can reinit the
+ * page instead of restoring the whole thing. Set flag, and hide
+ * buffer references from XLogInsert.
*/
if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber &&
PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
@@ -1213,10 +1210,10 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
WriteBuffer(buffer);
/*
- * If tuple is cachable, mark it for invalidation from the caches in
- * case we abort. Note it is OK to do this after WriteBuffer releases
- * the buffer, because the "tup" data structure is all in local
- * memory, not in the shared buffer.
+ * If tuple is cachable, mark it for invalidation from the caches in case
+ * we abort. Note it is OK to do this after WriteBuffer releases the
+ * buffer, because the "tup" data structure is all in local memory, not in
+ * the shared buffer.
*/
CacheInvalidateHeapTuple(relation, tup);
@@ -1268,7 +1265,7 @@ heap_delete(Relation relation, ItemPointer tid,
ItemPointer ctid, TransactionId *update_xmax,
CommandId cid, Snapshot crosscheck, bool wait)
{
- HTSU_Result result;
+ HTSU_Result result;
TransactionId xid = GetCurrentTransactionId();
ItemId lp;
HeapTupleData tp;
@@ -1301,7 +1298,7 @@ l1:
else if (result == HeapTupleBeingUpdated && wait)
{
TransactionId xwait;
- uint16 infomask;
+ uint16 infomask;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetXmax(tp.t_data);
@@ -1310,13 +1307,13 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
- * Acquire tuple lock to establish our priority for the tuple
- * (see heap_lock_tuple). LockTuple will release us when we are
+ * Acquire tuple lock to establish our priority for the tuple (see
+ * heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while
- * rechecking tuple state.
+ * If we are forced to "start over" below, we keep the tuple lock; this
+ * arranges that we stay at the head of the line while rechecking
+ * tuple state.
*/
if (!have_tuple_lock)
{
@@ -1347,12 +1344,12 @@ l1:
goto l1;
/*
- * You might think the multixact is necessarily done here, but
- * not so: it could have surviving members, namely our own xact
- * or other subxacts of this backend. It is legal for us to
- * delete the tuple in either case, however (the latter case is
- * essentially a situation of upgrading our former shared lock
- * to exclusive). We don't bother changing the on-disk hint bits
+ * You might think the multixact is necessarily done here, but not
+ * so: it could have surviving members, namely our own xact or
+ * other subxacts of this backend. It is legal for us to delete
+ * the tuple in either case, however (the latter case is
+ * essentially a situation of upgrading our former shared lock to
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -1385,8 +1382,8 @@ l1:
}
/*
- * We may overwrite if previous xmax aborted, or if it committed
- * but only locked the tuple without updating it.
+ * We may overwrite if previous xmax aborted, or if it committed but
+ * only locked the tuple without updating it.
*/
if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED))
@@ -1467,18 +1464,18 @@ l1:
/*
* If the tuple has toasted out-of-line attributes, we need to delete
- * those items too. We have to do this before WriteBuffer because we
- * need to look at the contents of the tuple, but it's OK to release
- * the context lock on the buffer first.
+ * those items too. We have to do this before WriteBuffer because we need
+ * to look at the contents of the tuple, but it's OK to release the
+ * context lock on the buffer first.
*/
if (HeapTupleHasExternal(&tp))
heap_tuple_toast_attrs(relation, NULL, &tp);
/*
* Mark tuple for invalidation from system caches at next command
- * boundary. We have to do this before WriteBuffer because we need to
- * look at the contents of the tuple, so we need to hold our refcount
- * on the buffer.
+ * boundary. We have to do this before WriteBuffer because we need to look
+ * at the contents of the tuple, so we need to hold our refcount on the
+ * buffer.
*/
CacheInvalidateHeapTuple(relation, &tp);
@@ -1506,7 +1503,7 @@ l1:
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -1569,7 +1566,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemPointer ctid, TransactionId *update_xmax,
CommandId cid, Snapshot crosscheck, bool wait)
{
- HTSU_Result result;
+ HTSU_Result result;
TransactionId xid = GetCurrentTransactionId();
ItemId lp;
HeapTupleData oldtup;
@@ -1598,8 +1595,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
- * with the new tuple's location, so there's great risk of confusion
- * if we use otid anymore.
+ * with the new tuple's location, so there's great risk of confusion if we
+ * use otid anymore.
*/
l2:
@@ -1614,7 +1611,7 @@ l2:
else if (result == HeapTupleBeingUpdated && wait)
{
TransactionId xwait;
- uint16 infomask;
+ uint16 infomask;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
@@ -1623,13 +1620,13 @@ l2:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
- * Acquire tuple lock to establish our priority for the tuple
- * (see heap_lock_tuple). LockTuple will release us when we are
+ * Acquire tuple lock to establish our priority for the tuple (see
+ * heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while
- * rechecking tuple state.
+ * If we are forced to "start over" below, we keep the tuple lock; this
+ * arranges that we stay at the head of the line while rechecking
+ * tuple state.
*/
if (!have_tuple_lock)
{
@@ -1660,12 +1657,12 @@ l2:
goto l2;
/*
- * You might think the multixact is necessarily done here, but
- * not so: it could have surviving members, namely our own xact
- * or other subxacts of this backend. It is legal for us to
- * update the tuple in either case, however (the latter case is
- * essentially a situation of upgrading our former shared lock
- * to exclusive). We don't bother changing the on-disk hint bits
+ * You might think the multixact is necessarily done here, but not
+ * so: it could have surviving members, namely our own xact or
+ * other subxacts of this backend. It is legal for us to update
+ * the tuple in either case, however (the latter case is
+ * essentially a situation of upgrading our former shared lock to
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -1698,8 +1695,8 @@ l2:
}
/*
- * We may overwrite if previous xmax aborted, or if it committed
- * but only locked the tuple without updating it.
+ * We may overwrite if previous xmax aborted, or if it committed but
+ * only locked the tuple without updating it.
*/
if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED))
@@ -1753,15 +1750,15 @@ l2:
HeapTupleHeaderSetCmax(newtup->t_data, 0); /* for cleanliness */
/*
- * If the toaster needs to be activated, OR if the new tuple will not
- * fit on the same page as the old, then we need to release the
- * context lock (but not the pin!) on the old tuple's buffer while we
- * are off doing TOAST and/or table-file-extension work. We must mark
- * the old tuple to show that it's already being updated, else other
- * processes may try to update it themselves.
+ * If the toaster needs to be activated, OR if the new tuple will not fit
+ * on the same page as the old, then we need to release the context lock
+ * (but not the pin!) on the old tuple's buffer while we are off doing
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * show that it's already being updated, else other processes may try to
+ * update it themselves.
*
- * We need to invoke the toaster if there are already any out-of-line
- * toasted values present, or if the new tuple is over-threshold.
+ * We need to invoke the toaster if there are already any out-of-line toasted
+ * values present, or if the new tuple is over-threshold.
*/
need_toast = (HeapTupleHasExternal(&oldtup) ||
HeapTupleHasExternal(newtup) ||
@@ -1790,22 +1787,21 @@ l2:
}
/*
- * Now, do we need a new page for the tuple, or not? This is a
- * bit tricky since someone else could have added tuples to the
- * page while we weren't looking. We have to recheck the
- * available space after reacquiring the buffer lock. But don't
- * bother to do that if the former amount of free space is still
- * not enough; it's unlikely there's more free now than before.
+ * Now, do we need a new page for the tuple, or not? This is a bit
+ * tricky since someone else could have added tuples to the page while
+ * we weren't looking. We have to recheck the available space after
+ * reacquiring the buffer lock. But don't bother to do that if the
+ * former amount of free space is still not enough; it's unlikely
+ * there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock
- * against some other backend trying to get the same two locks in
- * the other order, we must be consistent about the order we get
- * the locks in. We use the rule "lock the lower-numbered page of
- * the relation first". To implement this, we must do
- * RelationGetBufferForTuple while not holding the lock on the old
- * page, and we must rely on it to get the locks on both pages in
- * the correct order.
+ * buffer locks on both old and new pages. To avoid deadlock against
+ * some other backend trying to get the same two locks in the other
+ * order, we must be consistent about the order we get the locks in.
+ * We use the rule "lock the lower-numbered page of the relation
+ * first". To implement this, we must do RelationGetBufferForTuple
+ * while not holding the lock on the old page, and we must rely on it
+ * to get the locks on both pages in the correct order.
*/
if (newtupsize > pagefree)
{
@@ -1823,8 +1819,8 @@ l2:
{
/*
* Rats, it doesn't fit anymore. We must now unlock and
- * relock to avoid deadlock. Fortunately, this path
- * should seldom be taken.
+ * relock to avoid deadlock. Fortunately, this path should
+ * seldom be taken.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
@@ -1845,9 +1841,9 @@ l2:
}
/*
- * At this point newbuf and buffer are both pinned and locked, and
- * newbuf has enough space for the new tuple. If they are the same
- * buffer, only one pin is held.
+ * At this point newbuf and buffer are both pinned and locked, and newbuf
+ * has enough space for the new tuple. If they are the same buffer, only
+ * one pin is held.
*/
/* NO EREPORT(ERROR) from here till changes are logged */
@@ -1897,8 +1893,8 @@ l2:
/*
* Mark old tuple for invalidation from system caches at next command
- * boundary. We have to do this before WriteBuffer because we need to
- * look at the contents of the tuple, so we need to hold our refcount.
+ * boundary. We have to do this before WriteBuffer because we need to look
+ * at the contents of the tuple, so we need to hold our refcount.
*/
CacheInvalidateHeapTuple(relation, &oldtup);
@@ -1907,10 +1903,10 @@ l2:
WriteBuffer(buffer);
/*
- * If new tuple is cachable, mark it for invalidation from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "newtup" data structure is all in
- * local memory, not in the shared buffer.
+ * If new tuple is cachable, mark it for invalidation from the caches in
+ * case we abort. Note it is OK to do this after WriteBuffer releases the
+ * buffer, because the "newtup" data structure is all in local memory, not
+ * in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, newtup);
@@ -1936,7 +1932,7 @@ l2:
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -2012,7 +2008,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* waiter gets the tuple, potentially leading to indefinite starvation of
* some waiters. The possibility of share-locking makes the problem much
* worse --- a steady stream of share-lockers can easily block an exclusive
- * locker forever. To provide more reliable semantics about who gets a
+ * locker forever. To provide more reliable semantics about who gets a
* tuple-level lock first, we use the standard lock manager. The protocol
* for waiting for a tuple-level lock is really
* LockTuple()
@@ -2020,7 +2016,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* mark tuple as locked by me
* UnlockTuple()
* When there are multiple waiters, arbitration of who is to get the lock next
- * is provided by LockTuple(). However, at most one tuple-level lock will
+ * is provided by LockTuple(). However, at most one tuple-level lock will
* be held or awaited per backend at any time, so we don't risk overflow
* of the lock table. Note that incoming share-lockers are required to
* do LockTuple as well, if there is any conflict, to ensure that they don't
@@ -2032,11 +2028,11 @@ heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
ItemPointer ctid, TransactionId *update_xmax,
CommandId cid, LockTupleMode mode, bool nowait)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointer tid = &(tuple->t_self);
ItemId lp;
PageHeader dp;
- TransactionId xid;
+ TransactionId xid;
uint16 new_infomask;
LOCKMODE tuple_lock_type;
bool have_tuple_lock = false;
@@ -2067,7 +2063,7 @@ l3:
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait;
- uint16 infomask;
+ uint16 infomask;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetXmax(tuple->t_data);
@@ -2077,12 +2073,12 @@ l3:
/*
* Acquire tuple lock to establish our priority for the tuple.
- * LockTuple will release us when we are next-in-line for the
- * tuple. We must do this even if we are share-locking.
+ * LockTuple will release us when we are next-in-line for the tuple.
+ * We must do this even if we are share-locking.
*
- * If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while
- * rechecking tuple state.
+ * If we are forced to "start over" below, we keep the tuple lock; this
+ * arranges that we stay at the head of the line while rechecking
+ * tuple state.
*/
if (!have_tuple_lock)
{
@@ -2091,8 +2087,8 @@ l3:
if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
LockTuple(relation, tid, tuple_lock_type);
@@ -2108,8 +2104,8 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * Make sure it's still a shared lock, else start over. (It's
- * OK if the ownership of the shared lock has changed, though.)
+ * Make sure it's still a shared lock, else start over. (It's OK
+ * if the ownership of the shared lock has changed, though.)
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
goto l3;
@@ -2122,8 +2118,8 @@ l3:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
MultiXactIdWait((MultiXactId) xwait);
@@ -2131,9 +2127,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * If xwait had just locked the tuple then some other xact
- * could update this tuple before we get to this point.
- * Check for xmax change, and start over if so.
+ * If xwait had just locked the tuple then some other xact could
+ * update this tuple before we get to this point. Check for xmax
+ * change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
!TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
@@ -2141,12 +2137,12 @@ l3:
goto l3;
/*
- * You might think the multixact is necessarily done here, but
- * not so: it could have surviving members, namely our own xact
- * or other subxacts of this backend. It is legal for us to
- * lock the tuple in either case, however. We don't bother
- * changing the on-disk hint bits since we are about to
- * overwrite the xmax altogether.
+ * You might think the multixact is necessarily done here, but not
+ * so: it could have surviving members, namely our own xact or
+ * other subxacts of this backend. It is legal for us to lock the
+ * tuple in either case, however. We don't bother changing the
+ * on-disk hint bits since we are about to overwrite the xmax
+ * altogether.
*/
}
else
@@ -2157,8 +2153,8 @@ l3:
if (!ConditionalXactLockTableWait(xwait))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
XactLockTableWait(xwait);
@@ -2166,9 +2162,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then
- * some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
!TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
@@ -2188,10 +2184,10 @@ l3:
}
/*
- * We may lock if previous xmax aborted, or if it committed
- * but only locked the tuple without updating it. The case where
- * we didn't wait because we are joining an existing shared lock
- * is correctly handled, too.
+ * We may lock if previous xmax aborted, or if it committed but only
+ * locked the tuple without updating it. The case where we didn't
+ * wait because we are joining an existing shared lock is correctly
+ * handled, too.
*/
if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED))
@@ -2213,9 +2209,9 @@ l3:
}
/*
- * Compute the new xmax and infomask to store into the tuple. Note we
- * do not modify the tuple just yet, because that would leave it in the
- * wrong state if multixact.c elogs.
+ * Compute the new xmax and infomask to store into the tuple. Note we do
+ * not modify the tuple just yet, because that would leave it in the wrong
+ * state if multixact.c elogs.
*/
xid = GetCurrentTransactionId();
@@ -2229,17 +2225,16 @@ l3:
if (mode == LockTupleShared)
{
- TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
+ TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
uint16 old_infomask = tuple->t_data->t_infomask;
/*
* If this is the first acquisition of a shared lock in the current
- * transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
- * even if we end up just using our own TransactionId below, since
- * some other backend could incorporate our XID into a MultiXact
- * immediately afterwards.)
+ * transaction, set my per-backend OldestMemberMXactId setting. We can
+ * be certain that the transaction will never become a member of any
+ * older MultiXactIds than that. (We have to do this even if we end
+ * up just using our own TransactionId below, since some other backend
+ * could incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -2249,14 +2244,14 @@ l3:
* Check to see if we need a MultiXactId because there are multiple
* lockers.
*
- * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID
- * bit if the xmax was a MultiXactId but it was not running anymore.
- * There is a race condition, which is that the MultiXactId may have
- * finished since then, but that uncommon case is handled within
+ * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
+ * the xmax was a MultiXactId but it was not running anymore. There is
+ * a race condition, which is that the MultiXactId may have finished
+ * since then, but that uncommon case is handled within
* MultiXactIdExpand.
*
- * There is a similar race condition possible when the old xmax was
- * a regular TransactionId. We test TransactionIdIsInProgress again
+ * There is a similar race condition possible when the old xmax was a
+ * regular TransactionId. We test TransactionIdIsInProgress again
* just to narrow the window, but it's still possible to end up
* creating an unnecessary MultiXactId. Fortunately this is harmless.
*/
@@ -2277,10 +2272,10 @@ l3:
{
/*
* If the old locker is ourselves, we'll just mark the
- * tuple again with our own TransactionId. However we
- * have to consider the possibility that we had
- * exclusive rather than shared lock before --- if so,
- * be careful to preserve the exclusivity of the lock.
+ * tuple again with our own TransactionId. However we
+ * have to consider the possibility that we had exclusive
+ * rather than shared lock before --- if so, be careful to
+ * preserve the exclusivity of the lock.
*/
if (!(old_infomask & HEAP_XMAX_SHARED_LOCK))
{
@@ -2303,9 +2298,9 @@ l3:
else
{
/*
- * Can get here iff HeapTupleSatisfiesUpdate saw the old
- * xmax as running, but it finished before
- * TransactionIdIsInProgress() got to run. Treat it like
+ * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
+ * as running, but it finished before
+ * TransactionIdIsInProgress() got to run. Treat it like
* there's no locker in the tuple.
*/
}
@@ -2329,8 +2324,8 @@ l3:
/*
* Store transaction information of xact locking the tuple.
*
- * Note: our CID is meaningless if storing a MultiXactId, but no harm
- * in storing it anyway.
+ * Note: our CID is meaningless if storing a MultiXactId, but no harm in
+ * storing it anyway.
*/
tuple->t_data->t_infomask = new_infomask;
HeapTupleHeaderSetXmax(tuple->t_data, xid);
@@ -2339,8 +2334,8 @@ l3:
tuple->t_data->t_ctid = *tid;
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -2473,8 +2468,8 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)
/*
* The unused-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
- * array need not be stored too.
+ * that it is. When XLogInsert stores the whole buffer, the offsets array
+ * need not be stored too.
*/
if (uncnt > 0)
{
@@ -2500,11 +2495,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup, bool move)
{
/*
- * Note: xlhdr is declared to have adequate size and correct alignment
- * for an xl_heap_header. However the two tids, if present at all,
- * will be packed in with no wasted space after the xl_heap_header;
- * they aren't necessarily aligned as implied by this struct
- * declaration.
+ * Note: xlhdr is declared to have adequate size and correct alignment for
+ * an xl_heap_header. However the two tids, if present at all, will be
+ * packed in with no wasted space after the xl_heap_header; they aren't
+ * necessarily aligned as implied by this struct declaration.
*/
struct
{
@@ -2555,8 +2549,8 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
}
/*
- * As with insert records, we need not store the rdata[2] segment if
- * we decide to store the whole buffer instead.
+ * As with insert records, we need not store the rdata[2] segment if we
+ * decide to store the whole buffer instead.
*/
rdata[2].data = (char *) &xlhdr;
rdata[2].len = hsize;
@@ -2655,8 +2649,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
Page page;
/*
- * Note: the NEWPAGE log record is used for both heaps and indexes, so
- * do not do anything that assumes we are touching a heap.
+ * Note: the NEWPAGE log record is used for both heaps and indexes, so do
+ * not do anything that assumes we are touching a heap.
*/
if (record->xl_info & XLR_BKP_BLOCK_1)
@@ -2699,7 +2693,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "heap_delete_redo: no block");
@@ -2707,7 +2701,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
if (PageIsNew((PageHeader) page))
elog(PANIC, "heap_delete_redo: uninitialized page");
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -2749,7 +2743,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
struct
{
HeapTupleHeaderData hdr;
- char data[MaxTupleSize];
+ char data[MaxTupleSize];
} tbuf;
HeapTupleHeader htup;
xl_heap_header xlhdr;
@@ -2764,7 +2758,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
return;
buffer = XLogReadBuffer(true, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
return;
@@ -2776,7 +2770,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
PageInit(page, BufferGetPageSize(buffer), 0);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -2835,7 +2829,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move)
struct
{
HeapTupleHeaderData hdr;
- char data[MaxTupleSize];
+ char data[MaxTupleSize];
} tbuf;
xl_heap_header xlhdr;
int hsize;
@@ -2850,7 +2844,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move)
/* Deal with old tuple version */
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "heap_update_redo: no block");
@@ -2858,7 +2852,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move)
if (PageIsNew((PageHeader) page))
elog(PANIC, "heap_update_redo: uninitialized old page");
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -2928,7 +2922,7 @@ newsame:;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
PageInit(page, BufferGetPageSize(buffer), 0);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -2961,7 +2955,7 @@ newsame:;
if (move)
{
- TransactionId xid[2]; /* xmax, xmin */
+ TransactionId xid[2]; /* xmax, xmin */
memcpy((char *) xid,
(char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
@@ -3008,7 +3002,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
return;
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "heap_lock_redo: no block");
@@ -3016,7 +3010,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
if (PageIsNew((PageHeader) page))
elog(PANIC, "heap_lock_redo: uninitialized page");
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -3081,7 +3075,7 @@ static void
out_target(char *buf, xl_heaptid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index fc1b0afd21e..800ee4a805b 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.57 2005/06/20 18:37:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,7 +80,7 @@ RelationPutHeapTuple(Relation relation,
* enough there). In that case, the page will be pinned and locked only once.
*
* If use_fsm is true (the normal case), we use FSM to help us find free
- * space. If use_fsm is false, we always append a new empty page to the
+ * space. If use_fsm is false, we always append a new empty page to the
* end of the relation if the tuple won't fit on the current target page.
* This can save some cycles when we know the relation is new and doesn't
* contain useful amounts of free space.
@@ -122,22 +122,20 @@ RelationGetBufferForTuple(Relation relation, Size len,
if (otherBuffer != InvalidBuffer)
otherBlock = BufferGetBlockNumber(otherBuffer);
else
- otherBlock = InvalidBlockNumber; /* just to keep compiler
- * quiet */
+ otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */
/*
- * We first try to put the tuple on the same page we last inserted a
- * tuple on, as cached in the relcache entry. If that doesn't work,
- * we ask the shared Free Space Map to locate a suitable page. Since
- * the FSM's info might be out of date, we have to be prepared to loop
- * around and retry multiple times. (To insure this isn't an infinite
- * loop, we must update the FSM with the correct amount of free space
- * on each page that proves not to be suitable.) If the FSM has no
- * record of a page with enough free space, we give up and extend the
- * relation.
+ * We first try to put the tuple on the same page we last inserted a tuple
+ * on, as cached in the relcache entry. If that doesn't work, we ask the
+ * shared Free Space Map to locate a suitable page. Since the FSM's info
+ * might be out of date, we have to be prepared to loop around and retry
+ * multiple times. (To insure this isn't an infinite loop, we must update
+ * the FSM with the correct amount of free space on each page that proves
+ * not to be suitable.) If the FSM has no record of a page with enough
+ * free space, we give up and extend the relation.
*
- * When use_fsm is false, we either put the tuple onto the existing
- * target page or extend the relation.
+ * When use_fsm is false, we either put the tuple onto the existing target
+ * page or extend the relation.
*/
targetBlock = relation->rd_targblock;
@@ -151,9 +149,9 @@ RelationGetBufferForTuple(Relation relation, Size len,
targetBlock = GetPageWithFreeSpace(&relation->rd_node, len);
/*
- * If the FSM knows nothing of the rel, try the last page before
- * we give up and extend. This avoids one-tuple-per-page syndrome
- * during bootstrapping or in a recently-started system.
+ * If the FSM knows nothing of the rel, try the last page before we
+ * give up and extend. This avoids one-tuple-per-page syndrome during
+ * bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
{
@@ -168,8 +166,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
{
/*
* Read and exclusive-lock the target block, as well as the other
- * block if one was given, taking suitable care with lock ordering
- * and the possibility they are the same block.
+ * block if one was given, taking suitable care with lock ordering and
+ * the possibility they are the same block.
*/
if (otherBuffer == InvalidBuffer)
{
@@ -199,8 +197,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
}
/*
- * Now we can check to see if there's enough free space here. If
- * so, we're done.
+ * Now we can check to see if there's enough free space here. If so,
+ * we're done.
*/
pageHeader = (Page) BufferGetPage(buffer);
pageFreeSpace = PageGetFreeSpace(pageHeader);
@@ -213,9 +211,9 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order
- * we unlock the two buffers in, so this can be slightly simpler
- * than the code above.
+ * any) and prepare to look elsewhere. We don't care which order we
+ * unlock the two buffers in, so this can be slightly simpler than the
+ * code above.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (otherBuffer == InvalidBuffer)
@@ -231,8 +229,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
break;
/*
- * Update FSM as to condition of this page, and ask for another
- * page to try.
+ * Update FSM as to condition of this page, and ask for another page
+ * to try.
*/
targetBlock = RecordAndGetPageWithFreeSpace(&relation->rd_node,
targetBlock,
@@ -243,10 +241,10 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Have to extend the relation.
*
- * We have to use a lock to ensure no one else is extending the rel at
- * the same time, else we will both try to initialize the same new
- * page. We can skip locking for new or temp relations, however,
- * since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel at the
+ * same time, else we will both try to initialize the same new page. We
+ * can skip locking for new or temp relations, however, since no one else
+ * could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(relation);
@@ -254,17 +252,16 @@ RelationGetBufferForTuple(Relation relation, Size len,
LockRelationForExtension(relation, ExclusiveLock);
/*
- * XXX This does an lseek - rather expensive - but at the moment it is
- * the only way to accurately determine how many blocks are in a
- * relation. Is it worth keeping an accurate file length in shared
- * memory someplace, rather than relying on the kernel to do it for
- * us?
+ * XXX This does an lseek - rather expensive - but at the moment it is the
+ * only way to accurately determine how many blocks are in a relation. Is
+ * it worth keeping an accurate file length in shared memory someplace,
+ * rather than relying on the kernel to do it for us?
*/
buffer = ReadBuffer(relation, P_NEW);
/*
- * We can be certain that locking the otherBuffer first is OK, since
- * it must have a lower page number.
+ * We can be certain that locking the otherBuffer first is OK, since it
+ * must have a lower page number.
*/
if (otherBuffer != InvalidBuffer)
LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
@@ -275,10 +272,10 @@ RelationGetBufferForTuple(Relation relation, Size len,
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * Release the file-extension lock; it's now OK for someone else to
- * extend the relation some more. Note that we cannot release this
- * lock before we have buffer lock on the new page, or we risk a
- * race condition against vacuumlazy.c --- see comments therein.
+ * Release the file-extension lock; it's now OK for someone else to extend
+ * the relation some more. Note that we cannot release this lock before
+ * we have buffer lock on the new page, or we risk a race condition
+ * against vacuumlazy.c --- see comments therein.
*/
if (needLock)
UnlockRelationForExtension(relation, ExclusiveLock);
@@ -299,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
- * XXX should we enter the new page into the free space map immediately,
- * or just keep it for this backend's exclusive use in the short run
- * (until VACUUM sees it)? Seems to depend on whether you expect the
- * current backend to make more insertions or not, which is probably a
- * good bet most of the time. So for now, don't add it to FSM yet.
+ * XXX should we enter the new page into the free space map immediately, or
+ * just keep it for this backend's exclusive use in the short run (until
+ * VACUUM sees it)? Seems to depend on whether you expect the current
+ * backend to make more insertions or not, which is probably a good bet
+ * most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 02da8446cd0..fd20f111b80 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.52 2005/08/12 01:35:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53 2005/10/15 02:49:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -90,8 +90,7 @@ heap_tuple_fetch_attr(varattrib *attr)
else
{
/*
- * This is a plain value inside of the main tuple - why am I
- * called?
+ * This is a plain value inside of the main tuple - why am I called?
*/
result = attr;
}
@@ -154,8 +153,7 @@ heap_tuple_untoast_attr(varattrib *attr)
else
/*
- * This is a plain value inside of the main tuple - why am I
- * called?
+ * This is a plain value inside of the main tuple - why am I called?
*/
return attr;
@@ -255,8 +253,8 @@ toast_raw_datum_size(Datum value)
else if (VARATT_IS_EXTERNAL(attr))
{
/*
- * an uncompressed external attribute has rawsize including the
- * header (not too consistent!)
+ * an uncompressed external attribute has rawsize including the header
+ * (not too consistent!)
*/
result = attr->va_content.va_external.va_rawsize;
}
@@ -274,26 +272,26 @@ toast_raw_datum_size(Datum value)
* Return the physical storage size (possibly compressed) of a varlena datum
* ----------
*/
-Size
+Size
toast_datum_size(Datum value)
{
- varattrib *attr = (varattrib *) DatumGetPointer(value);
+ varattrib *attr = (varattrib *) DatumGetPointer(value);
Size result;
if (VARATT_IS_EXTERNAL(attr))
{
/*
* Attribute is stored externally - return the extsize whether
- * compressed or not. We do not count the size of the toast
- * pointer ... should we?
+ * compressed or not. We do not count the size of the toast pointer
+ * ... should we?
*/
result = attr->va_content.va_external.va_extsize;
}
else
{
/*
- * Attribute is stored inline either compressed or not, just
- * calculate the size of the datum in either case.
+ * Attribute is stored inline either compressed or not, just calculate
+ * the size of the datum in either case.
*/
result = VARSIZE(attr);
}
@@ -321,12 +319,12 @@ toast_delete(Relation rel, HeapTuple oldtup)
* Get the tuple descriptor and break down the tuple into fields.
*
* NOTE: it's debatable whether to use heap_deformtuple() here or just
- * heap_getattr() only the varlena columns. The latter could win if
- * there are few varlena columns and many non-varlena ones. However,
- * heap_deformtuple costs only O(N) while the heap_getattr way would
- * cost O(N^2) if there are many varlena columns, so it seems better
- * to err on the side of linear cost. (We won't even be here unless
- * there's at least one varlena column, by the way.)
+ * heap_getattr() only the varlena columns. The latter could win if there
+ * are few varlena columns and many non-varlena ones. However,
+ * heap_deformtuple costs only O(N) while the heap_getattr way would cost
+ * O(N^2) if there are many varlena columns, so it seems better to err on
+ * the side of linear cost. (We won't even be here unless there's at
+ * least one varlena column, by the way.)
*/
tupleDesc = rel->rd_att;
att = tupleDesc->attrs;
@@ -336,8 +334,8 @@ toast_delete(Relation rel, HeapTuple oldtup)
heap_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull);
/*
- * Check for external stored attributes and delete them from the
- * secondary relation.
+ * Check for external stored attributes and delete them from the secondary
+ * relation.
*/
for (i = 0; i < numAttrs; i++)
{
@@ -447,9 +445,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
else
{
/*
- * This attribute isn't changed by this update so we
- * reuse the original reference to the old value in
- * the new tuple.
+ * This attribute isn't changed by this update so we reuse
+ * the original reference to the old value in the new
+ * tuple.
*/
toast_action[i] = 'p';
toast_sizes[i] = VARATT_SIZE(toast_values[i]);
@@ -582,16 +580,15 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
else
{
/*
- * incompressible data, ignore on subsequent compression
- * passes
+ * incompressible data, ignore on subsequent compression passes
*/
toast_action[i] = 'x';
}
}
/*
- * Second we look for attributes of attstorage 'x' or 'e' that are
- * still inline.
+ * Second we look for attributes of attstorage 'x' or 'e' that are still
+ * inline.
*/
while (MAXALIGN(heap_compute_data_size(tupleDesc,
toast_values, toast_isnull)) >
@@ -696,8 +693,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
else
{
/*
- * incompressible data, ignore on subsequent compression
- * passes
+ * incompressible data, ignore on subsequent compression passes
*/
toast_action[i] = 'x';
}
@@ -755,8 +751,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
/*
- * In the case we toasted any values, we need to build a new heap
- * tuple with the changed values.
+ * In the case we toasted any values, we need to build a new heap tuple
+ * with the changed values.
*/
if (need_change)
{
@@ -798,8 +794,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
has_nulls ? newtup->t_data->t_bits : NULL);
/*
- * In the case we modified a previously modified tuple again, free
- * the memory from the previous run
+ * In the case we modified a previously modified tuple again, free the
+ * memory from the previous run
*/
if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
@@ -906,8 +902,8 @@ toast_flatten_tuple_attribute(Datum value,
return value;
/*
- * Calculate the new size of the tuple. Header size should not
- * change, but data size might.
+ * Calculate the new size of the tuple. Header size should not change,
+ * but data size might.
*/
new_len = offsetof(HeapTupleHeaderData, t_bits);
if (has_nulls)
@@ -1007,9 +1003,9 @@ toast_save_datum(Relation rel, Datum value)
int32 data_todo;
/*
- * Open the toast relation and its index. We can use the index to
- * check uniqueness of the OID we assign to the toasted item, even
- * though it has additional columns besides OID.
+ * Open the toast relation and its index. We can use the index to check
+ * uniqueness of the OID we assign to the toasted item, even though it has
+ * additional columns besides OID.
*/
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
@@ -1082,11 +1078,11 @@ toast_save_datum(Relation rel, Datum value)
/*
* Create the index entry. We cheat a little here by not using
- * FormIndexDatum: this relies on the knowledge that the index
- * columns are the same as the initial columns of the table.
+ * FormIndexDatum: this relies on the knowledge that the index columns
+ * are the same as the initial columns of the table.
*
- * Note also that there had better not be any user-created index on
- * the TOAST table, since we don't bother to update anything else.
+ * Note also that there had better not be any user-created index on the
+ * TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
@@ -1148,7 +1144,7 @@ toast_delete_datum(Relation rel, Datum value)
ScanKeyInit(&toastkey,
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Find the chunks by index
@@ -1219,14 +1215,14 @@ toast_fetch_datum(varattrib *attr)
ScanKeyInit(&toastkey,
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Read the chunks by index
*
- * Note that because the index is actually on (valueid, chunkidx) we will
- * see the chunks in chunkidx order, even though we didn't explicitly
- * ask for it.
+ * Note that because the index is actually on (valueid, chunkidx) we will see
+ * the chunks in chunkidx order, even though we didn't explicitly ask for
+ * it.
*/
nextidx = 0;
@@ -1367,13 +1363,13 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
toastidx = index_open(toastrel->rd_rel->reltoastidxid);
/*
- * Setup a scan key to fetch from the index. This is either two keys
- * or three depending on the number of chunks.
+ * Setup a scan key to fetch from the index. This is either two keys or
+ * three depending on the number of chunks.
*/
ScanKeyInit(&toastkey[0],
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Use equality condition for one chunk, a range condition otherwise:
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 90e910f343f..ed604f9c5dc 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.48 2005/05/27 23:31:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49 2005/10/15 02:49:09 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -78,15 +78,15 @@ RelationGetIndexScan(Relation indexRelation,
scan->numberOfKeys = nkeys;
/*
- * We allocate the key space here, but the AM is responsible for
- * actually filling it from the passed key array.
+ * We allocate the key space here, but the AM is responsible for actually
+ * filling it from the passed key array.
*/
if (nkeys > 0)
scan->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
else
scan->keyData = NULL;
- scan->is_multiscan = false; /* caller may change this */
+ scan->is_multiscan = false; /* caller may change this */
scan->kill_prior_tuple = false;
scan->ignore_killed_tuples = true; /* default setting */
scan->keys_are_unique = false; /* may be set by index AM */
@@ -203,8 +203,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
- * This code could be generalized to search for the index key numbers
- * to substitute, but for now there's no need.
+ * This code could be generalized to search for the index key numbers to
+ * substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 7bf7fcd22f0..bd2e3bdd06e 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.85 2005/10/06 02:29:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.86 2005/10/15 02:49:09 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -111,7 +111,7 @@ do { \
} while(0)
static IndexScanDesc index_beginscan_internal(Relation indexRelation,
- int nkeys, ScanKey key);
+ int nkeys, ScanKey key);
/* ----------------------------------------------------------------
@@ -122,14 +122,14 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
/* ----------------
* index_open - open an index relation by relation OID
*
- * Note: we acquire no lock on the index. A lock is not needed when
+ * Note: we acquire no lock on the index. A lock is not needed when
* simply examining the index reldesc; the index's schema information
* is considered to be protected by the lock that the caller had better
- * be holding on the parent relation. Some type of lock should be
+ * be holding on the parent relation. Some type of lock should be
* obtained on the index before physically accessing it, however.
* This is handled automatically for most uses by index_beginscan
* and index_endscan for scan cases, or by ExecOpenIndices and
- * ExecCloseIndices for update cases. Other callers will need to
+ * ExecCloseIndices for update cases. Other callers will need to
* obtain their own locks.
*
* This is a convenience routine adapted for indexscan use.
@@ -241,8 +241,8 @@ index_beginscan(Relation heapRelation,
scan = index_beginscan_internal(indexRelation, nkeys, key);
/*
- * Save additional parameters into the scandesc. Everything else was
- * set up by RelationGetIndexScan.
+ * Save additional parameters into the scandesc. Everything else was set
+ * up by RelationGetIndexScan.
*/
scan->is_multiscan = false;
scan->heapRelation = heapRelation;
@@ -267,8 +267,8 @@ index_beginscan_multi(Relation indexRelation,
scan = index_beginscan_internal(indexRelation, nkeys, key);
/*
- * Save additional parameters into the scandesc. Everything else was
- * set up by RelationGetIndexScan.
+ * Save additional parameters into the scandesc. Everything else was set
+ * up by RelationGetIndexScan.
*/
scan->is_multiscan = true;
scan->xs_snapshot = snapshot;
@@ -294,14 +294,14 @@ index_beginscan_internal(Relation indexRelation,
* Acquire AccessShareLock for the duration of the scan
*
* Note: we could get an SI inval message here and consequently have to
- * rebuild the relcache entry. The refcount increment above ensures
- * that we will rebuild it and not just flush it...
+ * rebuild the relcache entry. The refcount increment above ensures that
+ * we will rebuild it and not just flush it...
*/
LockRelation(indexRelation, AccessShareLock);
/*
- * LockRelation can clean rd_aminfo structure, so fill procedure
- * after LockRelation
+ * LockRelation can clean rd_aminfo structure, so fill procedure after
+ * LockRelation
*/
GET_REL_PROCEDURE(ambeginscan);
@@ -425,8 +425,8 @@ index_restrpos(IndexScanDesc scan)
/*
* We do not reset got_tuple; so if the scan is actually being
- * short-circuited by index_getnext, the effective position
- * restoration is done by restoring unique_tuple_pos.
+ * short-circuited by index_getnext, the effective position restoration is
+ * done by restoring unique_tuple_pos.
*/
scan->unique_tuple_pos = scan->unique_tuple_mark;
@@ -454,19 +454,19 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* If we already got a tuple and it must be unique, there's no need to
- * make the index AM look through any additional tuples. (This can
- * save a useful amount of work in scenarios where there are many dead
- * tuples due to heavy update activity.)
+ * make the index AM look through any additional tuples. (This can save a
+ * useful amount of work in scenarios where there are many dead tuples due
+ * to heavy update activity.)
*
* To do this we must keep track of the logical scan position
* (before/on/after tuple). Also, we have to be sure to release scan
- * resources before returning NULL; if we fail to do so then a
- * multi-index scan can easily run the system out of free buffers. We
- * can release index-level resources fairly cheaply by calling
- * index_rescan. This means there are two persistent states as far as
- * the index AM is concerned: on-tuple and rescanned. If we are
- * actually asked to re-fetch the single tuple, we have to go through
- * a fresh indexscan startup, which penalizes that (infrequent) case.
+ * resources before returning NULL; if we fail to do so then a multi-index
+ * scan can easily run the system out of free buffers. We can release
+ * index-level resources fairly cheaply by calling index_rescan. This
+ * means there are two persistent states as far as the index AM is
+ * concerned: on-tuple and rescanned. If we are actually asked to
+ * re-fetch the single tuple, we have to go through a fresh indexscan
+ * startup, which penalizes that (infrequent) case.
*/
if (scan->keys_are_unique && scan->got_tuple)
{
@@ -485,19 +485,18 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
if (new_tuple_pos == 0)
{
/*
- * We are moving onto the unique tuple from having been off
- * it. We just fall through and let the index AM do the work.
- * Note we should get the right answer regardless of scan
- * direction.
+ * We are moving onto the unique tuple from having been off it. We
+ * just fall through and let the index AM do the work. Note we
+ * should get the right answer regardless of scan direction.
*/
scan->unique_tuple_pos = 0; /* need to update position */
}
else
{
/*
- * Moving off the tuple; must do amrescan to release
- * index-level pins before we return NULL. Since index_rescan
- * will reset my state, must save and restore...
+ * Moving off the tuple; must do amrescan to release index-level
+ * pins before we return NULL. Since index_rescan will reset my
+ * state, must save and restore...
*/
int unique_tuple_mark = scan->unique_tuple_mark;
@@ -520,8 +519,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
bool found;
/*
- * The AM's gettuple proc finds the next tuple matching the scan
- * keys.
+ * The AM's gettuple proc finds the next tuple matching the scan keys.
*/
found = DatumGetBool(FunctionCall2(procedure,
PointerGetDatum(scan),
@@ -556,9 +554,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
continue;
/*
- * If we can't see it, maybe no one else can either. Check to see
- * if the tuple is dead to all transactions. If so, signal the
- * index AM to not return it on future indexscans.
+ * If we can't see it, maybe no one else can either. Check to see if
+ * the tuple is dead to all transactions. If so, signal the index AM
+ * to not return it on future indexscans.
*
* We told heap_release_fetch to keep a pin on the buffer, so we can
* re-access the tuple here. But we must re-lock the buffer first.
@@ -576,8 +574,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->got_tuple = true;
/*
- * If we just fetched a known-unique tuple, then subsequent calls will
- * go through the short-circuit code above. unique_tuple_pos has been
+ * If we just fetched a known-unique tuple, then subsequent calls will go
+ * through the short-circuit code above. unique_tuple_pos has been
* initialized to 0, which is the correct state ("on row").
*/
@@ -805,11 +803,10 @@ index_getprocinfo(Relation irel,
procId = loc[procindex];
/*
- * Complain if function was not found during
- * IndexSupportInitialize. This should not happen unless the
- * system tables contain bogus entries for the index opclass. (If
- * an AM wants to allow a support function to be optional, it can
- * use index_getprocid.)
+ * Complain if function was not found during IndexSupportInitialize.
+ * This should not happen unless the system tables contain bogus
+ * entries for the index opclass. (If an AM wants to allow a support
+ * function to be optional, it can use index_getprocid.)
*/
if (!RegProcedureIsValid(procId))
elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index c73ba358ec1..33c7612aac5 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.126 2005/10/12 17:18:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,30 +93,29 @@ top:
/*
* If the page was split between the time that we surrendered our read
- * lock and acquired our write lock, then this page may no longer be
- * the right place for the key we want to insert. In this case, we
- * need to move right in the tree. See Lehman and Yao for an
- * excruciatingly precise description.
+ * lock and acquired our write lock, then this page may no longer be the
+ * right place for the key we want to insert. In this case, we need to
+ * move right in the tree. See Lehman and Yao for an excruciatingly
+ * precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
/*
- * If we're not allowing duplicates, make sure the key isn't already
- * in the index.
+ * If we're not allowing duplicates, make sure the key isn't already in
+ * the index.
*
- * NOTE: obviously, _bt_check_unique can only detect keys that are
- * already in the index; so it cannot defend against concurrent
- * insertions of the same key. We protect against that by means of
- * holding a write lock on the target page. Any other would-be
- * inserter of the same key must acquire a write lock on the same
- * target page, so only one would-be inserter can be making the check
- * at one time. Furthermore, once we are past the check we hold write
- * locks continuously until we have performed our insertion, so no
- * later inserter can fail to see our insertion. (This requires some
- * care in _bt_insertonpg.)
+ * NOTE: obviously, _bt_check_unique can only detect keys that are already in
+ * the index; so it cannot defend against concurrent insertions of the
+ * same key. We protect against that by means of holding a write lock on
+ * the target page. Any other would-be inserter of the same key must
+ * acquire a write lock on the same target page, so only one would-be
+ * inserter can be making the check at one time. Furthermore, once we are
+ * past the check we hold write locks continuously until we have performed
+ * our insertion, so no later inserter can fail to see our insertion.
+ * (This requires some care in _bt_insertonpg.)
*
- * If we must wait for another xact, we release the lock while waiting,
- * and then must start over completely.
+ * If we must wait for another xact, we release the lock while waiting, and
+ * then must start over completely.
*/
if (index_is_unique)
{
@@ -167,8 +166,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
- * Find first item >= proposed new item. Note we could also get a
- * pointer to end-of-page here.
+ * Find first item >= proposed new item. Note we could also get a pointer
+ * to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
@@ -194,24 +193,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
- * Formerly, we applied _bt_isequal() before checking the kill
- * flag, so as to fall out of the item loop as soon as
- * possible. However, in the presence of heavy update activity
- * an index may contain many killed items with the same key;
- * running _bt_isequal() on each killed item gets expensive.
- * Furthermore it is likely that the non-killed version of
- * each key appears first, so that we didn't actually get to
- * exit any sooner anyway. So now we just advance over killed
- * items as quickly as we can. We only apply _bt_isequal()
- * when we get to a non-killed item or the end of the page.
+ * Formerly, we applied _bt_isequal() before checking the kill flag,
+ * so as to fall out of the item loop as soon as possible.
+ * However, in the presence of heavy update activity an index may
+ * contain many killed items with the same key; running
+ * _bt_isequal() on each killed item gets expensive. Furthermore
+ * it is likely that the non-killed version of each key appears
+ * first, so that we didn't actually get to exit any sooner
+ * anyway. So now we just advance over killed items as quickly as
+ * we can. We only apply _bt_isequal() when we get to a non-killed
+ * item or the end of the page.
*/
if (!ItemIdDeleted(curitemid))
{
/*
- * _bt_compare returns 0 for (1,NULL) and (1,NULL) -
- * this's how we handling NULLs - and so we must not use
- * _bt_compare in real comparison, but only for
- * ordering/finding items on pages. - vadim 03/24/97
+ * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
+ * how we handling NULLs - and so we must not use _bt_compare
+ * in real comparison, but only for ordering/finding items on
+ * pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
@@ -246,15 +245,15 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
*/
ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
- errmsg("duplicate key violates unique constraint \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("duplicate key violates unique constraint \"%s\"",
+ RelationGetRelationName(rel))));
}
else if (htup.t_data != NULL)
{
/*
- * Hmm, if we can't see the tuple, maybe it can be
- * marked killed. This logic should match
- * index_getnext and btgettuple.
+ * Hmm, if we can't see the tuple, maybe it can be marked
+ * killed. This logic should match index_getnext and
+ * btgettuple.
*/
LockBuffer(hbuffer, BUFFER_LOCK_SHARE);
if (HeapTupleSatisfiesVacuum(htup.t_data, RecentGlobalXmin,
@@ -377,15 +376,15 @@ _bt_insertonpg(Relation rel,
itemsz = IndexTupleDSize(btitem->bti_itup)
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
- itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
- * we need to be consistent */
+ itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
+ * need to be consistent */
/*
- * Check whether the item can fit on a btree page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) We actually need to
- * be able to fit three items on every page, so restrict any one item
- * to 1/3 the per-page available space. Note that at this point,
- * itemsz doesn't include the ItemId.
+ * Check whether the item can fit on a btree page at all. (Eventually, we
+ * ought to try to apply TOAST methods if not.) We actually need to be
+ * able to fit three items on every page, so restrict any one item to 1/3
+ * the per-page available space. Note that at this point, itemsz doesn't
+ * include the ItemId.
*/
if (itemsz > BTMaxItemSize(page))
ereport(ERROR,
@@ -393,9 +392,9 @@ _bt_insertonpg(Relation rel,
errmsg("index row size %lu exceeds btree maximum, %lu",
(unsigned long) itemsz,
(unsigned long) BTMaxItemSize(page)),
- errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
- "Consider a function index of an MD5 hash of the value, "
- "or use full text indexing.")));
+ errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
+ "Consider a function index of an MD5 hash of the value, "
+ "or use full text indexing.")));
/*
* Determine exactly where new item will go.
@@ -432,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on
- * current page; else someone else's _bt_check_unique scan
- * could fail to see our insertion. write locks on
- * intermediate dead pages won't do because we don't know when
- * they will get de-linked from the tree.
+ * must write-lock that page before releasing write lock on current
+ * page; else someone else's _bt_check_unique scan could fail to
+ * see our insertion. write locks on intermediate dead pages
+ * won't do because we don't know when they will get de-linked
+ * from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -459,9 +458,9 @@ _bt_insertonpg(Relation rel,
}
/*
- * Now we are on the right page, so find the insert position. If
- * we moved right at all, we know we should insert at the start of
- * the page, else must find the position by searching.
+ * Now we are on the right page, so find the insert position. If we
+ * moved right at all, we know we should insert at the start of the
+ * page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
@@ -472,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
- * so this comparison is correct even though we appear to be
- * accounting only for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
+ * this comparison is correct even though we appear to be accounting only
+ * for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -522,12 +521,11 @@ _bt_insertonpg(Relation rel,
itup_blkno = BufferGetBlockNumber(buf);
/*
- * If we are doing this insert because we split a page that was
- * the only one on its tree level, but was not the root, it may
- * have been the "fast root". We need to ensure that the fast
- * root link points at or above the current page. We can safely
- * acquire a lock on the metapage here --- see comments for
- * _bt_newroot().
+ * If we are doing this insert because we split a page that was the
+ * only one on its tree level, but was not the root, it may have been
+ * the "fast root". We need to ensure that the fast root link points
+ * at or above the current page. We can safely acquire a lock on the
+ * metapage here --- see comments for _bt_newroot().
*/
if (split_only_page)
{
@@ -692,11 +690,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lopaque->btpo.level = ropaque->btpo.level = oopaque->btpo.level;
/*
- * If the page we're splitting is not the rightmost page at its level
- * in the tree, then the first entry on the page is the high key for
- * the page. We need to copy that to the right half. Otherwise
- * (meaning the rightmost page case), all the items on the right half
- * will be user data.
+ * If the page we're splitting is not the rightmost page at its level in
+ * the tree, then the first entry on the page is the high key for the
+ * page. We need to copy that to the right half. Otherwise (meaning the
+ * rightmost page case), all the items on the right half will be user
+ * data.
*/
rightoff = P_HIKEY;
@@ -712,9 +710,9 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * The "high key" for the new left page will be the first key that's
- * going to go into the new right page. This might be either the
- * existing data item at position firstright, or the incoming tuple.
+ * The "high key" for the new left page will be the first key that's going
+ * to go into the new right page. This might be either the existing data
+ * item at position firstright, or the incoming tuple.
*/
leftoff = P_HIKEY;
if (!newitemonleft && newitemoff == firstright)
@@ -806,8 +804,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* We have to grab the right sibling (if any) and fix the prev pointer
* there. We are guaranteed that this is deadlock-free since no other
- * writer will be holding a lock on that page and trying to move left,
- * and all readers release locks on a page before trying to fetch its
+ * writer will be holding a lock on that page and trying to move left, and
+ * all readers release locks on a page before trying to fetch its
* neighbors.
*/
@@ -821,8 +819,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * Right sibling is locked, new siblings are prepared, but original
- * page is not updated yet. Log changes before continuing.
+ * Right sibling is locked, new siblings are prepared, but original page
+ * is not updated yet. Log changes before continuing.
*
* NO EREPORT(ERROR) till right sibling is updated.
*/
@@ -850,10 +848,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
xlrec.level = lopaque->btpo.level;
/*
- * Direct access to page is not good but faster - we should
- * implement some new func in page API. Note we only store the
- * tuples themselves, knowing that the item pointers are in the
- * same order and can be reconstructed by scanning the tuples.
+ * Direct access to page is not good but faster - we should implement
+ * some new func in page API. Note we only store the tuples
+ * themselves, knowing that the item pointers are in the same order
+ * and can be reconstructed by scanning the tuples.
*/
xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader) leftpage)->pd_upper;
@@ -903,13 +901,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * By here, the original data page has been split into two new halves,
- * and these are correct. The algorithm requires that the left page
- * never move during a split, so we copy the new left page back on top
- * of the original. Note that this is not a waste of time, since we
- * also require (in the page management code) that the center of a
- * page always be clean, and the most efficient way to guarantee this
- * is just to compact the data by reinserting it into a new left page.
+ * By here, the original data page has been split into two new halves, and
+ * these are correct. The algorithm requires that the left page never
+ * move during a split, so we copy the new left page back on top of the
+ * original. Note that this is not a waste of time, since we also require
+ * (in the page management code) that the center of a page always be
+ * clean, and the most efficient way to guarantee this is just to compact
+ * the data by reinserting it into a new left page.
*/
PageRestoreTempPage(leftpage, origpage);
@@ -984,13 +982,13 @@ _bt_findsplitloc(Relation rel,
MAXALIGN(sizeof(BTPageOpaqueData));
/*
- * Finding the best possible split would require checking all the
- * possible split points, because of the high-key and left-key special
- * cases. That's probably more work than it's worth; instead, stop as
- * soon as we find a "good-enough" split, where good-enough is defined
- * as an imbalance in free space of no more than pagesize/16
- * (arbitrary...) This should let us stop near the middle on most
- * pages, instead of plowing to the end.
+ * Finding the best possible split would require checking all the possible
+ * split points, because of the high-key and left-key special cases.
+ * That's probably more work than it's worth; instead, stop as soon as we
+ * find a "good-enough" split, where good-enough is defined as an
+ * imbalance in free space of no more than pagesize/16 (arbitrary...) This
+ * should let us stop near the middle on most pages, instead of plowing to
+ * the end.
*/
goodenough = leftspace / 16;
@@ -1006,8 +1004,8 @@ _bt_findsplitloc(Relation rel,
dataitemtotal = rightspace - (int) PageGetFreeSpace(page);
/*
- * Scan through the data items and calculate space usage for a split
- * at each possible position.
+ * Scan through the data items and calculate space usage for a split at
+ * each possible position.
*/
dataitemstoleft = 0;
maxoff = PageGetMaxOffsetNumber(page);
@@ -1024,9 +1022,9 @@ _bt_findsplitloc(Relation rel,
itemsz = MAXALIGN(ItemIdGetLength(itemid)) + sizeof(ItemIdData);
/*
- * We have to allow for the current item becoming the high key of
- * the left page; therefore it counts against left space as well
- * as right space.
+ * We have to allow for the current item becoming the high key of the
+ * left page; therefore it counts against left space as well as right
+ * space.
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
@@ -1058,8 +1056,8 @@ _bt_findsplitloc(Relation rel,
}
/*
- * I believe it is not possible to fail to find a feasible split, but
- * just in case ...
+ * I believe it is not possible to fail to find a feasible split, but just
+ * in case ...
*/
if (!state.have_split)
elog(ERROR, "could not find a feasible split point for \"%s\"",
@@ -1105,8 +1103,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
{
/*
* On a rightmost page, try to equalize right free space with
- * twice the left free space. See comments for
- * _bt_findsplitloc.
+ * twice the left free space. See comments for _bt_findsplitloc.
*/
delta = (2 * leftfree) - rightfree;
}
@@ -1153,19 +1150,18 @@ _bt_insert_parent(Relation rel,
bool is_only)
{
/*
- * Here we have to do something Lehman and Yao don't talk about: deal
- * with a root split and construction of a new root. If our stack is
- * empty then we have just split a node on what had been the root
- * level when we descended the tree. If it was still the root then we
- * perform a new-root construction. If it *wasn't* the root anymore,
- * search to find the next higher level that someone constructed
- * meanwhile, and find the right place to insert as for the normal
- * case.
+ * Here we have to do something Lehman and Yao don't talk about: deal with
+ * a root split and construction of a new root. If our stack is empty
+ * then we have just split a node on what had been the root level when we
+ * descended the tree. If it was still the root then we perform a
+ * new-root construction. If it *wasn't* the root anymore, search to find
+ * the next higher level that someone constructed meanwhile, and find the
+ * right place to insert as for the normal case.
*
- * If we have to search for the parent level, we do so by re-descending
- * from the root. This is not super-efficient, but it's rare enough
- * not to matter. (This path is also taken when called from WAL
- * recovery --- we have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending from
+ * the root. This is not super-efficient, but it's rare enough not to
+ * matter. (This path is also taken when called from WAL recovery --- we
+ * have no stack in that case.)
*/
if (is_root)
{
@@ -1219,9 +1215,9 @@ _bt_insert_parent(Relation rel,
/*
* Find the parent buffer and get the parent page.
*
- * Oops - if we were moved right then we need to change stack item!
- * We want to find parent pointing to where we are, right ? -
- * vadim 05/27/97
+ * Oops - if we were moved right then we need to change stack item! We
+ * want to find parent pointing to where we are, right ? - vadim
+ * 05/27/97
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -1291,9 +1287,9 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
/*
- * start = InvalidOffsetNumber means "search the whole page".
- * We need this test anyway due to possibility that page has a
- * high key now when it didn't before.
+ * start = InvalidOffsetNumber means "search the whole page". We
+ * need this test anyway due to possibility that page has a high
+ * key now when it didn't before.
*/
if (start < minoff)
start = minoff;
@@ -1307,8 +1303,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
- * order that's attuned to the probability of where it
- * actually is. Scan to the right first, then to the left.
+ * order that's attuned to the probability of where it actually
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
@@ -1424,9 +1420,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
metad->btm_fastlevel = rootopaque->btpo.level;
/*
- * Create downlink item for left page (old root). Since this will be
- * the first item in a non-leaf page, it implicitly has minus-infinity
- * key value, so we need not store any actual key in it.
+ * Create downlink item for left page (old root). Since this will be the
+ * first item in a non-leaf page, it implicitly has minus-infinity key
+ * value, so we need not store any actual key in it.
*/
itemsz = sizeof(BTItemData);
new_item = (BTItem) palloc(itemsz);
@@ -1434,17 +1430,17 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
ItemPointerSet(&(new_item->bti_itup.t_tid), lbkno, P_HIKEY);
/*
- * Insert the left page pointer into the new root page. The root page
- * is the rightmost page on its level so there is no "high key" in it;
- * the two items will go into positions P_HIKEY and P_FIRSTKEY.
+ * Insert the left page pointer into the new root page. The root page is
+ * the rightmost page on its level so there is no "high key" in it; the
+ * two items will go into positions P_HIKEY and P_FIRSTKEY.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
elog(PANIC, "failed to add leftkey to new root page");
pfree(new_item);
/*
- * Create downlink item for right page. The key for it is obtained
- * from the "high key" position in the left page.
+ * Create downlink item for right page. The key for it is obtained from
+ * the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@@ -1476,8 +1472,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rdata[0].next = &(rdata[1]);
/*
- * Direct access to page is not good but faster - we should
- * implement some new func in page API.
+ * Direct access to page is not good but faster - we should implement
+ * some new func in page API.
*/
rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
rdata[1].len = ((PageHeader) rootpage)->pd_special -
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 52d60abaec0..927860030c8 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.87 2005/08/12 14:34:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88 2005/10/15 02:49:09 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -115,8 +115,8 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not
- * essential but it makes the page look compressible to xlog.c.
+ * Set pd_lower just past the end of the metadata. This is not essential
+ * but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
@@ -198,26 +198,26 @@ _bt_getroot(Relation rel, int access)
LockBuffer(metabuf, BT_WRITE);
/*
- * Race condition: if someone else initialized the metadata
- * between the time we released the read lock and acquired the
- * write lock, we must avoid doing it again.
+ * Race condition: if someone else initialized the metadata between
+ * the time we released the read lock and acquired the write lock, we
+ * must avoid doing it again.
*/
if (metad->btm_root != P_NONE)
{
/*
- * Metadata initialized by someone else. In order to
- * guarantee no deadlocks, we have to release the metadata
- * page and start all over again. (Is that really true? But
- * it's hardly worth trying to optimize this case.)
+ * Metadata initialized by someone else. In order to guarantee no
+ * deadlocks, we have to release the metadata page and start all
+ * over again. (Is that really true? But it's hardly worth trying
+ * to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
return _bt_getroot(rel, access);
}
/*
- * Get, initialize, write, and leave a lock of the appropriate
- * type on the new root page. Since this is the first page in the
- * tree, it's a leaf as well as the root.
+ * Get, initialize, write, and leave a lock of the appropriate type on
+ * the new root page. Since this is the first page in the tree, it's
+ * a leaf as well as the root.
*/
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
rootblkno = BufferGetBlockNumber(rootbuf);
@@ -266,9 +266,9 @@ _bt_getroot(Relation rel, int access)
_bt_wrtnorelbuf(rel, rootbuf);
/*
- * swap root write lock for read lock. There is no danger of
- * anyone else accessing the new root page while it's unlocked,
- * since no one else knows where it is yet.
+ * swap root write lock for read lock. There is no danger of anyone
+ * else accessing the new root page while it's unlocked, since no one
+ * else knows where it is yet.
*/
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
@@ -312,8 +312,8 @@ _bt_getroot(Relation rel, int access)
}
/*
- * By here, we have a pin and read lock on the root page, and no lock
- * set on the metadata page. Return the root page's buffer.
+ * By here, we have a pin and read lock on the root page, and no lock set
+ * on the metadata page. Return the root page's buffer.
*/
return rootbuf;
}
@@ -435,27 +435,26 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* First see if the FSM knows of any free pages.
*
- * We can't trust the FSM's report unreservedly; we have to check
- * that the page is still free. (For example, an already-free
- * page could have been re-used between the time the last VACUUM
- * scanned it and the time the VACUUM made its FSM updates.)
+ * We can't trust the FSM's report unreservedly; we have to check that
+ * the page is still free. (For example, an already-free page could
+ * have been re-used between the time the last VACUUM scanned it and
+ * the time the VACUUM made its FSM updates.)
*
- * In fact, it's worse than that: we can't even assume that it's safe
- * to take a lock on the reported page. If somebody else has a
- * lock on it, or even worse our own caller does, we could
- * deadlock. (The own-caller scenario is actually not improbable.
- * Consider an index on a serial or timestamp column. Nearly all
- * splits will be at the rightmost page, so it's entirely likely
- * that _bt_split will call us while holding a lock on the page
- * most recently acquired from FSM. A VACUUM running concurrently
- * with the previous split could well have placed that page back
- * in FSM.)
+ * In fact, it's worse than that: we can't even assume that it's safe to
+ * take a lock on the reported page. If somebody else has a lock on
+ * it, or even worse our own caller does, we could deadlock. (The
+ * own-caller scenario is actually not improbable. Consider an index
+ * on a serial or timestamp column. Nearly all splits will be at the
+ * rightmost page, so it's entirely likely that _bt_split will call us
+ * while holding a lock on the page most recently acquired from FSM.
+ * A VACUUM running concurrently with the previous split could well
+ * have placed that page back in FSM.)
*
- * To get around that, we ask for only a conditional lock on the
- * reported page. If we fail, then someone else is using the
- * page, and we may reasonably assume it's not free. (If we
- * happen to be wrong, the worst consequence is the page will be
- * lost to use till the next VACUUM, which is no big problem.)
+ * To get around that, we ask for only a conditional lock on the reported
+ * page. If we fail, then someone else is using the page, and we may
+ * reasonably assume it's not free. (If we happen to be wrong, the
+ * worst consequence is the page will be lost to use till the next
+ * VACUUM, which is no big problem.)
*/
for (;;)
{
@@ -486,10 +485,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* Extend the relation by one page.
*
- * We have to use a lock to ensure no one else is extending the rel
- * at the same time, else we will both try to initialize the same
- * new page. We can skip locking for new or temp relations,
- * however, since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel at
+ * the same time, else we will both try to initialize the same new
+ * page. We can skip locking for new or temp relations, however,
+ * since no one else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
@@ -504,8 +503,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* Release the file-extension lock; it's now OK for someone else to
* extend the relation some more. Note that we cannot release this
- * lock before we have buffer lock on the new page, or we risk a
- * race condition against btvacuumcleanup --- see comments therein.
+ * lock before we have buffer lock on the new page, or we risk a race
+ * condition against btvacuumcleanup --- see comments therein.
*/
if (needLock)
UnlockRelationForExtension(rel, ExclusiveLock);
@@ -614,10 +613,10 @@ _bt_page_recyclable(Page page)
BTPageOpaque opaque;
/*
- * It's possible to find an all-zeroes page in an index --- for
- * example, a backend might successfully extend the relation one page
- * and then crash before it is able to make a WAL entry for adding the
- * page. If we find a zeroed page then reclaim it.
+ * It's possible to find an all-zeroes page in an index --- for example, a
+ * backend might successfully extend the relation one page and then crash
+ * before it is able to make a WAL entry for adding the page. If we find a
+ * zeroed page then reclaim it.
*/
if (PageIsNew(page))
return true;
@@ -672,9 +671,9 @@ _bt_delitems(Relation rel, Buffer buf,
rdata[0].next = &(rdata[1]);
/*
- * The target-offsets array is not in the buffer, but pretend that
- * it is. When XLogInsert stores the whole buffer, the offsets
- * array need not be stored too.
+ * The target-offsets array is not in the buffer, but pretend that it
+ * is. When XLogInsert stores the whole buffer, the offsets array
+ * need not be stored too.
*/
if (nitems > 0)
{
@@ -747,8 +746,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it,
- * check that page is not already deleted and is empty.
+ * We can never delete rightmost pages nor root pages. While at it, check
+ * that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -760,8 +759,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * Save info about page, including a copy of its high key (it must
- * have one, being non-rightmost).
+ * Save info about page, including a copy of its high key (it must have
+ * one, being non-rightmost).
*/
target = BufferGetBlockNumber(buf);
targetlevel = opaque->btpo.level;
@@ -770,11 +769,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
/*
- * We need to get an approximate pointer to the page's parent page.
- * Use the standard search mechanism to search for the page's high
- * key; this will give us a link to either the current parent or
- * someplace to its left (if there are multiple equal high keys). To
- * avoid deadlocks, we'd better drop the target page lock first.
+ * We need to get an approximate pointer to the page's parent page. Use
+ * the standard search mechanism to search for the page's high key; this
+ * will give us a link to either the current parent or someplace to its
+ * left (if there are multiple equal high keys). To avoid deadlocks, we'd
+ * better drop the target page lock first.
*/
_bt_relbuf(rel, buf);
/* we need a scan key to do our search, so build one */
@@ -786,9 +785,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
_bt_relbuf(rel, lbuf);
/*
- * If we are trying to delete an interior page, _bt_search did more
- * than we needed. Locate the stack item pointing to our parent
- * level.
+ * If we are trying to delete an interior page, _bt_search did more than
+ * we needed. Locate the stack item pointing to our parent level.
*/
ilevel = 0;
for (;;)
@@ -803,16 +801,15 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* We have to lock the pages we need to modify in the standard order:
- * moving right, then up. Else we will deadlock against other
- * writers.
+ * moving right, then up. Else we will deadlock against other writers.
*
- * So, we need to find and write-lock the current left sibling of the
- * target page. The sibling that was current a moment ago could have
- * split, so we may have to move right. This search could fail if
- * either the sibling or the target page was deleted by someone else
- * meanwhile; if so, give up. (Right now, that should never happen,
- * since page deletion is only done in VACUUM and there shouldn't be
- * multiple VACUUMs concurrently on the same table.)
+ * So, we need to find and write-lock the current left sibling of the target
+ * page. The sibling that was current a moment ago could have split, so
+ * we may have to move right. This search could fail if either the
+ * sibling or the target page was deleted by someone else meanwhile; if
+ * so, give up. (Right now, that should never happen, since page deletion
+ * is only done in VACUUM and there shouldn't be multiple VACUUMs
+ * concurrently on the same table.)
*/
if (leftsib != P_NONE)
{
@@ -839,19 +836,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take
- * just a write lock not a superexclusive lock, since no scans would
- * stop on an empty page.
+ * Next write-lock the target page itself. It should be okay to take just
+ * a write lock not a superexclusive lock, since no scans would stop on an
+ * empty page.
*/
buf = _bt_getbuf(rel, target, BT_WRITE);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Check page is still empty etc, else abandon deletion. The empty
- * check is necessary since someone else might have inserted into it
- * while we didn't have it locked; the others are just for paranoia's
- * sake.
+ * Check page is still empty etc, else abandon deletion. The empty check
+ * is necessary since someone else might have inserted into it while we
+ * didn't have it locked; the others are just for paranoia's sake.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
@@ -872,9 +868,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
/*
- * Next find and write-lock the current parent of the target page.
- * This is essentially the same as the corresponding step of
- * splitting.
+ * Next find and write-lock the current parent of the target page. This is
+ * essentially the same as the corresponding step of splitting.
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
target, P_HIKEY);
@@ -887,8 +882,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* If the target is the rightmost child of its parent, then we can't
- * delete, unless it's also the only child --- in which case the
- * parent changes to half-dead status.
+ * delete, unless it's also the only child --- in which case the parent
+ * changes to half-dead status.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -917,11 +912,10 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * If we are deleting the next-to-last page on the target's level,
- * then the rightsib is a candidate to become the new fast root. (In
- * theory, it might be possible to push the fast root even further
- * down, but the odds of doing so are slim, and the locking
- * considerations daunting.)
+ * If we are deleting the next-to-last page on the target's level, then
+ * the rightsib is a candidate to become the new fast root. (In theory, it
+ * might be possible to push the fast root even further down, but the odds
+ * of doing so are slim, and the locking considerations daunting.)
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -939,9 +933,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
metad = BTPageGetMeta(metapg);
/*
- * The expected case here is btm_fastlevel == targetlevel+1;
- * if the fastlevel is <= targetlevel, something is wrong, and
- * we choose to overwrite it to fix it.
+ * The expected case here is btm_fastlevel == targetlevel+1; if
+ * the fastlevel is <= targetlevel, something is wrong, and we
+ * choose to overwrite it to fix it.
*/
if (metad->btm_fastlevel > targetlevel + 1)
{
@@ -961,9 +955,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* Update parent. The normal case is a tad tricky because we want to
- * delete the target's downlink and the *following* key. Easiest way
- * is to copy the right sibling's downlink over the target downlink,
- * and then delete the following item.
+ * delete the target's downlink and the *following* key. Easiest way is
+ * to copy the right sibling's downlink over the target downlink, and then
+ * delete the following item.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -992,8 +986,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * Update siblings' side-links. Note the target page's side-links
- * will continue to point to the siblings.
+ * Update siblings' side-links. Note the target page's side-links will
+ * continue to point to the siblings.
*/
if (BufferIsValid(lbuf))
{
@@ -1123,10 +1117,10 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
_bt_wrtbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to try to delete it. Otherwise,
- * if right sibling is empty and is now the last child of the parent,
- * recurse to try to delete it. (These cases cannot apply at the same
- * time, though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to try to delete it. Otherwise, if
+ * right sibling is empty and is now the last child of the parent, recurse
+ * to try to delete it. (These cases cannot apply at the same time,
+ * though the second case might itself recurse to the first.)
*/
if (parent_half_dead)
{
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index d4232c847f8..10e2fe6190d 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.131 2005/09/02 19:02:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,9 +39,9 @@ typedef struct
BTSpool *spool;
/*
- * spool2 is needed only when the index is an unique index. Dead
- * tuples are put into spool2 instead of spool in order to avoid
- * uniqueness check.
+ * spool2 is needed only when the index is an unique index. Dead tuples
+ * are put into spool2 instead of spool in order to avoid uniqueness
+ * check.
*/
BTSpool *spool2;
double indtuples;
@@ -72,10 +72,10 @@ btbuild(PG_FUNCTION_ARGS)
BTBuildState buildstate;
/*
- * bootstrap processing does something strange, so don't use
- * sort/build for initial catalog indices. at some point i need to
- * look harder at this. (there is some kind of incremental processing
- * going on there.) -- pma 08/29/95
+ * bootstrap processing does something strange, so don't use sort/build
+ * for initial catalog indices. at some point i need to look harder at
+ * this. (there is some kind of incremental processing going on there.)
+ * -- pma 08/29/95
*/
buildstate.usefast = (FastBuild && IsNormalProcessingMode());
buildstate.isUnique = indexInfo->ii_Unique;
@@ -91,8 +91,8 @@ btbuild(PG_FUNCTION_ARGS)
#endif /* BTREE_BUILD_STATS */
/*
- * We expect to be called exactly once for any index relation. If
- * that's not the case, big trouble's what we have.
+ * We expect to be called exactly once for any index relation. If that's
+ * not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
@@ -103,8 +103,8 @@ btbuild(PG_FUNCTION_ARGS)
buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false);
/*
- * If building a unique index, put dead tuples in a second spool
- * to keep them out of the uniqueness check.
+ * If building a unique index, put dead tuples in a second spool to
+ * keep them out of the uniqueness check.
*/
if (indexInfo->ii_Unique)
buildstate.spool2 = _bt_spoolinit(index, false, true);
@@ -129,8 +129,8 @@ btbuild(PG_FUNCTION_ARGS)
/*
* if we are doing bottom-up btree build, finish the build by (1)
- * completing the sort of the spool file, (2) inserting the sorted
- * tuples into btree pages and (3) building the upper levels.
+ * completing the sort of the spool file, (2) inserting the sorted tuples
+ * into btree pages and (3) building the upper levels.
*/
if (buildstate.usefast)
{
@@ -176,9 +176,8 @@ btbuildCallback(Relation index,
btitem = _bt_formitem(itup);
/*
- * if we are doing bottom-up btree build, we insert the index into a
- * spool file for subsequent processing. otherwise, we insert into
- * the btree.
+ * if we are doing bottom-up btree build, we insert the index into a spool
+ * file for subsequent processing. otherwise, we insert into the btree.
*/
if (buildstate->usefast)
{
@@ -248,16 +247,16 @@ btgettuple(PG_FUNCTION_ARGS)
bool res;
/*
- * If we've already initialized this scan, we can just advance it in
- * the appropriate direction. If we haven't done so yet, we call a
- * routine to get the first item in the scan.
+ * If we've already initialized this scan, we can just advance it in the
+ * appropriate direction. If we haven't done so yet, we call a routine to
+ * get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
- * Restore scan position using heap TID returned by previous call
- * to btgettuple(). _bt_restscan() re-grabs the read lock on the
- * buffer, too.
+ * Restore scan position using heap TID returned by previous call to
+ * btgettuple(). _bt_restscan() re-grabs the read lock on the buffer,
+ * too.
*/
_bt_restscan(scan);
@@ -267,17 +266,16 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, so mark it by setting the LP_DELETE bit in the item
- * flags.
+ * Yes, so mark it by setting the LP_DELETE bit in the item flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->btso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
- * Since this can be redone later if needed, it's treated the
- * same as a commit-hint-bit status update for heap tuples: we
- * mark the buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same
+ * as a commit-hint-bit status update for heap tuples: we mark the
+ * buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->btso_curbuf);
}
@@ -306,11 +304,11 @@ btgettuple(PG_FUNCTION_ARGS)
}
/*
- * Save heap TID to use it in _bt_restscan. Then release the read
- * lock on the buffer so that we aren't blocking other backends.
+ * Save heap TID to use it in _bt_restscan. Then release the read lock on
+ * the buffer so that we aren't blocking other backends.
*
- * NOTE: we do keep the pin on the buffer! This is essential to ensure
- * that someone else doesn't delete the index entry we are stopped on.
+ * NOTE: we do keep the pin on the buffer! This is essential to ensure that
+ * someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
@@ -333,7 +331,7 @@ Datum
btgetmulti(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
+ ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
int32 max_tids = PG_GETARG_INT32(2);
int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
BTScanOpaque so = (BTScanOpaque) scan->opaque;
@@ -355,6 +353,7 @@ btgetmulti(PG_FUNCTION_ARGS)
res = _bt_next(scan, ForwardScanDirection);
else
res = _bt_first(scan, ForwardScanDirection);
+
/*
* Skip killed tuples if asked to.
*/
@@ -381,8 +380,8 @@ btgetmulti(PG_FUNCTION_ARGS)
}
/*
- * Save heap TID to use it in _bt_restscan. Then release the read
- * lock on the buffer so that we aren't blocking other backends.
+ * Save heap TID to use it in _bt_restscan. Then release the read lock on
+ * the buffer so that we aren't blocking other backends.
*/
if (res)
{
@@ -456,8 +455,8 @@ btrescan(PG_FUNCTION_ARGS)
}
/*
- * Reset the scan keys. Note that keys ordering stuff moved to
- * _bt_first. - vadim 05/05/97
+ * Reset the scan keys. Note that keys ordering stuff moved to _bt_first.
+ * - vadim 05/05/97
*/
if (scankey && scan->numberOfKeys > 0)
memmove(scan->keyData,
@@ -593,21 +592,20 @@ btbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples = 0;
/*
- * The outer loop iterates over index leaf pages, the inner over items
- * on a leaf page. We issue just one _bt_delitems() call per page, so
- * as to minimize WAL traffic.
+ * The outer loop iterates over index leaf pages, the inner over items on
+ * a leaf page. We issue just one _bt_delitems() call per page, so as to
+ * minimize WAL traffic.
*
* Note that we exclusive-lock every leaf page containing data items, in
- * sequence left to right. It sounds attractive to only
- * exclusive-lock those containing items we need to delete, but
- * unfortunately that is not safe: we could then pass a stopped
- * indexscan, which could in rare cases lead to deleting the item it
- * needs to find when it resumes. (See _bt_restscan --- this could
- * only happen if an indexscan stops on a deletable item and then a
- * page split moves that item into a page further to its right, which
- * the indexscan will have no pin on.) We can skip obtaining
- * exclusive lock on empty pages though, since no indexscan could be
- * stopped on those.
+ * sequence left to right. It sounds attractive to only exclusive-lock
+ * those containing items we need to delete, but unfortunately that is not
+ * safe: we could then pass a stopped indexscan, which could in rare cases
+ * lead to deleting the item it needs to find when it resumes. (See
+ * _bt_restscan --- this could only happen if an indexscan stops on a
+ * deletable item and then a page split moves that item into a page
+ * further to its right, which the indexscan will have no pin on.) We can
+ * skip obtaining exclusive lock on empty pages though, since no indexscan
+ * could be stopped on those.
*/
buf = _bt_get_endpoint(rel, 0, false);
if (BufferIsValid(buf)) /* check for empty index */
@@ -632,15 +630,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
if (minoff <= maxoff && !P_ISDELETED(opaque))
{
/*
- * Trade in the initial read lock for a super-exclusive
- * write lock on this page.
+ * Trade in the initial read lock for a super-exclusive write
+ * lock on this page.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/*
- * Recompute minoff/maxoff, both of which could have
- * changed while we weren't holding the lock.
+ * Recompute minoff/maxoff, both of which could have changed
+ * while we weren't holding the lock.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
@@ -657,7 +655,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
ItemPointer htup;
btitem = (BTItem) PageGetItem(page,
- PageGetItemId(page, offnum));
+ PageGetItemId(page, offnum));
htup = &(btitem->bti_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -670,8 +668,8 @@ btbulkdelete(PG_FUNCTION_ARGS)
}
/*
- * If we need to delete anything, do it and write the buffer;
- * else just release the buffer.
+ * If we need to delete anything, do it and write the buffer; else
+ * just release the buffer.
*/
nextpage = opaque->btpo_next;
if (ndeletable > 0)
@@ -725,19 +723,19 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
Assert(stats != NULL);
/*
- * First find out the number of pages in the index. We must acquire
- * the relation-extension lock while doing this to avoid a race
- * condition: if someone else is extending the relation, there is
- * a window where bufmgr/smgr have created a new all-zero page but
- * it hasn't yet been write-locked by _bt_getbuf(). If we manage to
- * scan such a page here, we'll improperly assume it can be recycled.
- * Taking the lock synchronizes things enough to prevent a problem:
- * either num_pages won't include the new page, or _bt_getbuf already
- * has write lock on the buffer and it will be fully initialized before
- * we can examine it. (See also vacuumlazy.c, which has the same issue.)
+ * First find out the number of pages in the index. We must acquire the
+ * relation-extension lock while doing this to avoid a race condition: if
+ * someone else is extending the relation, there is a window where
+ * bufmgr/smgr have created a new all-zero page but it hasn't yet been
+ * write-locked by _bt_getbuf(). If we manage to scan such a page here,
+ * we'll improperly assume it can be recycled. Taking the lock
+ * synchronizes things enough to prevent a problem: either num_pages won't
+ * include the new page, or _bt_getbuf already has write lock on the
+ * buffer and it will be fully initialized before we can examine it. (See
+ * also vacuumlazy.c, which has the same issue.)
*
- * We can skip locking for new or temp relations,
- * however, since no one else could be accessing them.
+ * We can skip locking for new or temp relations, however, since no one else
+ * could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
@@ -807,12 +805,12 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During VACUUM FULL it's okay to recycle deleted pages
- * immediately, since there can be no other transactions
- * scanning the index. Note that we will only recycle the
- * current page and not any parent pages that _bt_pagedel
- * might have recursed to; this seems reasonable in the name
- * of simplicity. (Trying to do otherwise would mean we'd
- * have to sort the list of recyclable pages we're building.)
+ * immediately, since there can be no other transactions scanning
+ * the index. Note that we will only recycle the current page and
+ * not any parent pages that _bt_pagedel might have recursed to;
+ * this seems reasonable in the name of simplicity. (Trying to do
+ * otherwise would mean we'd have to sort the list of recyclable
+ * pages we're building.)
*/
if (ndel && info->vacuum_full)
{
@@ -827,10 +825,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
}
/*
- * During VACUUM FULL, we truncate off any recyclable pages at the end
- * of the index. In a normal vacuum it'd be unsafe to do this except
- * by acquiring exclusive lock on the index and then rechecking all
- * the pages; doesn't seem worth it.
+ * During VACUUM FULL, we truncate off any recyclable pages at the end of
+ * the index. In a normal vacuum it'd be unsafe to do this except by
+ * acquiring exclusive lock on the index and then rechecking all the
+ * pages; doesn't seem worth it.
*/
if (info->vacuum_full && nFreePages > 0)
{
@@ -857,9 +855,9 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
}
/*
- * Update the shared Free Space Map with the info we now have about
- * free pages in the index, discarding any old info the map may have.
- * We do not need to sort the page numbers; they're in order already.
+ * Update the shared Free Space Map with the info we now have about free
+ * pages in the index, discarding any old info the map may have. We do not
+ * need to sort the page numbers; they're in order already.
*/
RecordIndexFreeSpace(&rel->rd_node, nFreePages, freePages);
@@ -915,15 +913,15 @@ _bt_restscan(IndexScanDesc scan)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * We use this as flag when first index tuple on page is deleted but
- * we do not move left (this would slowdown vacuum) - so we set
+ * We use this as flag when first index tuple on page is deleted but we do
+ * not move left (this would slowdown vacuum) - so we set
* current->ip_posid before first index tuple on the current page
* (_bt_step will move it right)... XXX still needed?
*/
if (!ItemPointerIsValid(target))
{
ItemPointerSetOffsetNumber(current,
- OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
+ OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
return;
}
@@ -948,12 +946,12 @@ _bt_restscan(IndexScanDesc scan)
}
/*
- * The item we're looking for moved right at least one page, so
- * move right. We are careful here to pin and read-lock the next
- * non-dead page before releasing the current one. This ensures
- * that a concurrent btbulkdelete scan cannot pass our position
- * --- if it did, it might be able to reach and delete our target
- * item before we can find it again.
+ * The item we're looking for moved right at least one page, so move
+ * right. We are careful here to pin and read-lock the next non-dead
+ * page before releasing the current one. This ensures that a
+ * concurrent btbulkdelete scan cannot pass our position --- if it
+ * did, it might be able to reach and delete our target item before we
+ * can find it again.
*/
if (P_RIGHTMOST(opaque))
elog(ERROR, "failed to re-find previous key in \"%s\"",
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index c029824fa6f..06075dd3dda 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.94 2005/10/06 02:29:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,9 +69,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey,
BTStack new_stack;
/*
- * Race -- the page we just grabbed may have split since we read
- * its pointer in the parent (or metapage). If it has, we may
- * need to move right to its new sibling. Do that.
+ * Race -- the page we just grabbed may have split since we read its
+ * pointer in the parent (or metapage). If it has, we may need to
+ * move right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, BT_READ);
@@ -82,8 +82,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey,
break;
/*
- * Find the appropriate item on the internal page, and get the
- * child page that it points to.
+ * Find the appropriate item on the internal page, and get the child
+ * page that it points to.
*/
offnum = _bt_binsrch(rel, *bufP, keysz, scankey, nextkey);
itemid = PageGetItemId(page, offnum);
@@ -94,13 +94,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey,
/*
* We need to save the location of the index entry we chose in the
- * parent page on a stack. In case we split the tree, we'll use
- * the stack to work back up to the parent page. We also save the
- * actual downlink (TID) to uniquely identify the index entry, in
- * case it moves right while we're working lower in the tree. See
- * the paper by Lehman and Yao for how this is detected and
- * handled. (We use the child link to disambiguate duplicate keys
- * in the index -- Lehman and Yao disallow duplicate keys.)
+ * parent page on a stack. In case we split the tree, we'll use the
+ * stack to work back up to the parent page. We also save the actual
+ * downlink (TID) to uniquely identify the index entry, in case it
+ * moves right while we're working lower in the tree. See the paper
+ * by Lehman and Yao for how this is detected and handled. (We use the
+ * child link to disambiguate duplicate keys in the index -- Lehman
+ * and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -156,19 +156,18 @@ _bt_moveright(Relation rel,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * When nextkey = false (normal case): if the scan key that brought us
- * to this page is > the high key stored on the page, then the page
- * has split and we need to move right. (If the scan key is equal to
- * the high key, we might or might not need to move right; have to
- * scan the page first anyway.)
+ * When nextkey = false (normal case): if the scan key that brought us to
+ * this page is > the high key stored on the page, then the page has split
+ * and we need to move right. (If the scan key is equal to the high key,
+ * we might or might not need to move right; have to scan the page first
+ * anyway.)
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
- * The page could even have split more than once, so scan as far as
- * needed.
+ * The page could even have split more than once, so scan as far as needed.
*
- * We also have to move right if we followed a link that brought us to a
- * dead page.
+ * We also have to move right if we followed a link that brought us to a dead
+ * page.
*/
cmpval = nextkey ? 0 : 1;
@@ -242,24 +241,24 @@ _bt_binsrch(Relation rel,
high = PageGetMaxOffsetNumber(page);
/*
- * If there are no keys on the page, return the first available slot.
- * Note this covers two cases: the page is really empty (no keys), or
- * it contains only a high key. The latter case is possible after
- * vacuuming. This can never happen on an internal page, however,
- * since they are never empty (an internal page must have children).
+ * If there are no keys on the page, return the first available slot. Note
+ * this covers two cases: the page is really empty (no keys), or it
+ * contains only a high key. The latter case is possible after vacuuming.
+ * This can never happen on an internal page, however, since they are
+ * never empty (an internal page must have children).
*/
if (high < low)
return low;
/*
- * Binary search to find the first key on the page >= scan key, or
- * first key > scankey when nextkey is true.
+ * Binary search to find the first key on the page >= scan key, or first
+ * key > scankey when nextkey is true.
*
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
- * For nextkey=true (cmpval=0), the loop invariant is: all slots before
- * 'low' are <= scan key, all slots at or after 'high' are > scan key.
+ * For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
+ * are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@@ -285,15 +284,15 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key (resp. >
- * scan key), which could be the last slot + 1.
+ * On a leaf page, we always return the first key >= scan key (resp. > scan
+ * key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
/*
- * On a non-leaf page, return the last key < scan key (resp. <= scan
- * key). There must be one if _bt_compare() is playing by the rules.
+ * On a non-leaf page, return the last key < scan key (resp. <= scan key).
+ * There must be one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));
@@ -337,8 +336,8 @@ _bt_compare(Relation rel,
int i;
/*
- * Force result ">" if target item is first data item on an internal
- * page --- see NOTE above.
+ * Force result ">" if target item is first data item on an internal page
+ * --- see NOTE above.
*/
if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1;
@@ -347,15 +346,15 @@ _bt_compare(Relation rel,
itup = &(btitem->bti_itup);
/*
- * The scan key is set up with the attribute number associated with
- * each term in the key. It is important that, if the index is
- * multi-key, the scan contain the first k key attributes, and that
- * they be in order. If you think about how multi-key ordering works,
- * you'll understand why this is.
+ * The scan key is set up with the attribute number associated with each
+ * term in the key. It is important that, if the index is multi-key, the
+ * scan contain the first k key attributes, and that they be in order. If
+ * you think about how multi-key ordering works, you'll understand why
+ * this is.
*
- * We don't test for violation of this condition here, however. The
- * initial setup for the index scan had better have gotten it right
- * (see _bt_first).
+ * We don't test for violation of this condition here, however. The initial
+ * setup for the index scan had better have gotten it right (see
+ * _bt_first).
*/
for (i = 1; i <= keysz; i++)
@@ -381,15 +380,15 @@ _bt_compare(Relation rel,
else
{
/*
- * The sk_func needs to be passed the index value as left arg
- * and the sk_argument as right arg (they might be of
- * different types). Since it is convenient for callers to
- * think of _bt_compare as comparing the scankey to the index
- * item, we have to flip the sign of the comparison result.
+ * The sk_func needs to be passed the index value as left arg and
+ * the sk_argument as right arg (they might be of different
+ * types). Since it is convenient for callers to think of
+ * _bt_compare as comparing the scankey to the index item, we have
+ * to flip the sign of the comparison result.
*
- * Note: curious-looking coding is to avoid overflow if
- * comparison function returns INT_MIN. There is no risk of
- * overflow for positive results.
+ * Note: curious-looking coding is to avoid overflow if comparison
+ * function returns INT_MIN. There is no risk of overflow for
+ * positive results.
*/
result = DatumGetInt32(FunctionCall2(&scankey->sk_func,
datum,
@@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
bool goback;
bool continuescan;
ScanKey startKeys[INDEX_MAX_KEYS];
- ScanKeyData scankeys[INDEX_MAX_KEYS];
+ ScanKeyData scankeys[INDEX_MAX_KEYS];
int keysCount = 0;
int i;
StrategyNumber strat_total;
@@ -505,8 +504,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
pgstat_count_index_scan(&scan->xs_pgstat_info);
/*
- * Examine the scan keys and eliminate any redundant keys; also
- * discover how many keys must be matched to continue the scan.
+ * Examine the scan keys and eliminate any redundant keys; also discover
+ * how many keys must be matched to continue the scan.
*/
_bt_preprocess_keys(scan);
@@ -556,9 +555,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur;
/*
- * chosen is the so-far-chosen key for the current attribute, if
- * any. We don't cast the decision in stone until we reach keys
- * for the next attribute.
+ * chosen is the so-far-chosen key for the current attribute, if any.
+ * We don't cast the decision in stone until we reach keys for the
+ * next attribute.
*/
curattr = 1;
chosen = NULL;
@@ -595,9 +594,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
/*
- * Done if that was the last attribute, or if next key
- * is not in sequence (implying no boundary key is available
- * for the next attribute).
+ * Done if that was the last attribute, or if next key is not
+ * in sequence (implying no boundary key is available for the
+ * next attribute).
*/
if (i >= so->numberOfKeys ||
cur->sk_attno != curattr + 1)
@@ -632,17 +631,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
/*
- * If we found no usable boundary keys, we have to start from one end
- * of the tree. Walk down that edge to the first or last key, and
- * scan from there.
+ * If we found no usable boundary keys, we have to start from one end of
+ * the tree. Walk down that edge to the first or last key, and scan from
+ * there.
*/
if (keysCount == 0)
return _bt_endpoint(scan, dir);
/*
* We want to start the scan somewhere within the index. Set up a
- * 3-way-comparison scankey we can use to search for the boundary
- * point we identified above.
+ * 3-way-comparison scankey we can use to search for the boundary point we
+ * identified above.
*/
Assert(keysCount <= INDEX_MAX_KEYS);
for (i = 0; i < keysCount; i++)
@@ -650,16 +649,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKey cur = startKeys[i];
/*
- * _bt_preprocess_keys disallows it, but it's place to add some
- * code later
+ * _bt_preprocess_keys disallows it, but it's place to add some code
+ * later
*/
if (cur->sk_flags & SK_ISNULL)
elog(ERROR, "btree doesn't support is(not)null, yet");
/*
- * If scankey operator is of default subtype, we can use the
- * cached comparison procedure; otherwise gotta look it up in the
- * catalogs.
+ * If scankey operator is of default subtype, we can use the cached
+ * comparison procedure; otherwise gotta look it up in the catalogs.
*/
if (cur->sk_subtype == InvalidOid)
{
@@ -692,13 +690,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
/*
- * Examine the selected initial-positioning strategy to determine
- * exactly where we need to start the scan, and set flag variables to
- * control the code below.
+ * Examine the selected initial-positioning strategy to determine exactly
+ * where we need to start the scan, and set flag variables to control the
+ * code below.
*
- * If nextkey = false, _bt_search and _bt_binsrch will locate the first
- * item >= scan key. If nextkey = true, they will locate the first
- * item > scan key.
+ * If nextkey = false, _bt_search and _bt_binsrch will locate the first item
+ * >= scan key. If nextkey = true, they will locate the first item > scan
+ * key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
@@ -710,10 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTLessStrategyNumber:
/*
- * Find first item >= scankey, then back up one to arrive at
- * last item < scankey. (Note: this positioning strategy is
- * only used for a backward scan, so that is always the
- * correct starting position.)
+ * Find first item >= scankey, then back up one to arrive at last
+ * item < scankey. (Note: this positioning strategy is only used
+ * for a backward scan, so that is always the correct starting
+ * position.)
*/
nextkey = false;
goback = true;
@@ -722,10 +720,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTLessEqualStrategyNumber:
/*
- * Find first item > scankey, then back up one to arrive at
- * last item <= scankey. (Note: this positioning strategy is
- * only used for a backward scan, so that is always the
- * correct starting position.)
+ * Find first item > scankey, then back up one to arrive at last
+ * item <= scankey. (Note: this positioning strategy is only used
+ * for a backward scan, so that is always the correct starting
+ * position.)
*/
nextkey = true;
goback = true;
@@ -734,14 +732,14 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTEqualStrategyNumber:
/*
- * If a backward scan was specified, need to start with last
- * equal item not first one.
+ * If a backward scan was specified, need to start with last equal
+ * item not first one.
*/
if (ScanDirectionIsBackward(dir))
{
/*
- * This is the same as the <= strategy. We will check at
- * the end whether the found item is actually =.
+ * This is the same as the <= strategy. We will check at the
+ * end whether the found item is actually =.
*/
nextkey = true;
goback = true;
@@ -749,8 +747,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
else
{
/*
- * This is the same as the >= strategy. We will check at
- * the end whether the found item is actually =.
+ * This is the same as the >= strategy. We will check at the
+ * end whether the found item is actually =.
*/
nextkey = false;
goback = false;
@@ -813,24 +811,24 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ItemPointerSet(current, blkno, offnum);
/*
- * If nextkey = false, we are positioned at the first item >= scan
- * key, or possibly at the end of a page on which all the existing
- * items are less than the scan key and we know that everything on
- * later pages is greater than or equal to scan key.
+ * If nextkey = false, we are positioned at the first item >= scan key, or
+ * possibly at the end of a page on which all the existing items are less
+ * than the scan key and we know that everything on later pages is greater
+ * than or equal to scan key.
*
* If nextkey = true, we are positioned at the first item > scan key, or
- * possibly at the end of a page on which all the existing items are
- * less than or equal to the scan key and we know that everything on
- * later pages is greater than scan key.
+ * possibly at the end of a page on which all the existing items are less
+ * than or equal to the scan key and we know that everything on later
+ * pages is greater than scan key.
*
- * The actually desired starting point is either this item or the prior
- * one, or in the end-of-page case it's the first item on the next
- * page or the last item on this page. We apply _bt_step if needed to
- * get to the right place.
+ * The actually desired starting point is either this item or the prior one,
+ * or in the end-of-page case it's the first item on the next page or the
+ * last item on this page. We apply _bt_step if needed to get to the
+ * right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
- * direction or the other), then there are no matches so we just
- * return false.
+ * direction or the other), then there are no matches so we just return
+ * false.
*/
if (goback)
{
@@ -902,8 +900,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
BlockNumber blkno;
/*
- * Don't use ItemPointerGetOffsetNumber or you risk to get assertion
- * due to ability of ip_posid to be equal 0.
+ * Don't use ItemPointerGetOffsetNumber or you risk to get assertion due
+ * to ability of ip_posid to be equal 0.
*/
offnum = current->ip_posid;
@@ -954,9 +952,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* Walk left to the next page with data. This is much more
* complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to
- * it, plus the possibility that the page we were on gets
- * deleted after we leave it. See nbtree/README for details.
+ * that the page to our left splits while we are in flight to it,
+ * plus the possibility that the page we were on gets deleted
+ * after we leave it. See nbtree/README for details.
*/
for (;;)
{
@@ -973,9 +971,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and not empty. Else loop
- * back and do it all again.
+ * Okay, we managed to move left to a non-deleted page. Done
+ * if it's not half-dead and not empty. Else loop back and do
+ * it all again.
*/
if (!P_IGNORE(opaque))
{
@@ -1043,15 +1041,14 @@ _bt_walk_left(Relation rel, Buffer buf)
/*
* If this isn't the page we want, walk right till we find what we
- * want --- but go no more than four hops (an arbitrary limit). If
- * we don't find the correct page by then, the most likely bet is
- * that the original page got deleted and isn't in the sibling
- * chain at all anymore, not that its left sibling got split more
- * than four times.
+ * want --- but go no more than four hops (an arbitrary limit). If we
+ * don't find the correct page by then, the most likely bet is that
+ * the original page got deleted and isn't in the sibling chain at all
+ * anymore, not that its left sibling got split more than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
- * must reject half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here, because
+ * half-dead pages are still in the sibling chain. Caller must reject
+ * half-dead pages if wanted.
*/
tries = 0;
for (;;)
@@ -1077,9 +1074,9 @@ _bt_walk_left(Relation rel, Buffer buf)
{
/*
* It was deleted. Move right to first nondeleted page (there
- * must be one); that is the page that has acquired the
- * deleted one's keyspace, so stepping left from it will take
- * us where we want to be.
+ * must be one); that is the page that has acquired the deleted
+ * one's keyspace, so stepping left from it will take us where we
+ * want to be.
*/
for (;;)
{
@@ -1095,16 +1092,16 @@ _bt_walk_left(Relation rel, Buffer buf)
}
/*
- * Now return to top of loop, resetting obknum to point to
- * this nondeleted page, and try again.
+ * Now return to top of loop, resetting obknum to point to this
+ * nondeleted page, and try again.
*/
}
else
{
/*
- * It wasn't deleted; the explanation had better be that the
- * page to the left got split or deleted. Without this check,
- * we'd go into an infinite loop if there's anything wrong.
+ * It wasn't deleted; the explanation had better be that the page
+ * to the left got split or deleted. Without this check, we'd go
+ * into an infinite loop if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
elog(ERROR, "could not find left sibling in \"%s\"",
@@ -1137,8 +1134,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
/*
* If we are looking for a leaf page, okay to descend from fast root;
- * otherwise better descend from true root. (There is no point in
- * being smarter about intermediate levels.)
+ * otherwise better descend from true root. (There is no point in being
+ * smarter about intermediate levels.)
*/
if (level == 0)
buf = _bt_getroot(rel, BT_READ);
@@ -1159,8 +1156,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
/*
* If we landed on a deleted page, step right to find a live page
* (there must be one). Also, if we want the rightmost page, step
- * right if needed to get to it (this could happen if the page
- * split since we obtained a pointer to it).
+ * right if needed to get to it (this could happen if the page split
+ * since we obtained a pointer to it).
*/
while (P_IGNORE(opaque) ||
(rightmost && !P_RIGHTMOST(opaque)))
@@ -1228,9 +1225,9 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so = (BTScanOpaque) scan->opaque;
/*
- * Scan down to the leftmost or rightmost leaf page. This is a
- * simplified version of _bt_search(). We don't maintain a stack
- * since we know we won't need it.
+ * Scan down to the leftmost or rightmost leaf page. This is a simplified
+ * version of _bt_search(). We don't maintain a stack since we know we
+ * won't need it.
*/
buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));
@@ -1261,8 +1258,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
- if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
- * page */
+ if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
start = P_FIRSTDATAKEY(opaque);
}
else
@@ -1276,8 +1272,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
- * Left/rightmost page could be empty due to deletions, if so step
- * till we find a nonempty page.
+ * Left/rightmost page could be empty due to deletions, if so step till we
+ * find a nonempty page.
*/
if (start > maxoff)
{
@@ -1291,8 +1287,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
itup = &(btitem->bti_itup);
/*
- * Okay, we are on the first or last tuple. Does it pass all the
- * quals?
+ * Okay, we are on the first or last tuple. Does it pass all the quals?
*/
if (_bt_checkkeys(scan, itup, dir, &continuescan))
{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index ee5acee5c3e..6ee5d42b63a 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.94 2005/08/11 13:22:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,12 +99,10 @@ typedef struct BTPageState
{
Page btps_page; /* workspace for page building */
BlockNumber btps_blkno; /* block # to write this page at */
- BTItem btps_minkey; /* copy of minimum key (first item) on
- * page */
+ BTItem btps_minkey; /* copy of minimum key (first item) on page */
OffsetNumber btps_lastoff; /* last item offset loaded */
uint32 btps_level; /* tree level (0 = leaf) */
- Size btps_full; /* "full" if less than this much free
- * space */
+ Size btps_full; /* "full" if less than this much free space */
struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
@@ -157,21 +155,21 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
btspool->isunique = isunique;
/*
- * We size the sort area as maintenance_work_mem rather than work_mem
- * to speed index creation. This should be OK since a single backend
- * can't run multiple index creations in parallel. Note that creation
- * of a unique index actually requires two BTSpool objects. We expect
- * that the second one (for dead tuples) won't get very full, so we
- * give it only work_mem.
+ * We size the sort area as maintenance_work_mem rather than work_mem to
+ * speed index creation. This should be OK since a single backend can't
+ * run multiple index creations in parallel. Note that creation of a
+ * unique index actually requires two BTSpool objects. We expect that the
+ * second one (for dead tuples) won't get very full, so we give it only
+ * work_mem.
*/
btKbytes = isdead ? work_mem : maintenance_work_mem;
btspool->sortstate = tuplesort_begin_index(index, isunique,
btKbytes, false);
/*
- * Currently, tuplesort provides sort functions on IndexTuples. If we
- * kept anything in a BTItem other than a regular IndexTuple, we'd
- * need to modify tuplesort to understand BTItems as such.
+ * Currently, tuplesort provides sort functions on IndexTuples. If we kept
+ * anything in a BTItem other than a regular IndexTuple, we'd need to
+ * modify tuplesort to understand BTItems as such.
*/
Assert(sizeof(BTItemData) == sizeof(IndexTupleData));
@@ -222,8 +220,8 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
wstate.index = btspool->index;
/*
- * We need to log index creation in WAL iff WAL archiving is enabled
- * AND it's not a temp index.
+ * We need to log index creation in WAL iff WAL archiving is enabled AND
+ * it's not a temp index.
*/
wstate.btws_use_wal = XLogArchivingActive() && !wstate.index->rd_istemp;
@@ -313,9 +311,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
/*
* If we have to write pages nonsequentially, fill in the space with
* zeroes until we come back and overwrite. This is not logically
- * necessary on standard Unix filesystems (unwritten space will read
- * as zeroes anyway), but it should help to avoid fragmentation. The
- * dummy pages aren't WAL-logged though.
+ * necessary on standard Unix filesystems (unwritten space will read as
+ * zeroes anyway), but it should help to avoid fragmentation. The dummy
+ * pages aren't WAL-logged though.
*/
while (blkno > wstate->btws_pages_written)
{
@@ -328,8 +326,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
/*
* Now write the page. We say isTemp = true even if it's not a temp
- * index, because there's no need for smgr to schedule an fsync for
- * this write; we'll do it ourselves before ending the build.
+ * index, because there's no need for smgr to schedule an fsync for this
+ * write; we'll do it ourselves before ending the build.
*/
smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);
@@ -483,15 +481,15 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
btisz = MAXALIGN(btisz);
/*
- * Check whether the item can fit on a btree page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) We actually need to
- * be able to fit three items on every page, so restrict any one item
- * to 1/3 the per-page available space. Note that at this point, btisz
- * doesn't include the ItemId.
+ * Check whether the item can fit on a btree page at all. (Eventually, we
+ * ought to try to apply TOAST methods if not.) We actually need to be
+ * able to fit three items on every page, so restrict any one item to 1/3
+ * the per-page available space. Note that at this point, btisz doesn't
+ * include the ItemId.
*
- * NOTE: similar code appears in _bt_insertonpg() to defend against
- * oversize items being inserted into an already-existing index. But
- * during creation of an index, we don't go through there.
+ * NOTE: similar code appears in _bt_insertonpg() to defend against oversize
+ * items being inserted into an already-existing index. But during
+ * creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,
@@ -499,9 +497,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
errmsg("index row size %lu exceeds btree maximum, %lu",
(unsigned long) btisz,
(unsigned long) BTMaxItemSize(npage)),
- errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
- "Consider a function index of an MD5 hash of the value, "
- "or use full text indexing.")));
+ errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
+ "Consider a function index of an MD5 hash of the value, "
+ "or use full text indexing.")));
if (pgspc < btisz || pgspc < state->btps_full)
{
@@ -523,11 +521,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
/*
* We copy the last item on the page into the new page, and then
- * rearrange the old page so that the 'last item' becomes its high
- * key rather than a true data item. There had better be at least
- * two items on the page already, else the page would be empty of
- * useful data. (Hence, we must allow pages to be packed at least
- * 2/3rds full; the 70% figure used above is close to minimum.)
+ * rearrange the old page so that the 'last item' becomes its high key
+ * rather than a true data item. There had better be at least two
+ * items on the page already, else the page would be empty of useful
+ * data. (Hence, we must allow pages to be packed at least 2/3rds
+ * full; the 70% figure used above is close to minimum.)
*/
Assert(last_off > P_FIRSTKEY);
ii = PageGetItemId(opage, last_off);
@@ -544,8 +542,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
/*
* Link the old page into its parent, using its minimum key. If we
- * don't have a parent, we have to create one; this adds a new
- * btree level.
+ * don't have a parent, we have to create one; this adds a new btree
+ * level.
*/
if (state->btps_next == NULL)
state->btps_next = _bt_pagestate(wstate, state->btps_level + 1);
@@ -557,9 +555,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
pfree(state->btps_minkey);
/*
- * Save a copy of the minimum key for the new page. We have to
- * copy it off the old page, not the new one, in case we are not
- * at leaf level.
+ * Save a copy of the minimum key for the new page. We have to copy
+ * it off the old page, not the new one, in case we are not at leaf
+ * level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@@ -576,8 +574,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
}
/*
- * Write out the old page. We never need to touch it again, so we
- * can free the opage workspace too.
+ * Write out the old page. We never need to touch it again, so we can
+ * free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -588,10 +586,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
}
/*
- * If the new item is the first for its page, stash a copy for later.
- * Note this will only happen for the first item on a level; on later
- * pages, the first item for a page is copied from the prior page in
- * the code above.
+ * If the new item is the first for its page, stash a copy for later. Note
+ * this will only happen for the first item on a level; on later pages,
+ * the first item for a page is copied from the prior page in the code
+ * above.
*/
if (last_off == P_HIKEY)
{
@@ -636,9 +634,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
* We have to link the last page on this level to somewhere.
*
* If we're at the top, it's the root, so attach it to the metapage.
- * Otherwise, add an entry for it to its parent using its minimum
- * key. This may cause the last page of the parent level to
- * split, but that's not a problem -- we haven't gotten to it yet.
+ * Otherwise, add an entry for it to its parent using its minimum key.
+ * This may cause the last page of the parent level to split, but
+ * that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == NULL)
{
@@ -657,8 +655,8 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
}
/*
- * This is the rightmost page, so the ItemId array needs to be
- * slid back one slot. Then we can dump out the page.
+ * This is the rightmost page, so the ItemId array needs to be slid
+ * back one slot. Then we can dump out the page.
*/
_bt_slideleft(s->btps_page);
_bt_blwritepage(wstate, s->btps_page, s->btps_blkno);
@@ -667,9 +665,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
/*
* As the last step in the process, construct the metapage and make it
- * point to the new root (unless we had no data at all, in which case
- * it's set to point to "P_NONE"). This changes the index to the
- * "valid" state by filling in a valid magic number in the metapage.
+ * point to the new root (unless we had no data at all, in which case it's
+ * set to point to "P_NONE"). This changes the index to the "valid" state
+ * by filling in a valid magic number in the metapage.
*/
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, rootblkno, rootlevel);
@@ -748,7 +746,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
compare = DatumGetInt32(FunctionCall2(&entry->sk_func,
attrDatum1,
- attrDatum2));
+ attrDatum2));
if (compare > 0)
{
load1 = false;
@@ -772,7 +770,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free)
pfree(bti);
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
- true, &should_free);
+ true, &should_free);
}
else
{
@@ -780,7 +778,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
if (should_free2)
pfree(bti2);
bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate,
- true, &should_free2);
+ true, &should_free2);
}
}
_bt_freeskey(indexScanKey);
@@ -789,7 +787,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
/* merge is unnecessary */
while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
- true, &should_free)) != NULL)
+ true, &should_free)) != NULL)
{
/* When we see first tuple, create first index page */
if (state == NULL)
@@ -805,19 +803,19 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
_bt_uppershutdown(wstate, state);
/*
- * If the index isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp index we don't care
- * since the index will be uninteresting after a crash anyway.)
+ * If the index isn't temp, we must fsync it down to disk before it's safe
+ * to commit the transaction. (For a temp index we don't care since the
+ * index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
* less obvious that we have to do it even if we did WAL-log the index
- * pages. The reason is that since we're building outside shared
- * buffers, a CHECKPOINT occurring during the build has no way to
- * flush the previously written data to disk (indeed it won't know the
- * index even exists). A crash later on would replay WAL from the
- * checkpoint, therefore it wouldn't replay our earlier WAL entries.
- * If we do not fsync those pages here, they might still not be on
- * disk when the crash occurs.
+ * pages. The reason is that since we're building outside shared buffers,
+ * a CHECKPOINT occurring during the build has no way to flush the
+ * previously written data to disk (indeed it won't know the index even
+ * exists). A crash later on would replay WAL from the checkpoint,
+ * therefore it wouldn't replay our earlier WAL entries. If we do not
+ * fsync those pages here, they might still not be on disk when the crash
+ * occurs.
*/
if (!wstate->index->rd_istemp)
smgrimmedsync(wstate->index->rd_smgr);
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 9a5f8d7ac90..269213d21f7 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.63 2005/06/13 23:14:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.64 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
bool null;
/*
- * We can use the cached (default) support procs since no
- * cross-type comparison can be needed.
+ * We can use the cached (default) support procs since no cross-type
+ * comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
@@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel)
FmgrInfo *procinfo;
/*
- * We can use the cached (default) support procs since no
- * cross-type comparison can be needed.
+ * We can use the cached (default) support procs since no cross-type
+ * comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
ScanKeyEntryInitializeWithInfo(&skey[i],
@@ -257,9 +257,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (numberOfKeys == 1)
{
/*
- * We don't use indices for 'A is null' and 'A is not null'
- * currently and 'A < = > <> NULL' will always fail - so qual is
- * not OK if comparison value is NULL. - vadim 03/21/97
+ * We don't use indices for 'A is null' and 'A is not null' currently
+ * and 'A < = > <> NULL' will always fail - so qual is not OK if
+ * comparison value is NULL. - vadim 03/21/97
*/
if (cur->sk_flags & SK_ISNULL)
so->qual_ok = false;
@@ -286,20 +286,20 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] points to the currently best scan key of strategy type i+1,
- * if any is found with a default operator subtype; it is NULL if we
- * haven't yet found such a key for this attr. Scan keys of
- * nondefault subtypes are transferred to the output with no
- * processing except for noting if they are of "=" type.
+ * xform[i] points to the currently best scan key of strategy type i+1, if
+ * any is found with a default operator subtype; it is NULL if we haven't
+ * yet found such a key for this attr. Scan keys of nondefault subtypes
+ * are transferred to the output with no processing except for noting if
+ * they are of "=" type.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
hasOtherTypeEqual = false;
/*
- * Loop iterates from 0 to numberOfKeys inclusive; we use the last
- * pass to handle after-last-key processing. Actual exit from the
- * loop is at the "break" statement below.
+ * Loop iterates from 0 to numberOfKeys inclusive; we use the last pass to
+ * handle after-last-key processing. Actual exit from the loop is at the
+ * "break" statement below.
*/
for (i = 0;; cur++, i++)
{
@@ -319,8 +319,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
}
/*
- * If we are at the end of the keys for a particular attr, finish
- * up processing and emit the cleaned-up keys.
+ * If we are at the end of the keys for a particular attr, finish up
+ * processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@@ -331,9 +331,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
elog(ERROR, "btree index keys must be ordered by attribute");
/*
- * If = has been specified, no other key will be used. In case
- * of key > 2 && key == 1 and so on we have to set qual_ok to
- * false before discarding the other keys.
+ * If = has been specified, no other key will be used. In case of
+ * key > 2 && key == 1 and so on we have to set qual_ok to false
+ * before discarding the other keys.
*/
if (xform[BTEqualStrategyNumber - 1])
{
@@ -411,8 +411,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
}
/*
- * If all attrs before this one had "=", include these keys
- * into the required-keys count.
+ * If all attrs before this one had "=", include these keys into
+ * the required-keys count.
*/
if (priorNumberOfEqualCols == attno - 1)
so->numberOfRequiredKeys = new_numberOfKeys;
@@ -526,11 +526,11 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
/*
- * Since NULLs are sorted after non-NULLs, we know we have
- * reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
- * however, we should keep going.
+ * Since NULLs are sorted after non-NULLs, we know we have reached
+ * the upper limit of the range of values for this index attr. On
+ * a forward scan, we can stop if this qual is one of the "must
+ * match" subset. On a backward scan, however, we should keep
+ * going.
*/
if (ikey < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
@@ -547,24 +547,22 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (!DatumGetBool(test))
{
/*
- * Tuple fails this qual. If it's a required qual, then we
- * may be able to conclude no further tuples will pass,
- * either. We have to look at the scan direction and the qual
- * type.
+ * Tuple fails this qual. If it's a required qual, then we may be
+ * able to conclude no further tuples will pass, either. We have
+ * to look at the scan direction and the qual type.
*
- * Note: the only case in which we would keep going after failing
- * a required qual is if there are partially-redundant quals
- * that _bt_preprocess_keys() was unable to eliminate. For
- * example, given "x > 4 AND x > 10" where both are cross-type
- * comparisons and so not removable, we might start the scan
- * at the x = 4 boundary point. The "x > 10" condition will
- * fail until we pass x = 10, but we must not stop the scan on
- * its account.
+ * Note: the only case in which we would keep going after failing a
+ * required qual is if there are partially-redundant quals that
+ * _bt_preprocess_keys() was unable to eliminate. For example,
+ * given "x > 4 AND x > 10" where both are cross-type comparisons
+ * and so not removable, we might start the scan at the x = 4
+ * boundary point. The "x > 10" condition will fail until we pass
+ * x = 10, but we must not stop the scan on its account.
*
- * Note: because we stop the scan as soon as any required
- * equality qual fails, it is critical that equality quals be
- * used for the initial positioning in _bt_first() when they
- * are available. See comments in _bt_first().
+ * Note: because we stop the scan as soon as any required equality
+ * qual fails, it is critical that equality quals be used for the
+ * initial positioning in _bt_first() when they are available. See
+ * comments in _bt_first().
*/
if (ikey < so->numberOfRequiredKeys)
{
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 078d8529241..61bf93a904b 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.22 2005/06/06 17:01:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.23 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,7 +101,7 @@ _bt_restore_page(Page page, char *from, int len)
(sizeof(BTItemData) - sizeof(IndexTupleData));
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) from, itemsz,
- FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(PANIC, "_bt_restore_page: can't add item to page");
from += itemsz;
}
@@ -136,8 +136,8 @@ _bt_restore_meta(Relation reln, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not
- * essential but it makes the page look compressible to xlog.c.
+ * Set pd_lower just past the end of the metadata. This is not essential
+ * but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
((char *) md + sizeof(BTMetaPageData)) - (char *) metapg;
@@ -181,7 +181,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "btree_insert_redo: block unfound");
page = (Page) BufferGetPage(buffer);
@@ -217,8 +217,8 @@ btree_xlog_insert(bool isleaf, bool ismeta,
if (!isleaf && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
}
@@ -325,8 +325,8 @@ btree_xlog_split(bool onleft, bool isroot,
if (xlrec->level > 0 && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
@@ -655,7 +655,7 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}
diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c
index 199a178c4fd..010a493d20e 100644
--- a/src/backend/access/rtree/rtget.c
+++ b/src/backend/access/rtree/rtget.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.36 2005/10/06 02:29:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtget.c,v 1.37 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,12 +32,12 @@ rtgettuple(PG_FUNCTION_ARGS)
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RTreeScanOpaque so = (RTreeScanOpaque) s->opaque;
- Page page;
+ Page page;
OffsetNumber offnum;
/*
- * If we've already produced a tuple and the executor has informed
- * us that it should be marked "killed", do so now.
+ * If we've already produced a tuple and the executor has informed us that
+ * it should be marked "killed", do so now.
*/
if (s->kill_prior_tuple && ItemPointerIsValid(&(s->currentItemData)))
{
@@ -48,14 +48,13 @@ rtgettuple(PG_FUNCTION_ARGS)
}
/*
- * Get the next tuple that matches the search key; if asked to
- * skip killed tuples, find the first non-killed tuple that
- * matches. Return as soon as we've run out of matches or we've
- * found an acceptable match.
+ * Get the next tuple that matches the search key; if asked to skip killed
+ * tuples, find the first non-killed tuple that matches. Return as soon as
+ * we've run out of matches or we've found an acceptable match.
*/
for (;;)
{
- bool res = rtnext(s, dir);
+ bool res = rtnext(s, dir);
if (res && s->ignore_killed_tuples)
{
@@ -73,7 +72,7 @@ Datum
rtgetmulti(PG_FUNCTION_ARGS)
{
IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
+ ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
int32 max_tids = PG_GETARG_INT32(2);
int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
RTreeScanOpaque so = (RTreeScanOpaque) s->opaque;
@@ -86,7 +85,7 @@ rtgetmulti(PG_FUNCTION_ARGS)
res = rtnext(s, ForwardScanDirection);
if (res && s->ignore_killed_tuples)
{
- Page page;
+ Page page;
OffsetNumber offnum;
offnum = ItemPointerGetOffsetNumber(&(s->currentItemData));
@@ -201,12 +200,11 @@ rtnext(IndexScanDesc s, ScanDirection dir)
blk = ItemPointerGetBlockNumber(&(it->t_tid));
/*
- * Note that we release the pin on the page as we descend
- * down the tree, even though there's a good chance we'll
- * eventually need to re-read the buffer later in this
- * scan. This may or may not be optimal, but it doesn't
- * seem likely to make a huge performance difference
- * either way.
+ * Note that we release the pin on the page as we descend down the
+ * tree, even though there's a good chance we'll eventually need
+ * to re-read the buffer later in this scan. This may or may not
+ * be optimal, but it doesn't seem likely to make a huge
+ * performance difference either way.
*/
so->curbuf = ReleaseAndReadBuffer(so->curbuf, s->indexRelation, blk);
p = BufferGetPage(so->curbuf);
@@ -233,7 +231,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
IndexTuple it;
RTreePageOpaque po;
RTreeScanOpaque so;
- Page p;
+ Page p;
so = (RTreeScanOpaque) s->opaque;
p = BufferGetPage(so->curbuf);
@@ -242,8 +240,8 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
po = (RTreePageOpaque) PageGetSpecialPointer(p);
/*
- * If we modified the index during the scan, we may have a pointer to
- * a ghost tuple, before the scan. If this is the case, back up one.
+ * If we modified the index during the scan, we may have a pointer to a
+ * ghost tuple, before the scan. If this is the case, back up one.
*/
if (so->s_flags & RTS_CURBEFORE)
@@ -277,7 +275,7 @@ findnext(IndexScanDesc s, OffsetNumber n, ScanDirection dir)
}
if (n >= FirstOffsetNumber && n <= maxoff)
- return n; /* found a match on this page */
+ return n; /* found a match on this page */
else
return InvalidOffsetNumber; /* no match, go to next page */
}
diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c
index d8d766f47d4..292dac6a130 100644
--- a/src/backend/access/rtree/rtproc.c
+++ b/src/backend/access/rtree/rtproc.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.42 2004/12/31 21:59:26 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtproc.c,v 1.43 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,8 +146,8 @@ rt_poly_size(PG_FUNCTION_ARGS)
ydim;
/*
- * Can't just use GETARG because of possibility that input is NULL;
- * since POLYGON is toastable, GETARG will try to inspect its value
+ * Can't just use GETARG because of possibility that input is NULL; since
+ * POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)
{
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 3b96b9ebe2d..d684101d261 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.91 2005/08/10 21:36:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,8 +121,8 @@ rtbuild(PG_FUNCTION_ARGS)
initRtstate(&buildstate.rtState, index);
/*
- * We expect to be called exactly once for any index relation. If
- * that's not the case, big trouble's what we have.
+ * We expect to be called exactly once for any index relation. If that's
+ * not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
@@ -175,10 +175,10 @@ rtbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call rtdoinsert
- * directly. Normal access method calls dispatch through rtinsert,
- * which locks the relation for write. This is the right thing to do
- * if you're inserting single tups, but not when you're initializing
- * the whole index at once.
+ * directly. Normal access method calls dispatch through rtinsert, which
+ * locks the relation for write. This is the right thing to do if you're
+ * inserting single tups, but not when you're initializing the whole index
+ * at once.
*/
rtdoinsert(index, itup, &buildstate->rtState);
@@ -226,9 +226,8 @@ rtinsert(PG_FUNCTION_ARGS)
initRtstate(&rtState, r);
/*
- * Since rtree is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
- * here.
+ * Since rtree is not marked "amconcurrent" in pg_am, caller should have
+ * acquired exclusive lock on index relation. We need no locking here.
*/
rtdoinsert(r, itup, &rtState);
@@ -331,7 +330,7 @@ rttighten(Relation r,
p = BufferGetPage(b);
oldud = IndexTupleGetDatum(PageGetItem(p,
- PageGetItemId(p, stk->rts_child)));
+ PageGetItemId(p, stk->rts_child)));
FunctionCall2(&rtstate->sizeFn, oldud,
PointerGetDatum(&old_size));
@@ -342,8 +341,8 @@ rttighten(Relation r,
PointerGetDatum(&newd_size));
/*
- * If newd_size == 0 we have degenerate rectangles, so we don't know
- * if there was any change, so we have to assume there was.
+ * If newd_size == 0 we have degenerate rectangles, so we don't know if
+ * there was any change, so we have to assume there was.
*/
if ((newd_size == 0) || (newd_size != old_size))
{
@@ -370,8 +369,8 @@ rttighten(Relation r,
/*
* The user may be defining an index on variable-sized data (like
* polygons). If so, we need to get a constant-sized datum for
- * insertion on the internal page. We do this by calling the
- * union proc, which is required to return a rectangle.
+ * insertion on the internal page. We do this by calling the union
+ * proc, which is required to return a rectangle.
*/
tdatum = FunctionCall2(&rtstate->unionFn, datum, datum);
@@ -428,8 +427,8 @@ rtdosplit(Relation r,
/*
* The root of the tree is the first block in the relation. If we're
- * about to split the root, we need to do some hocus-pocus to enforce
- * this guarantee.
+ * about to split the root, we need to do some hocus-pocus to enforce this
+ * guarantee.
*/
if (BufferGetBlockNumber(buffer) == P_ROOT)
@@ -459,10 +458,9 @@ rtdosplit(Relation r,
newitemoff = OffsetNumberNext(maxoff);
/*
- * spl_left contains a list of the offset numbers of the tuples that
- * will go to the left page. For each offset number, get the tuple
- * item, then add the item to the left page. Similarly for the right
- * side.
+ * spl_left contains a list of the offset numbers of the tuples that will
+ * go to the left page. For each offset number, get the tuple item, then
+ * add the item to the left page. Similarly for the right side.
*/
/* fill left node */
@@ -525,13 +523,13 @@ rtdosplit(Relation r,
* introduced in its structure by splitting this page.
*
* 2) "Tighten" the bounding box of the pointer to the left page in the
- * parent node in the tree, if any. Since we moved a bunch of stuff
- * off the left page, we expect it to get smaller. This happens in
- * the internal insertion routine.
+ * parent node in the tree, if any. Since we moved a bunch of stuff off
+ * the left page, we expect it to get smaller. This happens in the
+ * internal insertion routine.
*
- * 3) Insert a pointer to the right page in the parent. This may cause
- * the parent to split. If it does, we need to repeat steps one and
- * two for each split node in the tree.
+ * 3) Insert a pointer to the right page in the parent. This may cause the
+ * parent to split. If it does, we need to repeat steps one and two for
+ * each split node in the tree.
*/
/* adjust active scans */
@@ -583,10 +581,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
- * This is a hack. Right now, we force rtree internal keys to be
- * constant size. To fix this, need delete the old key and add both
- * left and right for the two new pages. The insertion of left may
- * force a split if the new left key is bigger than the old key.
+ * This is a hack. Right now, we force rtree internal keys to be constant
+ * size. To fix this, need delete the old key and add both left and right
+ * for the two new pages. The insertion of left may force a split if the
+ * new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@@ -603,8 +601,7 @@ rtintinsert(Relation r,
rttighten(r, stk->rts_parent, newdatum,
IndexTupleAttSize(ltup), rtstate);
rtdosplit(r, b, stk->rts_parent, rtup, rtstate);
- WriteBuffer(b); /* don't forget to release buffer! -
- * 01/31/94 */
+ WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */
}
else
{
@@ -716,16 +713,15 @@ rtpicksplit(Relation r,
int total_num_tuples,
num_tuples_without_seeds,
max_after_split; /* in Guttman's lingo, (M - m) */
- float diff; /* diff between cost of putting tuple left
- * or right */
+ float diff; /* diff between cost of putting tuple left or
+ * right */
SPLITCOST *cost_vector;
int n;
/*
- * First, make sure the new item is not so large that we can't
- * possibly fit it on a page, even by itself. (It's sufficient to
- * make this test here, since any oversize tuple must lead to a page
- * split attempt.)
+ * First, make sure the new item is not so large that we can't possibly
+ * fit it on a page, even by itself. (It's sufficient to make this test
+ * here, since any oversize tuple must lead to a page split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@@ -734,11 +730,10 @@ rtpicksplit(Relation r,
errmsg("index row size %lu exceeds rtree maximum, %lu",
(unsigned long) newitemsz,
(unsigned long) RTPageAvailSpace),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
maxoff = PageGetMaxOffsetNumber(page);
- newitemoff = OffsetNumberNext(maxoff); /* phony index for new
- * item */
+ newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
total_num_tuples = newitemoff;
num_tuples_without_seeds = total_num_tuples - 2;
max_after_split = total_num_tuples / 2; /* works for m = M/2 */
@@ -793,8 +788,7 @@ rtpicksplit(Relation r,
pfree(DatumGetPointer(inter_d));
/*
- * are these a more promising split that what we've already
- * seen?
+ * are these a more promising split that what we've already seen?
*/
if (size_waste > waste || firsttime)
{
@@ -809,10 +803,10 @@ rtpicksplit(Relation r,
if (firsttime)
{
/*
- * There is no possible split except to put the new item on its
- * own page. Since we still have to compute the union rectangles,
- * we play dumb and run through the split algorithm anyway,
- * setting seed_1 = first item on page and seed_2 = new item.
+ * There is no possible split except to put the new item on its own
+ * page. Since we still have to compute the union rectangles, we play
+ * dumb and run through the split algorithm anyway, setting seed_1 =
+ * first item on page and seed_2 = new item.
*/
seed_1 = FirstOffsetNumber;
seed_2 = newitemoff;
@@ -840,25 +834,23 @@ rtpicksplit(Relation r,
/*
* Now split up the regions between the two seeds.
*
- * The cost_vector array will contain hints for determining where each
- * tuple should go. Each record in the array will contain a boolean,
- * choose_left, that indicates which node the tuple prefers to be on,
- * and the absolute difference in cost between putting the tuple in
- * its favored node and in the other node.
+ * The cost_vector array will contain hints for determining where each tuple
+ * should go. Each record in the array will contain a boolean,
+ * choose_left, that indicates which node the tuple prefers to be on, and
+ * the absolute difference in cost between putting the tuple in its
+ * favored node and in the other node.
*
* Later, we will sort the cost_vector in descending order by cost
- * difference, and consider the tuples in that order for placement.
- * That way, the tuples that *really* want to be in one node or the
- * other get to choose first, and the tuples that don't really care
- * choose last.
+ * difference, and consider the tuples in that order for placement. That
+ * way, the tuples that *really* want to be in one node or the other get
+ * to choose first, and the tuples that don't really care choose last.
*
* First, build the cost_vector array. The new index tuple will also be
- * handled in this loop, and represented in the array, with
- * i==newitemoff.
+ * handled in this loop, and represented in the array, with i==newitemoff.
*
- * In the case of variable size tuples it is possible that we only have
- * the two seeds and no other tuples, in which case we don't do any of
- * this cost_vector stuff.
+ * In the case of variable size tuples it is possible that we only have the
+ * two seeds and no other tuples, in which case we don't do any of this
+ * cost_vector stuff.
*/
/* to keep compiler quiet */
@@ -908,13 +900,13 @@ rtpicksplit(Relation r,
}
/*
- * Now make the final decisions about where each tuple will go, and
- * build the vectors to return in the SPLITVEC record.
+ * Now make the final decisions about where each tuple will go, and build
+ * the vectors to return in the SPLITVEC record.
*
- * The cost_vector array contains (descriptions of) all the tuples, in
- * the order that we want to consider them, so we we just iterate
- * through it and place each tuple in left or right nodes, according
- * to the criteria described below.
+ * The cost_vector array contains (descriptions of) all the tuples, in the
+ * order that we want to consider them, so we we just iterate through it
+ * and place each tuple in left or right nodes, according to the criteria
+ * described below.
*/
left = v->spl_left;
@@ -923,8 +915,8 @@ rtpicksplit(Relation r,
v->spl_nright = 0;
/*
- * Place the seeds first. left avail space, left union, right avail
- * space, and right union have already been adjusted for the seeds.
+ * Place the seeds first. left avail space, left union, right avail space,
+ * and right union have already been adjusted for the seeds.
*/
*left++ = seed_1;
@@ -966,32 +958,30 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
- * We prefer the page that shows smaller enlargement of its union
- * area (Guttman's algorithm), but we must take care that at least
- * one page will still have room for the new item after this one
- * is added.
+ * We prefer the page that shows smaller enlargement of its union area
+ * (Guttman's algorithm), but we must take care that at least one page
+ * will still have room for the new item after this one is added.
*
- * (We know that all the old items together can fit on one page, so
- * we need not worry about any other problem than failing to fit
- * the new item.)
+ * (We know that all the old items together can fit on one page, so we
+ * need not worry about any other problem than failing to fit the new
+ * item.)
*
- * Guttman's algorithm actually has two factors to consider (in
- * order): 1. if one node has so many tuples already assigned to
- * it that the other needs all the rest in order to satisfy the
- * condition that neither node has fewer than m tuples, then that
- * is decisive; 2. otherwise, choose the page that shows the
- * smaller enlargement of its union area.
+ * Guttman's algorithm actually has two factors to consider (in order):
+ * 1. if one node has so many tuples already assigned to it that the
+ * other needs all the rest in order to satisfy the condition that
+ * neither node has fewer than m tuples, then that is decisive; 2.
+ * otherwise, choose the page that shows the smaller enlargement of
+ * its union area.
*
- * I have chosen m = M/2, where M is the maximum number of tuples on
- * a page. (Actually, this is only strictly true for fixed size
- * tuples. For variable size tuples, there still might have to be
- * only one tuple on a page, if it is really big. But even with
- * variable size tuples we still try to get m as close as possible
- * to M/2.)
+ * I have chosen m = M/2, where M is the maximum number of tuples on a
+ * page. (Actually, this is only strictly true for fixed size tuples.
+ * For variable size tuples, there still might have to be only one
+ * tuple on a page, if it is really big. But even with variable size
+ * tuples we still try to get m as close as possible to M/2.)
*
- * The question of which page shows the smaller enlargement of its
- * union area has already been answered, and the answer stored in
- * the choose_left field of the SPLITCOST record.
+ * The question of which page shows the smaller enlargement of its union
+ * area has already been answered, and the answer stored in the
+ * choose_left field of the SPLITCOST record.
*/
left_feasible = (left_avail_space >= item_1_sz &&
((left_avail_space - item_1_sz) >= newitemsz ||
@@ -1003,9 +993,8 @@ rtpicksplit(Relation r,
{
/*
* Both feasible, use Guttman's algorithm. First check the m
- * condition described above, and if that doesn't apply,
- * choose the page with the smaller enlargement of its union
- * area.
+ * condition described above, and if that doesn't apply, choose
+ * the page with the smaller enlargement of its union area.
*/
if (v->spl_nleft > max_after_split)
choose_left = false;
@@ -1153,9 +1142,8 @@ rtbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples = 0;
/*
- * Since rtree is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
- * here.
+ * Since rtree is not marked "amconcurrent" in pg_am, caller should have
+ * acquired exclusive lock on index relation. We need no locking here.
*/
/*
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 3f9f81befb0..577c6a64369 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.59 2005/06/24 00:18:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.60 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,11 +123,11 @@ rtrescan(PG_FUNCTION_ARGS)
/*
* Scans on internal pages use different operators than they do on
- * leaf pages. For example, if the user wants all boxes that
- * exactly match (x1,y1,x2,y2), then on internal pages we need to
- * find all boxes that contain (x1,y1,x2,y2). rtstrat.c knows
- * how to pick the opclass member to use for internal pages.
- * In some cases we need to negate the result of the opclass member.
+ * leaf pages. For example, if the user wants all boxes that exactly
+ * match (x1,y1,x2,y2), then on internal pages we need to find all
+ * boxes that contain (x1,y1,x2,y2). rtstrat.c knows how to pick the
+ * opclass member to use for internal pages. In some cases we need to
+ * negate the result of the opclass member.
*/
for (i = 0; i < s->numberOfKeys; i++)
{
@@ -333,9 +333,9 @@ ReleaseResources_rtree(void)
RTScanList next;
/*
- * Note: this should be a no-op during normal query shutdown. However,
- * in an abort situation ExecutorEnd is not called and so there may be
- * open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However, in
+ * an abort situation ExecutorEnd is not called and so there may be open
+ * index scans to clean up.
*/
prev = NULL;
@@ -440,8 +440,7 @@ adjustiptr(IndexScanDesc s,
else
{
/*
- * remember that we're before the current
- * tuple
+ * remember that we're before the current tuple
*/
ItemPointerSet(iptr, blkno, FirstOffsetNumber);
if (iptr == &(s->currentItemData))
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 4a2e1f55927..f29f460ade5 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.32 2005/08/20 23:26:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.33 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -222,14 +222,14 @@ StartupCLOG(void)
/*
* Zero out the remainder of the current clog page. Under normal
* circumstances it should be zeroes already, but it seems at least
- * theoretically possible that XLOG replay will have settled on a
- * nextXID value that is less than the last XID actually used and
- * marked by the previous database lifecycle (since subtransaction
- * commit writes clog but makes no WAL entry). Let's just be safe.
- * (We need not worry about pages beyond the current one, since those
- * will be zeroed when first used. For the same reason, there is no
- * need to do anything when nextXid is exactly at a page boundary; and
- * it's likely that the "current" page doesn't exist yet in that case.)
+ * theoretically possible that XLOG replay will have settled on a nextXID
+ * value that is less than the last XID actually used and marked by the
+ * previous database lifecycle (since subtransaction commit writes clog
+ * but makes no WAL entry). Let's just be safe. (We need not worry about
+ * pages beyond the current one, since those will be zeroed when first
+ * used. For the same reason, there is no need to do anything when
+ * nextXid is exactly at a page boundary; and it's likely that the
+ * "current" page doesn't exist yet in that case.)
*/
if (TransactionIdToPgIndex(xid) != 0)
{
@@ -325,8 +325,8 @@ TruncateCLOG(TransactionId oldestXact)
int cutoffPage;
/*
- * The cutoff point is the start of the segment containing oldestXact.
- * We pass the *page* containing oldestXact to SimpleLruTruncate.
+ * The cutoff point is the start of the segment containing oldestXact. We
+ * pass the *page* containing oldestXact to SimpleLruTruncate.
*/
cutoffPage = TransactionIdToPage(oldestXact);
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 1adaebb6d80..ffe14ed6bf1 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -4,15 +4,15 @@
* PostgreSQL multi-transaction-log manager
*
* The pg_multixact manager is a pg_clog-like manager that stores an array
- * of TransactionIds for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. A share-locked tuple stores a
+ * of TransactionIds for each MultiXactId. It is a fundamental part of the
+ * shared-row-lock implementation. A share-locked tuple stores a
* MultiXactId in its Xmax, and a transaction that needs to wait for the
* tuple to be unlocked can sleep on the potentially-several TransactionIds
* that compose the MultiXactId.
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -32,7 +32,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.8 2005/08/20 23:26:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.9 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,13 +59,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because both MultiXactOffsets and TransactionIds are 32 bits and
* wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
+ * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
* explicit notice of that fact in this module, except when comparing segment
* and page numbers in TruncateMultiXact
* (see MultiXact{Offset,Member}PagePrecedes).
@@ -92,11 +92,11 @@
static SlruCtlData MultiXactOffsetCtlData;
static SlruCtlData MultiXactMemberCtlData;
-#define MultiXactOffsetCtl (&MultiXactOffsetCtlData)
-#define MultiXactMemberCtl (&MultiXactMemberCtlData)
+#define MultiXactOffsetCtl (&MultiXactOffsetCtlData)
+#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -105,50 +105,48 @@ static SlruCtlData MultiXactMemberCtlData;
typedef struct MultiXactStateData
{
/* next-to-be-assigned MultiXactId */
- MultiXactId nextMXact;
+ MultiXactId nextMXact;
/* next-to-be-assigned offset */
- MultiXactOffset nextOffset;
+ MultiXactOffset nextOffset;
/* the Offset SLRU area was last truncated at this MultiXactId */
- MultiXactId lastTruncationPoint;
+ MultiXactId lastTruncationPoint;
/*
- * Per-backend data starts here. We have two arrays stored in
- * the area immediately following the MultiXactStateData struct.
- * Each is indexed by BackendId. (Note: valid BackendIds run from 1 to
- * MaxBackends; element zero of each array is never used.)
+ * Per-backend data starts here. We have two arrays stored in the area
+ * immediately following the MultiXactStateData struct. Each is indexed by
+ * BackendId. (Note: valid BackendIds run from 1 to MaxBackends; element
+ * zero of each array is never used.)
*
- * OldestMemberMXactId[k] is the oldest MultiXactId each backend's
- * current transaction(s) could possibly be a member of, or
- * InvalidMultiXactId when the backend has no live transaction that
- * could possibly be a member of a MultiXact. Each backend sets its
- * entry to the current nextMXact counter just before first acquiring a
- * shared lock in a given transaction, and clears it at transaction end.
- * (This works because only during or after acquiring a shared lock
- * could an XID possibly become a member of a MultiXact, and that
- * MultiXact would have to be created during or after the lock
- * acquisition.)
+ * OldestMemberMXactId[k] is the oldest MultiXactId each backend's current
+ * transaction(s) could possibly be a member of, or InvalidMultiXactId
+ * when the backend has no live transaction that could possibly be a
+ * member of a MultiXact. Each backend sets its entry to the current
+ * nextMXact counter just before first acquiring a shared lock in a given
+ * transaction, and clears it at transaction end. (This works because only
+ * during or after acquiring a shared lock could an XID possibly become a
+ * member of a MultiXact, and that MultiXact would have to be created
+ * during or after the lock acquisition.)
*
- * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
- * current transaction(s) think is potentially live, or InvalidMultiXactId
- * when not in a transaction or not in a transaction that's paid any
- * attention to MultiXacts yet. This is computed when first needed in
- * a given transaction, and cleared at transaction end. We can compute
- * it as the minimum of the valid OldestMemberMXactId[] entries at the
- * time we compute it (using nextMXact if none are valid). Each backend
- * is required not to attempt to access any SLRU data for MultiXactIds
- * older than its own OldestVisibleMXactId[] setting; this is necessary
- * because the checkpointer could truncate away such data at any instant.
+ * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
+ * transaction(s) think is potentially live, or InvalidMultiXactId when
+ * not in a transaction or not in a transaction that's paid any attention
+ * to MultiXacts yet. This is computed when first needed in a given
+ * transaction, and cleared at transaction end. We can compute it as the
+ * minimum of the valid OldestMemberMXactId[] entries at the time we
+ * compute it (using nextMXact if none are valid). Each backend is
+ * required not to attempt to access any SLRU data for MultiXactIds older
+ * than its own OldestVisibleMXactId[] setting; this is necessary because
+ * the checkpointer could truncate away such data at any instant.
*
- * The checkpointer can compute the safe truncation point as the oldest
- * valid value among all the OldestMemberMXactId[] and
- * OldestVisibleMXactId[] entries, or nextMXact if none are valid.
- * Clearly, it is not possible for any later-computed OldestVisibleMXactId
- * value to be older than this, and so there is no risk of truncating
- * data that is still needed.
+ * The checkpointer can compute the safe truncation point as the oldest valid
+ * value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
+ * entries, or nextMXact if none are valid. Clearly, it is not possible
+ * for any later-computed OldestVisibleMXactId value to be older than
+ * this, and so there is no risk of truncating data that is still needed.
*/
- MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
+ MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
/* Pointers to the state data in shared memory */
@@ -176,13 +174,13 @@ static MultiXactId *OldestVisibleMXactId;
typedef struct mXactCacheEnt
{
struct mXactCacheEnt *next;
- MultiXactId multi;
- int nxids;
- TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */
+ MultiXactId multi;
+ int nxids;
+ TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */
} mXactCacheEnt;
-static mXactCacheEnt *MXactCache = NULL;
-static MemoryContext MXactContext = NULL;
+static mXactCacheEnt *MXactCache = NULL;
+static MemoryContext MXactContext = NULL;
#ifdef MULTIXACT_DEBUG
@@ -201,14 +199,15 @@ static MemoryContext MXactContext = NULL;
static void MultiXactIdSetOldestVisible(void);
static MultiXactId CreateMultiXactId(int nxids, TransactionId *xids);
static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
- int nxids, TransactionId *xids);
+ int nxids, TransactionId *xids);
static MultiXactId GetNewMultiXactId(int nxids, MultiXactOffset *offset);
/* MultiXact cache management */
static MultiXactId mXactCacheGetBySet(int nxids, TransactionId *xids);
-static int mXactCacheGetById(MultiXactId multi, TransactionId **xids);
+static int mXactCacheGetById(MultiXactId multi, TransactionId **xids);
static void mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids);
-static int xidComparator(const void *arg1, const void *arg2);
+static int xidComparator(const void *arg1, const void *arg2);
+
#ifdef MULTIXACT_DEBUG
static char *mxid_to_string(MultiXactId multi, int nxids, TransactionId *xids);
#endif
@@ -220,7 +219,7 @@ static bool MultiXactOffsetPagePrecedes(int page1, int page2);
static bool MultiXactMemberPagePrecedes(int page1, int page2);
static bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2);
static bool MultiXactOffsetPrecedes(MultiXactOffset offset1,
- MultiXactOffset offset2);
+ MultiXactOffset offset2);
static void ExtendMultiXactOffset(MultiXactId multi);
static void ExtendMultiXactMember(MultiXactOffset offset, int nmembers);
static void TruncateMultiXact(void);
@@ -239,8 +238,8 @@ static void WriteMZeroPageXlogRec(int pageno, uint8 info);
MultiXactId
MultiXactIdCreate(TransactionId xid1, TransactionId xid2)
{
- MultiXactId newMulti;
- TransactionId xids[2];
+ MultiXactId newMulti;
+ TransactionId xids[2];
AssertArg(TransactionIdIsValid(xid1));
AssertArg(TransactionIdIsValid(xid2));
@@ -248,9 +247,9 @@ MultiXactIdCreate(TransactionId xid1, TransactionId xid2)
Assert(!TransactionIdEquals(xid1, xid2));
/*
- * Note: unlike MultiXactIdExpand, we don't bother to check that both
- * XIDs are still running. In typical usage, xid2 will be our own XID
- * and the caller just did a check on xid1, so it'd be wasted effort.
+ * Note: unlike MultiXactIdExpand, we don't bother to check that both XIDs
+ * are still running. In typical usage, xid2 will be our own XID and the
+ * caller just did a check on xid1, so it'd be wasted effort.
*/
xids[0] = xid1;
@@ -281,12 +280,12 @@ MultiXactIdCreate(TransactionId xid1, TransactionId xid2)
MultiXactId
MultiXactIdExpand(MultiXactId multi, TransactionId xid)
{
- MultiXactId newMulti;
- TransactionId *members;
- TransactionId *newMembers;
- int nmembers;
- int i;
- int j;
+ MultiXactId newMulti;
+ TransactionId *members;
+ TransactionId *newMembers;
+ int nmembers;
+ int i;
+ int j;
AssertArg(MultiXactIdIsValid(multi));
AssertArg(TransactionIdIsValid(xid));
@@ -313,8 +312,8 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
}
/*
- * If the TransactionId is already a member of the MultiXactId,
- * just return the existing MultiXactId.
+ * If the TransactionId is already a member of the MultiXactId, just
+ * return the existing MultiXactId.
*/
for (i = 0; i < nmembers; i++)
{
@@ -329,9 +328,9 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
/*
* Determine which of the members of the MultiXactId are still running,
- * and use them to create a new one. (Removing dead members is just
- * an optimization, but a useful one. Note we have the same race
- * condition here as above: j could be 0 at the end of the loop.)
+ * and use them to create a new one. (Removing dead members is just an
+ * optimization, but a useful one. Note we have the same race condition
+ * here as above: j could be 0 at the end of the loop.)
*/
newMembers = (TransactionId *)
palloc(sizeof(TransactionId) * (nmembers + 1));
@@ -355,7 +354,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
/*
* MultiXactIdIsRunning
- * Returns whether a MultiXactId is "running".
+ * Returns whether a MultiXactId is "running".
*
* We return true if at least one member of the given MultiXactId is still
* running. Note that a "false" result is certain not to change,
@@ -365,9 +364,9 @@ bool
MultiXactIdIsRunning(MultiXactId multi)
{
TransactionId *members;
- TransactionId myXid;
- int nmembers;
- int i;
+ TransactionId myXid;
+ int nmembers;
+ int i;
debug_elog3(DEBUG2, "IsRunning %u?", multi);
@@ -394,7 +393,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -436,19 +435,19 @@ MultiXactIdSetOldestMember(void)
/*
* You might think we don't need to acquire a lock here, since
- * fetching and storing of TransactionIds is probably atomic,
- * but in fact we do: suppose we pick up nextMXact and then
- * lose the CPU for a long time. Someone else could advance
- * nextMXact, and then another someone else could compute an
- * OldestVisibleMXactId that would be after the value we are
- * going to store when we get control back. Which would be wrong.
+ * fetching and storing of TransactionIds is probably atomic, but in
+ * fact we do: suppose we pick up nextMXact and then lose the CPU for
+ * a long time. Someone else could advance nextMXact, and then
+ * another someone else could compute an OldestVisibleMXactId that
+ * would be after the value we are going to store when we get control
+ * back. Which would be wrong.
*/
LWLockAcquire(MultiXactGenLock, LW_EXCLUSIVE);
/*
* We have to beware of the possibility that nextMXact is in the
- * wrapped-around state. We don't fix the counter itself here,
- * but we must be sure to store a valid value in our array entry.
+ * wrapped-around state. We don't fix the counter itself here, but we
+ * must be sure to store a valid value in our array entry.
*/
nextMXact = MultiXactState->nextMXact;
if (nextMXact < FirstMultiXactId)
@@ -475,7 +474,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -485,14 +484,14 @@ MultiXactIdSetOldestVisible(void)
if (!MultiXactIdIsValid(OldestVisibleMXactId[MyBackendId]))
{
MultiXactId oldestMXact;
- int i;
+ int i;
LWLockAcquire(MultiXactGenLock, LW_EXCLUSIVE);
/*
* We have to beware of the possibility that nextMXact is in the
- * wrapped-around state. We don't fix the counter itself here,
- * but we must be sure to store a valid value in our array entry.
+ * wrapped-around state. We don't fix the counter itself here, but we
+ * must be sure to store a valid value in our array entry.
*/
oldestMXact = MultiXactState->nextMXact;
if (oldestMXact < FirstMultiXactId)
@@ -535,17 +534,17 @@ void
MultiXactIdWait(MultiXactId multi)
{
TransactionId *members;
- int nmembers;
+ int nmembers;
nmembers = GetMultiXactIdMembers(multi, &members);
if (nmembers >= 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
- TransactionId member = members[i];
+ TransactionId member = members[i];
debug_elog4(DEBUG2, "MultiXactIdWait: waiting for %d (%u)",
i, member);
@@ -564,19 +563,19 @@ MultiXactIdWait(MultiXactId multi)
bool
ConditionalMultiXactIdWait(MultiXactId multi)
{
- bool result = true;
+ bool result = true;
TransactionId *members;
- int nmembers;
+ int nmembers;
nmembers = GetMultiXactIdMembers(multi, &members);
if (nmembers >= 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
- TransactionId member = members[i];
+ TransactionId member = members[i];
debug_elog4(DEBUG2, "ConditionalMultiXactIdWait: trying %d (%u)",
i, member);
@@ -596,7 +595,7 @@ ConditionalMultiXactIdWait(MultiXactId multi)
/*
* CreateMultiXactId
- * Make a new MultiXactId
+ * Make a new MultiXactId
*
* Make XLOG, SLRU and cache entries for a new MultiXactId, recording the
* given TransactionIds as members. Returns the newly created MultiXactId.
@@ -606,7 +605,7 @@ ConditionalMultiXactIdWait(MultiXactId multi)
static MultiXactId
CreateMultiXactId(int nxids, TransactionId *xids)
{
- MultiXactId multi;
+ MultiXactId multi;
MultiXactOffset offset;
XLogRecData rdata[2];
xl_multixact_create xlrec;
@@ -641,15 +640,15 @@ CreateMultiXactId(int nxids, TransactionId *xids)
/*
* Make an XLOG entry describing the new MXID.
*
- * Note: we need not flush this XLOG entry to disk before proceeding.
- * The only way for the MXID to be referenced from any data page is
- * for heap_lock_tuple() to have put it there, and heap_lock_tuple()
- * generates an XLOG record that must follow ours. The normal LSN
- * interlock between the data page and that XLOG record will ensure
- * that our XLOG record reaches disk first. If the SLRU members/offsets
- * data reaches disk sooner than the XLOG record, we do not care because
- * we'll overwrite it with zeroes unless the XLOG record is there too;
- * see notes at top of this file.
+ * Note: we need not flush this XLOG entry to disk before proceeding. The
+ * only way for the MXID to be referenced from any data page is for
+ * heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
+ * an XLOG record that must follow ours. The normal LSN interlock between
+ * the data page and that XLOG record will ensure that our XLOG record
+ * reaches disk first. If the SLRU members/offsets data reaches disk
+ * sooner than the XLOG record, we do not care because we'll overwrite it
+ * with zeroes unless the XLOG record is there too; see notes at top of
+ * this file.
*/
xlrec.mid = multi;
xlrec.moff = offset;
@@ -702,9 +701,9 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
/*
* Note: we pass the MultiXactId to SimpleLruReadPage as the "transaction"
* to complain about if there's any I/O error. This is kinda bogus, but
- * since the errors will always give the full pathname, it should be
- * clear enough that a MultiXactId is really involved. Perhaps someday
- * we'll take the trouble to generalize the slru.c error reporting code.
+ * since the errors will always give the full pathname, it should be clear
+ * enough that a MultiXactId is really involved. Perhaps someday we'll
+ * take the trouble to generalize the slru.c error reporting code.
*/
slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, multi);
offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
@@ -750,7 +749,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -761,7 +760,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
static MultiXactId
GetNewMultiXactId(int nxids, MultiXactOffset *offset)
{
- MultiXactId result;
+ MultiXactId result;
debug_elog3(DEBUG2, "GetNew: for %d xids", nxids);
@@ -785,8 +784,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* Advance counter. As in GetNewTransactionId(), this must not happen
* until after ExtendMultiXactOffset has succeeded!
*
- * We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * We don't care about MultiXactId wraparound here; it will be handled by the
+ * next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that.
*/
@@ -809,7 +808,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
/*
* GetMultiXactIdMembers
- * Returns the set of TransactionIds that make up a MultiXactId
+ * Returns the set of TransactionIds that make up a MultiXactId
*
* We return -1 if the MultiXactId is too old to possibly have any members
* still running; in that case we have not actually looked them up, and
@@ -822,13 +821,13 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
int prev_pageno;
int entryno;
int slotno;
- MultiXactOffset *offptr;
- MultiXactOffset offset;
+ MultiXactOffset *offptr;
+ MultiXactOffset offset;
int length;
int i;
- MultiXactId nextMXact;
- MultiXactId tmpMXact;
- MultiXactOffset nextOffset;
+ MultiXactId nextMXact;
+ MultiXactId tmpMXact;
+ MultiXactOffset nextOffset;
TransactionId *ptr;
debug_elog3(DEBUG2, "GetMembers: asked for %u", multi);
@@ -850,13 +849,13 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* We check known limits on MultiXact before resorting to the SLRU area.
*
- * An ID older than our OldestVisibleMXactId[] entry can't possibly still
- * be running, and we'd run the risk of trying to read already-truncated
- * SLRU data if we did try to examine it.
+ * An ID older than our OldestVisibleMXactId[] entry can't possibly still be
+ * running, and we'd run the risk of trying to read already-truncated SLRU
+ * data if we did try to examine it.
*
- * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
- * silently assume that such an ID is no longer running.
+ * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is seen,
+ * it implies undetected ID wraparound has occurred. We just silently
+ * assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
* Also, we can examine our own OldestVisibleMXactId without the lock,
@@ -880,9 +879,9 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
}
/*
- * Before releasing the lock, save the current counter values, because
- * the target MultiXactId may be just one less than nextMXact. We will
- * need to use nextOffset as the endpoint if so.
+ * Before releasing the lock, save the current counter values, because the
+ * target MultiXactId may be just one less than nextMXact. We will need
+ * to use nextOffset as the endpoint if so.
*/
nextMXact = MultiXactState->nextMXact;
nextOffset = MultiXactState->nextOffset;
@@ -902,11 +901,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* How many members do we need to read? If we are at the end of the
- * assigned MultiXactIds, use the offset just saved above. Else we
- * need to check the MultiXactId following ours.
+ * assigned MultiXactIds, use the offset just saved above. Else we need
+ * to check the MultiXactId following ours.
*
- * Use the same increment rule as GetNewMultiXactId(), that is, don't
- * handle wraparound explicitly until needed.
+ * Use the same increment rule as GetNewMultiXactId(), that is, don't handle
+ * wraparound explicitly until needed.
*/
tmpMXact = multi + 1;
@@ -974,9 +973,9 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* mXactCacheGetBySet
- * returns a MultiXactId from the cache based on the set of
- * TransactionIds that compose it, or InvalidMultiXactId if
- * none matches.
+ * returns a MultiXactId from the cache based on the set of
+ * TransactionIds that compose it, or InvalidMultiXactId if
+ * none matches.
*
* This is helpful, for example, if two transactions want to lock a huge
* table. By using the cache, the second will use the same MultiXactId
@@ -988,7 +987,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
static MultiXactId
mXactCacheGetBySet(int nxids, TransactionId *xids)
{
- mXactCacheEnt *entry;
+ mXactCacheEnt *entry;
debug_elog3(DEBUG2, "CacheGet: looking for %s",
mxid_to_string(InvalidMultiXactId, nxids, xids));
@@ -1015,8 +1014,8 @@ mXactCacheGetBySet(int nxids, TransactionId *xids)
/*
* mXactCacheGetById
- * returns the composing TransactionId set from the cache for a
- * given MultiXactId, if present.
+ * returns the composing TransactionId set from the cache for a
+ * given MultiXactId, if present.
*
* If successful, *xids is set to the address of a palloc'd copy of the
* TransactionId set. Return value is number of members, or -1 on failure.
@@ -1024,7 +1023,7 @@ mXactCacheGetBySet(int nxids, TransactionId *xids)
static int
mXactCacheGetById(MultiXactId multi, TransactionId **xids)
{
- mXactCacheEnt *entry;
+ mXactCacheEnt *entry;
debug_elog3(DEBUG2, "CacheGet: looking for %u", multi);
@@ -1032,7 +1031,7 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids)
{
if (entry->multi == multi)
{
- TransactionId *ptr;
+ TransactionId *ptr;
Size size;
size = sizeof(TransactionId) * entry->nxids;
@@ -1042,7 +1041,7 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids)
memcpy(ptr, entry->xids, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
- mxid_to_string(multi, entry->nxids, entry->xids));
+ mxid_to_string(multi, entry->nxids, entry->xids));
return entry->nxids;
}
}
@@ -1053,12 +1052,12 @@ mXactCacheGetById(MultiXactId multi, TransactionId **xids)
/*
* mXactCachePut
- * Add a new MultiXactId and its composing set into the local cache.
+ * Add a new MultiXactId and its composing set into the local cache.
*/
static void
mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids)
{
- mXactCacheEnt *entry;
+ mXactCacheEnt *entry;
debug_elog3(DEBUG2, "CachePut: storing %s",
mxid_to_string(multi, nxids, xids));
@@ -1092,7 +1091,7 @@ mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids)
/*
* xidComparator
- * qsort comparison function for XIDs
+ * qsort comparison function for XIDs
*
* We don't need to use wraparound comparison for XIDs, and indeed must
* not do so since that does not respect the triangle inequality! Any
@@ -1101,8 +1100,8 @@ mXactCachePut(MultiXactId multi, int nxids, TransactionId *xids)
static int
xidComparator(const void *arg1, const void *arg2)
{
- TransactionId xid1 = * (const TransactionId *) arg1;
- TransactionId xid2 = * (const TransactionId *) arg2;
+ TransactionId xid1 = *(const TransactionId *) arg1;
+ TransactionId xid2 = *(const TransactionId *) arg2;
if (xid1 > xid2)
return 1;
@@ -1115,8 +1114,9 @@ xidComparator(const void *arg1, const void *arg2)
static char *
mxid_to_string(MultiXactId multi, int nxids, TransactionId *xids)
{
- char *str = palloc(15 * (nxids + 1) + 4);
- int i;
+ char *str = palloc(15 * (nxids + 1) + 4);
+ int i;
+
snprintf(str, 47, "%u %d[%u", multi, nxids, xids[0]);
for (i = 1; i < nxids; i++)
@@ -1137,18 +1137,18 @@ void
AtEOXact_MultiXact(void)
{
/*
- * Reset our OldestMemberMXactId and OldestVisibleMXactId values,
- * both of which should only be valid while within a transaction.
+ * Reset our OldestMemberMXactId and OldestVisibleMXactId values, both of
+ * which should only be valid while within a transaction.
*
- * We assume that storing a MultiXactId is atomic and so we need
- * not take MultiXactGenLock to do this.
+ * We assume that storing a MultiXactId is atomic and so we need not take
+ * MultiXactGenLock to do this.
*/
OldestMemberMXactId[MyBackendId] = InvalidMultiXactId;
OldestVisibleMXactId[MyBackendId] = InvalidMultiXactId;
/*
- * Discard the local MultiXactId cache. Since MXactContext was created
- * as a child of TopTransactionContext, we needn't delete it explicitly.
+ * Discard the local MultiXactId cache. Since MXactContext was created as
+ * a child of TopTransactionContext, we needn't delete it explicitly.
*/
MXactContext = NULL;
MXactCache = NULL;
@@ -1156,7 +1156,7 @@ AtEOXact_MultiXact(void)
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1178,7 +1178,7 @@ MultiXactShmemSize(void)
void
MultiXactShmemInit(void)
{
- bool found;
+ bool found;
debug_elog2(DEBUG2, "Shared Memory Init for MultiXact");
@@ -1205,8 +1205,8 @@ MultiXactShmemInit(void)
Assert(found);
/*
- * Set up array pointers. Note that perBackendXactIds[0] is wasted
- * space since we only use indexes 1..MaxBackends in each array.
+ * Set up array pointers. Note that perBackendXactIds[0] is wasted space
+ * since we only use indexes 1..MaxBackends in each array.
*/
OldestMemberMXactId = MultiXactState->perBackendXactIds;
OldestVisibleMXactId = OldestMemberMXactId + MaxBackends;
@@ -1214,7 +1214,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1287,7 +1287,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
* This must be called ONCE during postmaster or standalone-backend startup.
*
* StartupXLOG has already established nextMXact/nextOffset by calling
- * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
+ * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
* may already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@@ -1311,14 +1311,14 @@ StartupMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes
- * in StartupCLOG() for motivation.
+ * Zero out the remainder of the current offsets page. See notes in
+ * StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
if (entryno != 0)
{
int slotno;
- MultiXactOffset *offptr;
+ MultiXactOffset *offptr;
slotno = SimpleLruReadPage(MultiXactOffsetCtl, pageno, multi);
offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno];
@@ -1341,14 +1341,14 @@ StartupMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes
- * in StartupCLOG() for motivation.
+ * Zero out the remainder of the current members page. See notes in
+ * StartupCLOG() for motivation.
*/
entryno = MXOffsetToMemberEntry(offset);
if (entryno != 0)
{
int slotno;
- TransactionId *xidptr;
+ TransactionId *xidptr;
slotno = SimpleLruReadPage(MultiXactMemberCtl, pageno, offset);
xidptr = (TransactionId *) MultiXactMemberCtl->shared->page_buffer[slotno];
@@ -1499,14 +1499,14 @@ static void
ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
{
/*
- * It's possible that the members span more than one page of the
- * members file, so we loop to ensure we consider each page. The
- * coding is not optimal if the members span several pages, but
- * that seems unusual enough to not worry much about.
+ * It's possible that the members span more than one page of the members
+ * file, so we loop to ensure we consider each page. The coding is not
+ * optimal if the members span several pages, but that seems unusual
+ * enough to not worry much about.
*/
while (nmembers > 0)
{
- int entryno;
+ int entryno;
/*
* Only zero when at first entry of a page.
@@ -1514,7 +1514,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
entryno = MXOffsetToMemberEntry(offset);
if (entryno == 0)
{
- int pageno;
+ int pageno;
pageno = MXOffsetToMemberPage(offset);
@@ -1536,7 +1536,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
* Remove all MultiXactOffset and MultiXactMember segments before the oldest
* ones still of interest.
*
- * This is called only during checkpoints. We assume no more than one
+ * This is called only during checkpoints. We assume no more than one
* backend does this at a time.
*
* XXX do we have any issues with needing to checkpoint here?
@@ -1545,23 +1545,23 @@ static void
TruncateMultiXact(void)
{
MultiXactId nextMXact;
- MultiXactOffset nextOffset;
+ MultiXactOffset nextOffset;
MultiXactId oldestMXact;
- MultiXactOffset oldestOffset;
+ MultiXactOffset oldestOffset;
int cutoffPage;
int i;
/*
- * First, compute where we can safely truncate. Per notes above,
- * this is the oldest valid value among all the OldestMemberMXactId[] and
+ * First, compute where we can safely truncate. Per notes above, this is
+ * the oldest valid value among all the OldestMemberMXactId[] and
* OldestVisibleMXactId[] entries, or nextMXact if none are valid.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
/*
* We have to beware of the possibility that nextMXact is in the
- * wrapped-around state. We don't fix the counter itself here,
- * but we must be sure to use a valid value in our calculation.
+ * wrapped-around state. We don't fix the counter itself here, but we
+ * must be sure to use a valid value in our calculation.
*/
nextMXact = MultiXactState->nextMXact;
if (nextMXact < FirstMultiXactId)
@@ -1597,9 +1597,9 @@ TruncateMultiXact(void)
return;
/*
- * We need to determine where to truncate MultiXactMember. If we
- * found a valid oldest MultiXactId, read its starting offset;
- * otherwise we use the nextOffset value we saved above.
+ * We need to determine where to truncate MultiXactMember. If we found a
+ * valid oldest MultiXactId, read its starting offset; otherwise we use
+ * the nextOffset value we saved above.
*/
if (oldestMXact == nextMXact)
oldestOffset = nextOffset;
@@ -1608,7 +1608,7 @@ TruncateMultiXact(void)
int pageno;
int slotno;
int entryno;
- MultiXactOffset *offptr;
+ MultiXactOffset *offptr;
LWLockAcquire(MultiXactOffsetControlLock, LW_EXCLUSIVE);
@@ -1624,8 +1624,8 @@ TruncateMultiXact(void)
}
/*
- * The cutoff point is the start of the segment containing oldestMXact.
- * We pass the *page* containing oldestMXact to SimpleLruTruncate.
+ * The cutoff point is the start of the segment containing oldestMXact. We
+ * pass the *page* containing oldestMXact to SimpleLruTruncate.
*/
cutoffPage = MultiXactIdToOffsetPage(oldestMXact);
@@ -1677,8 +1677,8 @@ MultiXactOffsetPagePrecedes(int page1, int page2)
static bool
MultiXactMemberPagePrecedes(int page1, int page2)
{
- MultiXactOffset offset1;
- MultiXactOffset offset2;
+ MultiXactOffset offset1;
+ MultiXactOffset offset2;
offset1 = ((MultiXactOffset) page1) * MULTIXACT_MEMBERS_PER_PAGE;
offset2 = ((MultiXactOffset) page2) * MULTIXACT_MEMBERS_PER_PAGE;
@@ -1695,7 +1695,7 @@ MultiXactMemberPagePrecedes(int page1, int page2)
static bool
MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
{
- int32 diff = (int32) (multi1 - multi2);
+ int32 diff = (int32) (multi1 - multi2);
return (diff < 0);
}
@@ -1706,7 +1706,7 @@ MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
static bool
MultiXactOffsetPrecedes(MultiXactOffset offset1, MultiXactOffset offset2)
{
- int32 diff = (int32) (offset1 - offset2);
+ int32 diff = (int32) (offset1 - offset2);
return (diff < 0);
}
@@ -1783,9 +1783,9 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
MultiXactAdvanceNextMXact(xlrec->mid + 1, xlrec->moff + xlrec->nxids);
/*
- * Make sure nextXid is beyond any XID mentioned in the record.
- * This should be unnecessary, since any XID found here ought to
- * have other evidence in the XLOG, but let's be safe.
+ * Make sure nextXid is beyond any XID mentioned in the record. This
+ * should be unnecessary, since any XID found here ought to have other
+ * evidence in the XLOG, but let's be safe.
*/
max_xid = record->xl_xid;
for (i = 0; i < xlrec->nxids; i++)
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 67d9d3f54f3..5891890b764 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -48,7 +48,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.27 2005/08/20 23:26:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.28 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -186,8 +186,8 @@ SimpleLruInit(SlruCtl ctl, const char *name,
Assert(found);
/*
- * Initialize the unshared control struct, including directory path.
- * We assume caller set PagePrecedes.
+ * Initialize the unshared control struct, including directory path. We
+ * assume caller set PagePrecedes.
*/
ctl->shared = shared;
ctl->do_fsync = true; /* default behavior */
@@ -351,11 +351,11 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
/*
- * Check to see if someone else already did the write, or took the
- * buffer away from us. If so, do nothing. NOTE: we really should
- * never see WRITE_IN_PROGRESS here, since that state should only
- * occur while the writer is holding the buffer lock. But accept it
- * so that we have a recovery path if a writer aborts.
+ * Check to see if someone else already did the write, or took the buffer
+ * away from us. If so, do nothing. NOTE: we really should never see
+ * WRITE_IN_PROGRESS here, since that state should only occur while the
+ * writer is holding the buffer lock. But accept it so that we have a
+ * recovery path if a writer aborts.
*/
if (shared->page_number[slotno] != pageno ||
(shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@@ -368,15 +368,14 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
/*
* Mark the slot write-busy. After this point, a transaction status
- * update on this page will mark it dirty again. NB: we are assuming
- * that read/write of the page status field is atomic, since we change
- * the state while not holding control lock. However, we cannot set
- * this state any sooner, or we'd possibly fool a previous writer into
- * thinking he's successfully dumped the page when he hasn't.
- * (Scenario: other writer starts, page is redirtied, we come along
- * and set WRITE_IN_PROGRESS again, other writer completes and sets
- * CLEAN because redirty info has been lost, then we think it's clean
- * too.)
+ * update on this page will mark it dirty again. NB: we are assuming that
+ * read/write of the page status field is atomic, since we change the
+ * state while not holding control lock. However, we cannot set this
+ * state any sooner, or we'd possibly fool a previous writer into thinking
+ * he's successfully dumped the page when he hasn't. (Scenario: other
+ * writer starts, page is redirtied, we come along and set
+ * WRITE_IN_PROGRESS again, other writer completes and sets CLEAN because
+ * redirty info has been lost, then we think it's clean too.)
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
@@ -436,8 +435,8 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the
- * case where the file doesn't exist, and return zeroes instead.
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * where the file doesn't exist, and return zeroes instead.
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@@ -528,17 +527,16 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
{
/*
* If the file doesn't already exist, we should create it. It is
- * possible for this to need to happen when writing a page that's
- * not first in its segment; we assume the OS can cope with that.
- * (Note: it might seem that it'd be okay to create files only
- * when SimpleLruZeroPage is called for the first page of a
- * segment. However, if after a crash and restart the REDO logic
- * elects to replay the log from a checkpoint before the latest
- * one, then it's possible that we will get commands to set
- * transaction status of transactions that have already been
- * truncated from the commit log. Easiest way to deal with that is
- * to accept references to nonexistent files here and in
- * SlruPhysicalReadPage.)
+ * possible for this to need to happen when writing a page that's not
+ * first in its segment; we assume the OS can cope with that. (Note:
+ * it might seem that it'd be okay to create files only when
+ * SimpleLruZeroPage is called for the first page of a segment.
+ * However, if after a crash and restart the REDO logic elects to
+ * replay the log from a checkpoint before the latest one, then it's
+ * possible that we will get commands to set transaction status of
+ * transactions that have already been truncated from the commit log.
+ * Easiest way to deal with that is to accept references to
+ * nonexistent files here and in SlruPhysicalReadPage.)
*/
SlruFileName(ctl, path, segno);
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -635,49 +633,49 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("could not open file \"%s\": %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("could not create file \"%s\": %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("could not seek in file \"%s\" to offset %u: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("could not seek in file \"%s\" to offset %u: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("could not read from file \"%s\" at offset %u: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("could not read from file \"%s\" at offset %u: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("could not write to file \"%s\" at offset %u: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("could not write to file \"%s\" at offset %u: %m",
+ path, offset)));
break;
case SLRU_FSYNC_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("could not fsync file \"%s\": %m",
path)));
break;
case SLRU_CLOSE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("could not close file \"%s\": %m",
path)));
break;
@@ -723,8 +721,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
}
/*
- * If we find any EMPTY slot, just select that one. Else locate
- * the least-recently-used slot that isn't the latest page.
+ * If we find any EMPTY slot, just select that one. Else locate the
+ * least-recently-used slot that isn't the latest page.
*/
for (slotno = 0; slotno < NUM_SLRU_BUFFERS; slotno++)
{
@@ -745,10 +743,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
return bestslot;
/*
- * We need to do I/O. Normal case is that we have to write it
- * out, but it's possible in the worst case to have selected a
- * read-busy page. In that case we use SimpleLruReadPage to wait
- * for the read to complete.
+ * We need to do I/O. Normal case is that we have to write it out,
+ * but it's possible in the worst case to have selected a read-busy
+ * page. In that case we use SimpleLruReadPage to wait for the read
+ * to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
@@ -757,9 +755,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
SimpleLruWritePage(ctl, bestslot, NULL);
/*
- * Now loop back and try again. This is the easiest way of
- * dealing with corner cases such as the victim page being
- * re-dirtied while we wrote it.
+ * Now loop back and try again. This is the easiest way of dealing
+ * with corner cases such as the victim page being re-dirtied while we
+ * wrote it.
*/
}
}
@@ -789,9 +787,9 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
SimpleLruWritePage(ctl, slotno, &fdata);
/*
- * When called during a checkpoint, we cannot assert that the slot
- * is clean now, since another process might have re-dirtied it
- * already. That's okay.
+ * When called during a checkpoint, we cannot assert that the slot is
+ * clean now, since another process might have re-dirtied it already.
+ * That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@@ -841,10 +839,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
/*
- * Scan shared memory and remove any pages preceding the cutoff page,
- * to ensure we won't rewrite them later. (Since this is normally
- * called in or just after a checkpoint, any dirty pages should have
- * been flushed already ... we're just being extra careful here.)
+ * Scan shared memory and remove any pages preceding the cutoff page, to
+ * ensure we won't rewrite them later. (Since this is normally called in
+ * or just after a checkpoint, any dirty pages should have been flushed
+ * already ... we're just being extra careful here.)
*/
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
@@ -852,16 +850,16 @@ restart:;
/*
* While we are holding the lock, make an important safety check: the
- * planned cutoff point must be <= the current endpoint page.
- * Otherwise we have already wrapped around, and proceeding with the
- * truncation would risk removing the current segment.
+ * planned cutoff point must be <= the current endpoint page. Otherwise we
+ * have already wrapped around, and proceeding with the truncation would
+ * risk removing the current segment.
*/
if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
{
LWLockRelease(shared->ControlLock);
ereport(LOG,
- (errmsg("could not truncate directory \"%s\": apparent wraparound",
- ctl->Dir)));
+ (errmsg("could not truncate directory \"%s\": apparent wraparound",
+ ctl->Dir)));
return;
}
@@ -882,9 +880,9 @@ restart:;
}
/*
- * Hmm, we have (or may have) I/O operations acting on the page,
- * so we've got to wait for them to finish and then start again.
- * This is the same logic as in SlruSelectLRUPage.
+ * Hmm, we have (or may have) I/O operations acting on the page, so
+ * we've got to wait for them to finish and then start again. This is
+ * the same logic as in SlruSelectLRUPage.
*/
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[slotno],
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 9b450350360..7671eb6a45e 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.10 2005/08/20 23:26:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -234,9 +234,8 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
/*
* Since we don't expect pg_subtrans to be valid across crashes, we
* initialize the currently-active page(s) to zeroes during startup.
- * Whenever we advance into a new page, ExtendSUBTRANS will likewise
- * zero the new page without regard to whatever was previously on
- * disk.
+ * Whenever we advance into a new page, ExtendSUBTRANS will likewise zero
+ * the new page without regard to whatever was previously on disk.
*/
LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE);
@@ -262,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do
- * it merely as a debugging aid.
+ * This is not actually necessary from a correctness point of view. We do it
+ * merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@@ -277,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do
- * it merely to improve the odds that writing of dirty pages is done
- * by the checkpoint process and not by backends.
+ * This is not actually necessary from a correctness point of view. We do it
+ * merely to improve the odds that writing of dirty pages is done by the
+ * checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}
@@ -329,8 +328,8 @@ TruncateSUBTRANS(TransactionId oldestXact)
int cutoffPage;
/*
- * The cutoff point is the start of the segment containing oldestXact.
- * We pass the *page* containing oldestXact to SimpleLruTruncate.
+ * The cutoff point is the start of the segment containing oldestXact. We
+ * pass the *page* containing oldestXact to SimpleLruTruncate.
*/
cutoffPage = TransactionIdToPage(oldestXact);
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 5fa6f82daf4..59852520521 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.65 2005/06/17 22:32:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -54,8 +54,8 @@ TransactionLogFetch(TransactionId transactionId)
XidStatus xidstatus;
/*
- * Before going to the commit log manager, check our single item cache
- * to see if we didn't just check the transaction status a moment ago.
+ * Before going to the commit log manager, check our single item cache to
+ * see if we didn't just check the transaction status a moment ago.
*/
if (TransactionIdEquals(transactionId, cachedFetchXid))
return cachedFetchXidStatus;
@@ -78,8 +78,8 @@ TransactionLogFetch(TransactionId transactionId)
xidstatus = TransactionIdGetStatus(transactionId);
/*
- * DO NOT cache status for unfinished or sub-committed transactions!
- * We only cache status that is guaranteed not to change.
+ * DO NOT cache status for unfinished or sub-committed transactions! We
+ * only cache status that is guaranteed not to change.
*/
if (xidstatus != TRANSACTION_STATUS_IN_PROGRESS &&
xidstatus != TRANSACTION_STATUS_SUB_COMMITTED)
@@ -169,18 +169,18 @@ TransactionIdDidCommit(TransactionId transactionId)
return true;
/*
- * If it's marked subcommitted, we have to check the parent
- * recursively. However, if it's older than TransactionXmin, we can't
- * look at pg_subtrans; instead assume that the parent crashed without
- * cleaning up its children.
+ * If it's marked subcommitted, we have to check the parent recursively.
+ * However, if it's older than TransactionXmin, we can't look at
+ * pg_subtrans; instead assume that the parent crashed without cleaning up
+ * its children.
*
- * Originally we Assert'ed that the result of SubTransGetParent was
- * not zero. However with the introduction of prepared transactions,
- * there can be a window just after database startup where we do not
- * have complete knowledge in pg_subtrans of the transactions after
- * TransactionXmin. StartupSUBTRANS() has ensured that any missing
- * information will be zeroed. Since this case should not happen under
- * normal conditions, it seems reasonable to emit a WARNING for it.
+ * Originally we Assert'ed that the result of SubTransGetParent was not zero.
+ * However with the introduction of prepared transactions, there can be a
+ * window just after database startup where we do not have complete
+ * knowledge in pg_subtrans of the transactions after TransactionXmin.
+ * StartupSUBTRANS() has ensured that any missing information will be
+ * zeroed. Since this case should not happen under normal conditions, it
+ * seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{
@@ -225,10 +225,10 @@ TransactionIdDidAbort(TransactionId transactionId)
return true;
/*
- * If it's marked subcommitted, we have to check the parent
- * recursively. However, if it's older than TransactionXmin, we can't
- * look at pg_subtrans; instead assume that the parent crashed without
- * cleaning up its children.
+ * If it's marked subcommitted, we have to check the parent recursively.
+ * However, if it's older than TransactionXmin, we can't look at
+ * pg_subtrans; instead assume that the parent crashed without cleaning up
+ * its children.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 05590da14ed..0ece348e184 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.14 2005/10/13 22:55:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.15 2005/10/15 02:49:09 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -64,7 +64,7 @@
#define TWOPHASE_DIR "pg_twophase"
/* GUC variable, can't be changed after startup */
-int max_prepared_xacts = 5;
+int max_prepared_xacts = 5;
/*
* This struct describes one global transaction that is in prepared state
@@ -97,7 +97,7 @@ int max_prepared_xacts = 5;
* entry will remain in prepXacts until recycled. We can detect recyclable
* entries by checking for valid = false and locking_xid no longer active.
*
- * typedef struct GlobalTransactionData *GlobalTransaction appears in
+ * typedef struct GlobalTransactionData *GlobalTransaction appears in
* twophase.h
*/
#define GIDSIZE 200
@@ -105,12 +105,12 @@ int max_prepared_xacts = 5;
typedef struct GlobalTransactionData
{
PGPROC proc; /* dummy proc */
- TimestampTz prepared_at; /* time of preparation */
+ TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
TransactionId locking_xid; /* top-level XID of backend working on xact */
bool valid; /* TRUE if fully prepared */
- char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
+ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;
/*
@@ -123,30 +123,30 @@ typedef struct TwoPhaseStateData
SHMEM_OFFSET freeGXacts;
/* Number of valid prepXacts entries. */
- int numPrepXacts;
+ int numPrepXacts;
/*
* There are max_prepared_xacts items in this array, but C wants a
* fixed-size array.
*/
- GlobalTransaction prepXacts[1]; /* VARIABLE LENGTH ARRAY */
+ GlobalTransaction prepXacts[1]; /* VARIABLE LENGTH ARRAY */
} TwoPhaseStateData; /* VARIABLE LENGTH STRUCT */
static TwoPhaseStateData *TwoPhaseState;
static void RecordTransactionCommitPrepared(TransactionId xid,
- int nchildren,
- TransactionId *children,
- int nrels,
- RelFileNode *rels);
+ int nchildren,
+ TransactionId *children,
+ int nrels,
+ RelFileNode *rels);
static void RecordTransactionAbortPrepared(TransactionId xid,
- int nchildren,
- TransactionId *children,
- int nrels,
- RelFileNode *rels);
+ int nchildren,
+ TransactionId *children,
+ int nrels,
+ RelFileNode *rels);
static void ProcessRecords(char *bufptr, TransactionId xid,
- const TwoPhaseCallback callbacks[]);
+ const TwoPhaseCallback callbacks[]);
/*
@@ -171,7 +171,7 @@ TwoPhaseShmemSize(void)
void
TwoPhaseShmemInit(void)
{
- bool found;
+ bool found;
TwoPhaseState = ShmemInitStruct("Prepared Transaction Table",
TwoPhaseShmemSize(),
@@ -190,7 +190,7 @@ TwoPhaseShmemInit(void)
*/
gxacts = (GlobalTransaction)
((char *) TwoPhaseState +
- MAXALIGN(offsetof(TwoPhaseStateData, prepXacts) +
+ MAXALIGN(offsetof(TwoPhaseStateData, prepXacts) +
sizeof(GlobalTransaction) * max_prepared_xacts));
for (i = 0; i < max_prepared_xacts; i++)
{
@@ -205,7 +205,7 @@ TwoPhaseShmemInit(void)
/*
* MarkAsPreparing
- * Reserve the GID for the given transaction.
+ * Reserve the GID for the given transaction.
*
* Internally, this creates a gxact struct and puts it into the active array.
* NOTE: this is also used when reloading a gxact after a crash; so avoid
@@ -215,8 +215,8 @@ GlobalTransaction
MarkAsPreparing(TransactionId xid, const char *gid,
TimestampTz prepared_at, Oid owner, Oid databaseid)
{
- GlobalTransaction gxact;
- int i;
+ GlobalTransaction gxact;
+ int i;
if (strlen(gid) >= GIDSIZE)
ereport(ERROR,
@@ -227,10 +227,9 @@ MarkAsPreparing(TransactionId xid, const char *gid,
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
/*
- * First, find and recycle any gxacts that failed during prepare.
- * We do this partly to ensure we don't mistakenly say their GIDs
- * are still reserved, and partly so we don't fail on out-of-slots
- * unnecessarily.
+ * First, find and recycle any gxacts that failed during prepare. We do
+ * this partly to ensure we don't mistakenly say their GIDs are still
+ * reserved, and partly so we don't fail on out-of-slots unnecessarily.
*/
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
@@ -360,13 +359,13 @@ MarkAsPrepared(GlobalTransaction gxact)
static GlobalTransaction
LockGXact(const char *gid, Oid user)
{
- int i;
+ int i;
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
+ GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
/* Ignore not-yet-valid GIDs */
if (!gxact->valid)
@@ -380,15 +379,15 @@ LockGXact(const char *gid, Oid user)
if (TransactionIdIsActive(gxact->locking_xid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("prepared transaction with identifier \"%s\" is busy",
- gid)));
+ errmsg("prepared transaction with identifier \"%s\" is busy",
+ gid)));
gxact->locking_xid = InvalidTransactionId;
}
if (user != gxact->owner && !superuser_arg(user))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to finish prepared transaction"),
+ errmsg("permission denied to finish prepared transaction"),
errhint("Must be superuser or the user that prepared the transaction.")));
/* OK for me to lock it */
@@ -403,8 +402,8 @@ LockGXact(const char *gid, Oid user)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("prepared transaction with identifier \"%s\" does not exist",
- gid)));
+ errmsg("prepared transaction with identifier \"%s\" does not exist",
+ gid)));
/* NOTREACHED */
return NULL;
@@ -419,7 +418,7 @@ LockGXact(const char *gid, Oid user)
static void
RemoveGXact(GlobalTransaction gxact)
{
- int i;
+ int i;
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
@@ -449,7 +448,7 @@ RemoveGXact(GlobalTransaction gxact)
/*
* TransactionIdIsPrepared
* True iff transaction associated with the identifier is prepared
- * for two-phase commit
+ * for two-phase commit
*
* Note: only gxacts marked "valid" are considered; but notice we do not
* check the locking status.
@@ -459,14 +458,14 @@ RemoveGXact(GlobalTransaction gxact)
static bool
TransactionIdIsPrepared(TransactionId xid)
{
- bool result = false;
- int i;
+ bool result = false;
+ int i;
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
+ GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
if (gxact->valid && gxact->proc.xid == xid)
{
@@ -496,8 +495,8 @@ static int
GetPreparedTransactionList(GlobalTransaction *gxacts)
{
GlobalTransaction array;
- int num;
- int i;
+ int num;
+ int i;
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
@@ -526,13 +525,13 @@ GetPreparedTransactionList(GlobalTransaction *gxacts)
typedef struct
{
GlobalTransaction array;
- int ngxacts;
- int currIdx;
+ int ngxacts;
+ int currIdx;
} Working_State;
/*
* pg_prepared_xact
- * Produce a view with one row per prepared transaction.
+ * Produce a view with one row per prepared transaction.
*
* This function is here so we don't have to export the
* GlobalTransactionData struct definition.
@@ -552,8 +551,7 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * Switch to memory context appropriate for multiple function
- * calls
+ * Switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -574,8 +572,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
/*
- * Collect all the 2PC status information that we will format and
- * send out as a result set.
+ * Collect all the 2PC status information that we will format and send
+ * out as a result set.
*/
status = (Working_State *) palloc(sizeof(Working_State));
funcctx->user_fctx = (void *) status;
@@ -644,7 +642,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
+ GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
if (gxact->proc.xid == xid)
{
@@ -665,7 +663,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
}
/************************************************************************/
-/* State file support */
+/* State file support */
/************************************************************************/
#define TwoPhaseFilePath(path, xid) \
@@ -674,14 +672,14 @@ TwoPhaseGetDummyProc(TransactionId xid)
/*
* 2PC state file format:
*
- * 1. TwoPhaseFileHeader
- * 2. TransactionId[] (subtransactions)
+ * 1. TwoPhaseFileHeader
+ * 2. TransactionId[] (subtransactions)
* 3. RelFileNode[] (files to be deleted at commit)
* 4. RelFileNode[] (files to be deleted at abort)
- * 5. TwoPhaseRecordOnDisk
- * 6. ...
- * 7. TwoPhaseRecordOnDisk (end sentinel, rmid == TWOPHASE_RM_END_ID)
- * 8. CRC32
+ * 5. TwoPhaseRecordOnDisk
+ * 6. ...
+ * 7. TwoPhaseRecordOnDisk (end sentinel, rmid == TWOPHASE_RM_END_ID)
+ * 8. CRC32
*
* Each segment except the final CRC32 is MAXALIGN'd.
*/
@@ -693,16 +691,16 @@ TwoPhaseGetDummyProc(TransactionId xid)
typedef struct TwoPhaseFileHeader
{
- uint32 magic; /* format identifier */
- uint32 total_len; /* actual file length */
- TransactionId xid; /* original transaction XID */
- Oid database; /* OID of database it was in */
- TimestampTz prepared_at; /* time of preparation */
- Oid owner; /* user running the transaction */
- int32 nsubxacts; /* number of following subxact XIDs */
- int32 ncommitrels; /* number of delete-on-commit rels */
- int32 nabortrels; /* number of delete-on-abort rels */
- char gid[GIDSIZE]; /* GID for transaction */
+ uint32 magic; /* format identifier */
+ uint32 total_len; /* actual file length */
+ TransactionId xid; /* original transaction XID */
+ Oid database; /* OID of database it was in */
+ TimestampTz prepared_at; /* time of preparation */
+ Oid owner; /* user running the transaction */
+ int32 nsubxacts; /* number of following subxact XIDs */
+ int32 ncommitrels; /* number of delete-on-commit rels */
+ int32 nabortrels; /* number of delete-on-abort rels */
+ char gid[GIDSIZE]; /* GID for transaction */
} TwoPhaseFileHeader;
/*
@@ -713,9 +711,9 @@ typedef struct TwoPhaseFileHeader
*/
typedef struct TwoPhaseRecordOnDisk
{
- uint32 len; /* length of rmgr data */
- TwoPhaseRmgrId rmid; /* resource manager for this record */
- uint16 info; /* flag bits for use by rmgr */
+ uint32 len; /* length of rmgr data */
+ TwoPhaseRmgrId rmid; /* resource manager for this record */
+ uint16 info; /* flag bits for use by rmgr */
} TwoPhaseRecordOnDisk;
/*
@@ -728,9 +726,9 @@ static struct xllist
{
XLogRecData *head; /* first data block in the chain */
XLogRecData *tail; /* last block in chain */
- uint32 bytes_free; /* free bytes left in tail block */
- uint32 total_len; /* total data bytes in chain */
-} records;
+ uint32 bytes_free; /* free bytes left in tail block */
+ uint32 total_len; /* total data bytes in chain */
+} records;
/*
@@ -744,7 +742,7 @@ static struct xllist
static void
save_state_data(const void *data, uint32 len)
{
- uint32 padlen = MAXALIGN(len);
+ uint32 padlen = MAXALIGN(len);
if (padlen > records.bytes_free)
{
@@ -772,7 +770,7 @@ save_state_data(const void *data, uint32 len)
void
StartPrepare(GlobalTransaction gxact)
{
- TransactionId xid = gxact->proc.xid;
+ TransactionId xid = gxact->proc.xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
RelFileNode *commitrels;
@@ -833,13 +831,13 @@ StartPrepare(GlobalTransaction gxact)
void
EndPrepare(GlobalTransaction gxact)
{
- TransactionId xid = gxact->proc.xid;
+ TransactionId xid = gxact->proc.xid;
TwoPhaseFileHeader *hdr;
- char path[MAXPGPATH];
- XLogRecData *record;
- pg_crc32 statefile_crc;
- pg_crc32 bogus_crc;
- int fd;
+ char path[MAXPGPATH];
+ XLogRecData *record;
+ pg_crc32 statefile_crc;
+ pg_crc32 bogus_crc;
+ int fd;
/* Add the end sentinel to the list of 2PC records */
RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0,
@@ -853,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
- * Note: because we use BasicOpenFile(), we are responsible for ensuring
- * the FD gets closed in any error exit path. Once we get into the
- * critical section, though, it doesn't matter since any failure causes
- * PANIC anyway.
+ * Note: because we use BasicOpenFile(), we are responsible for ensuring the
+ * FD gets closed in any error exit path. Once we get into the critical
+ * section, though, it doesn't matter since any failure causes PANIC
+ * anyway.
*/
TwoPhaseFilePath(path, xid);
@@ -887,11 +885,10 @@ EndPrepare(GlobalTransaction gxact)
FIN_CRC32(statefile_crc);
/*
- * Write a deliberately bogus CRC to the state file; this is just
- * paranoia to catch the case where four more bytes will run us out of
- * disk space.
+ * Write a deliberately bogus CRC to the state file; this is just paranoia
+ * to catch the case where four more bytes will run us out of disk space.
*/
- bogus_crc = ~ statefile_crc;
+ bogus_crc = ~statefile_crc;
if ((write(fd, &bogus_crc, sizeof(pg_crc32))) != sizeof(pg_crc32))
{
@@ -914,11 +911,11 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
- * Between the time we have written the WAL entry and the time we write
- * out the correct state file CRC, we have an inconsistency: the xact is
- * prepared according to WAL but not according to our on-disk state.
- * We use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * Between the time we have written the WAL entry and the time we write out
+ * the correct state file CRC, we have an inconsistency: the xact is
+ * prepared according to WAL but not according to our on-disk state. We
+ * use a critical section to force a PANIC if we are unable to complete
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@@ -956,16 +953,16 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close twophase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
- * MyProc as not running our XID (which it will do immediately after
- * this function returns), others can commit/rollback the xact.
+ * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
+ * as not running our XID (which it will do immediately after this
+ * function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyProc,
* else there is a window where the XID is not running according to
- * TransactionIdInProgress, and onlookers would be entitled to assume
- * the xact crashed. Instead we have a window where the same XID
- * appears twice in ProcArray, which is OK.
+ * TransactionIdInProgress, and onlookers would be entitled to assume the
+ * xact crashed. Instead we have a window where the same XID appears
+ * twice in ProcArray, which is OK.
*/
MarkAsPrepared(gxact);
@@ -1011,9 +1008,10 @@ ReadTwoPhaseFile(TransactionId xid)
char *buf;
TwoPhaseFileHeader *hdr;
int fd;
- struct stat stat;
+ struct stat stat;
uint32 crc_offset;
- pg_crc32 calc_crc, file_crc;
+ pg_crc32 calc_crc,
+ file_crc;
TwoPhaseFilePath(path, xid);
@@ -1028,9 +1026,8 @@ ReadTwoPhaseFile(TransactionId xid)
}
/*
- * Check file length. We can determine a lower bound pretty easily.
- * We set an upper bound mainly to avoid palloc() failure on a corrupt
- * file.
+ * Check file length. We can determine a lower bound pretty easily. We
+ * set an upper bound mainly to avoid palloc() failure on a corrupt file.
*/
if (fstat(fd, &stat))
{
@@ -1107,17 +1104,17 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
{
GlobalTransaction gxact;
TransactionId xid;
- char *buf;
- char *bufptr;
+ char *buf;
+ char *bufptr;
TwoPhaseFileHeader *hdr;
TransactionId *children;
RelFileNode *commitrels;
RelFileNode *abortrels;
- int i;
+ int i;
/*
- * Validate the GID, and lock the GXACT to ensure that two backends
- * do not try to commit the same GID at once.
+ * Validate the GID, and lock the GXACT to ensure that two backends do not
+ * try to commit the same GID at once.
*/
gxact = LockGXact(gid, GetUserId());
xid = gxact->proc.xid;
@@ -1148,10 +1145,10 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* The order of operations here is critical: make the XLOG entry for
* commit or abort, then mark the transaction committed or aborted in
- * pg_clog, then remove its PGPROC from the global ProcArray (which
- * means TransactionIdIsInProgress will stop saying the prepared xact
- * is in progress), then run the post-commit or post-abort callbacks.
- * The callbacks will release the locks the transaction held.
+ * pg_clog, then remove its PGPROC from the global ProcArray (which means
+ * TransactionIdIsInProgress will stop saying the prepared xact is in
+ * progress), then run the post-commit or post-abort callbacks. The
+ * callbacks will release the locks the transaction held.
*/
if (isCommit)
RecordTransactionCommitPrepared(xid,
@@ -1165,18 +1162,18 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
ProcArrayRemove(&gxact->proc);
/*
- * In case we fail while running the callbacks, mark the gxact invalid
- * so no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * In case we fail while running the callbacks, mark the gxact invalid so
+ * no one else will try to commit/rollback, and so it can be recycled
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
gxact->valid = false;
/*
- * We have to remove any files that were supposed to be dropped.
- * For consistency with the regular xact.c code paths, must do this
- * before releasing locks, so do it before running the callbacks.
+ * We have to remove any files that were supposed to be dropped. For
+ * consistency with the regular xact.c code paths, must do this before
+ * releasing locks, so do it before running the callbacks.
*
* NB: this code knows that we couldn't be dropping any temp rels ...
*/
@@ -1228,8 +1225,8 @@ ProcessRecords(char *bufptr, TransactionId xid,
bufptr += MAXALIGN(sizeof(TwoPhaseRecordOnDisk));
if (callbacks[record->rmid] != NULL)
- callbacks[record->rmid](xid, record->info,
- (void *) bufptr, record->len);
+ callbacks[record->rmid] (xid, record->info,
+ (void *) bufptr, record->len);
bufptr += MAXALIGN(record->len);
}
@@ -1244,15 +1241,15 @@ ProcessRecords(char *bufptr, TransactionId xid,
void
RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
TwoPhaseFilePath(path, xid);
if (unlink(path))
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not remove two-phase state file \"%s\": %m",
- path)));
+ errmsg("could not remove two-phase state file \"%s\": %m",
+ path)));
}
/*
@@ -1300,8 +1297,8 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len)
}
/*
- * We must fsync the file because the end-of-replay checkpoint will
- * not do so, there being no GXACT in shared memory yet to tell it to.
+ * We must fsync the file because the end-of-replay checkpoint will not do
+ * so, there being no GXACT in shared memory yet to tell it to.
*/
if (pg_fsync(fd) != 0)
{
@@ -1343,15 +1340,15 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
int i;
/*
- * We don't want to hold the TwoPhaseStateLock while doing I/O,
- * so we grab it just long enough to make a list of the XIDs that
- * require fsyncing, and then do the I/O afterwards.
+ * We don't want to hold the TwoPhaseStateLock while doing I/O, so we grab
+ * it just long enough to make a list of the XIDs that require fsyncing,
+ * and then do the I/O afterwards.
*
- * This approach creates a race condition: someone else could delete
- * a GXACT between the time we release TwoPhaseStateLock and the time
- * we try to open its state file. We handle this by special-casing
- * ENOENT failures: if we see that, we verify that the GXACT is no
- * longer valid, and if so ignore the failure.
+ * This approach creates a race condition: someone else could delete a GXACT
+ * between the time we release TwoPhaseStateLock and the time we try to
+ * open its state file. We handle this by special-casing ENOENT failures:
+ * if we see that, we verify that the GXACT is no longer valid, and if so
+ * ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */
@@ -1362,9 +1359,9 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
+ GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- if (gxact->valid &&
+ if (gxact->valid &&
XLByteLE(gxact->prepare_lsn, redo_horizon))
xids[nxids++] = gxact->proc.xid;
}
@@ -1374,7 +1371,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
for (i = 0; i < nxids; i++)
{
TransactionId xid = xids[i];
- int fd;
+ int fd;
TwoPhaseFilePath(path, xid);
@@ -1424,7 +1421,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
@@ -1442,7 +1439,7 @@ PrescanPreparedTransactions(void)
{
TransactionId origNextXid = ShmemVariableCache->nextXid;
TransactionId result = origNextXid;
- DIR *cldir;
+ DIR *cldir;
struct dirent *clde;
cldir = AllocateDir(TWOPHASE_DIR);
@@ -1452,10 +1449,10 @@ PrescanPreparedTransactions(void)
strspn(clde->d_name, "0123456789ABCDEF") == 8)
{
TransactionId xid;
- char *buf;
- TwoPhaseFileHeader *hdr;
+ char *buf;
+ TwoPhaseFileHeader *hdr;
TransactionId *subxids;
- int i;
+ int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);
@@ -1541,8 +1538,8 @@ PrescanPreparedTransactions(void)
void
RecoverPreparedTransactions(void)
{
- char dir[MAXPGPATH];
- DIR *cldir;
+ char dir[MAXPGPATH];
+ DIR *cldir;
struct dirent *clde;
snprintf(dir, MAXPGPATH, "%s", TWOPHASE_DIR);
@@ -1554,12 +1551,12 @@ RecoverPreparedTransactions(void)
strspn(clde->d_name, "0123456789ABCDEF") == 8)
{
TransactionId xid;
- char *buf;
- char *bufptr;
- TwoPhaseFileHeader *hdr;
+ char *buf;
+ char *bufptr;
+ TwoPhaseFileHeader *hdr;
TransactionId *subxids;
- GlobalTransaction gxact;
- int i;
+ GlobalTransaction gxact;
+ int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);
@@ -1598,8 +1595,8 @@ RecoverPreparedTransactions(void)
/*
* Reconstruct subtrans state for the transaction --- needed
- * because pg_subtrans is not preserved over a restart. Note
- * that we are linking all the subtransactions directly to the
+ * because pg_subtrans is not preserved over a restart. Note that
+ * we are linking all the subtransactions directly to the
* top-level XID; there may originally have been a more complex
* hierarchy, but there's no need to restore that exactly.
*/
@@ -1609,12 +1606,12 @@ RecoverPreparedTransactions(void)
/*
* Recreate its GXACT and dummy PGPROC
*
- * Note: since we don't have the PREPARE record's WAL location
- * at hand, we leave prepare_lsn zeroes. This means the GXACT
- * will be fsync'd on every future checkpoint. We assume this
+ * Note: since we don't have the PREPARE record's WAL location at
+ * hand, we leave prepare_lsn zeroes. This means the GXACT will
+ * be fsync'd on every future checkpoint. We assume this
* situation is infrequent enough that the performance cost is
- * negligible (especially since we know the state file has
- * already been fsynced).
+ * negligible (especially since we know the state file has already
+ * been fsynced).
*/
gxact = MarkAsPreparing(xid, hdr->gid,
hdr->prepared_at,
@@ -1773,12 +1770,11 @@ RecordTransactionAbortPrepared(TransactionId xid,
XLogFlush(recptr);
/*
- * Mark the transaction aborted in clog. This is not absolutely
- * necessary but we may as well do it while we are here.
+ * Mark the transaction aborted in clog. This is not absolutely necessary
+ * but we may as well do it while we are here.
*/
TransactionIdAbort(xid);
TransactionIdAbortTree(nchildren, children);
END_CRIT_SECTION();
}
-
diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c
index e78f8b2fbb3..eab442404f9 100644
--- a/src/backend/access/transam/twophase_rmgr.c
+++ b/src/backend/access/transam/twophase_rmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.1 2005/06/17 22:32:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.2 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,29 +21,29 @@
#include "utils/inval.h"
-const TwoPhaseCallback twophase_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
+const TwoPhaseCallback twophase_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
{
- NULL, /* END ID */
- lock_twophase_recover, /* Lock */
- NULL, /* Inval */
- NULL, /* flat file update */
- NULL /* notify/listen */
+ NULL, /* END ID */
+ lock_twophase_recover, /* Lock */
+ NULL, /* Inval */
+ NULL, /* flat file update */
+ NULL /* notify/listen */
};
-const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
+const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
{
- NULL, /* END ID */
- lock_twophase_postcommit, /* Lock */
- inval_twophase_postcommit, /* Inval */
- flatfile_twophase_postcommit, /* flat file update */
- notify_twophase_postcommit /* notify/listen */
+ NULL, /* END ID */
+ lock_twophase_postcommit, /* Lock */
+ inval_twophase_postcommit, /* Inval */
+ flatfile_twophase_postcommit, /* flat file update */
+ notify_twophase_postcommit /* notify/listen */
};
-const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
+const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
{
- NULL, /* END ID */
- lock_twophase_postabort, /* Lock */
- NULL, /* Inval */
- NULL, /* flat file update */
- NULL /* notify/listen */
+ NULL, /* END ID */
+ lock_twophase_postabort, /* Lock */
+ NULL, /* Inval */
+ NULL, /* flat file update */
+ NULL /* notify/listen */
};
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 99d9213af0c..bff646afb61 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.66 2005/08/22 16:59:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.67 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,21 +49,21 @@ GetNewTransactionId(bool isSubXact)
xid = ShmemVariableCache->nextXid;
/*
- * Check to see if it's safe to assign another XID. This protects
- * against catastrophic data loss due to XID wraparound. The basic
- * rules are: warn if we're past xidWarnLimit, and refuse to execute
- * transactions if we're past xidStopLimit, unless we are running in
- * a standalone backend (which gives an escape hatch to the DBA who
- * ignored all those warnings).
+ * Check to see if it's safe to assign another XID. This protects against
+ * catastrophic data loss due to XID wraparound. The basic rules are:
+ * warn if we're past xidWarnLimit, and refuse to execute transactions if
+ * we're past xidStopLimit, unless we are running in a standalone backend
+ * (which gives an escape hatch to the DBA who ignored all those
+ * warnings).
*
- * Test is coded to fall out as fast as possible during normal operation,
- * ie, when the warn limit is set and we haven't violated it.
+ * Test is coded to fall out as fast as possible during normal operation, ie,
+ * when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
{
if (IsUnderPostmaster &&
- TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidStopLimit))
+ TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidStopLimit))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting queries to avoid wraparound data loss in database \"%s\"",
@@ -72,20 +72,19 @@ GetNewTransactionId(bool isSubXact)
NameStr(ShmemVariableCache->limit_datname))));
else
ereport(WARNING,
- (errmsg("database \"%s\" must be vacuumed within %u transactions",
- NameStr(ShmemVariableCache->limit_datname),
- ShmemVariableCache->xidWrapLimit - xid),
- errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
- NameStr(ShmemVariableCache->limit_datname))));
+ (errmsg("database \"%s\" must be vacuumed within %u transactions",
+ NameStr(ShmemVariableCache->limit_datname),
+ ShmemVariableCache->xidWrapLimit - xid),
+ errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
+ NameStr(ShmemVariableCache->limit_datname))));
}
/*
* If we are allocating the first XID of a new page of the commit log,
- * zero out that commit-log page before returning. We must do this
- * while holding XidGenLock, else another xact could acquire and
- * commit a later XID before we zero the page. Fortunately, a page of
- * the commit log holds 32K or more transactions, so we don't have to
- * do this very often.
+ * zero out that commit-log page before returning. We must do this while
+ * holding XidGenLock, else another xact could acquire and commit a later
+ * XID before we zero the page. Fortunately, a page of the commit log
+ * holds 32K or more transactions, so we don't have to do this very often.
*
* Extend pg_subtrans too.
*/
@@ -93,45 +92,43 @@ GetNewTransactionId(bool isSubXact)
ExtendSUBTRANS(xid);
/*
- * Now advance the nextXid counter. This must not happen until after
- * we have successfully completed ExtendCLOG() --- if that routine
- * fails, we want the next incoming transaction to try it again. We
- * cannot assign more XIDs until there is CLOG space for them.
+ * Now advance the nextXid counter. This must not happen until after we
+ * have successfully completed ExtendCLOG() --- if that routine fails, we
+ * want the next incoming transaction to try it again. We cannot assign
+ * more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
- * We must store the new XID into the shared PGPROC array before
- * releasing XidGenLock. This ensures that when GetSnapshotData calls
+ * We must store the new XID into the shared PGPROC array before releasing
+ * XidGenLock. This ensures that when GetSnapshotData calls
* ReadNewTransactionId, all active XIDs before the returned value of
- * nextXid are already present in PGPROC. Else we have a race
- * condition.
+ * nextXid are already present in PGPROC. Else we have a race condition.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
- * would be a nasty concurrency hit (and in fact could cause a
- * deadlock against GetSnapshotData). So for now, assume atomicity.
- * Note that readers of PGPROC xid field should be careful to fetch
- * the value only once, rather than assume they can read it multiple
- * times and get the same answer each time.
+ * would be a nasty concurrency hit (and in fact could cause a deadlock
+ * against GetSnapshotData). So for now, assume atomicity. Note that
+ * readers of PGPROC xid field should be careful to fetch the value only
+ * once, rather than assume they can read it multiple times and get the
+ * same answer each time.
*
* The same comments apply to the subxact xid count and overflow fields.
*
- * A solution to the atomic-store problem would be to give each PGPROC
- * its own spinlock used only for fetching/storing that PGPROC's xid
- * and related fields.
+ * A solution to the atomic-store problem would be to give each PGPROC its
+ * own spinlock used only for fetching/storing that PGPROC's xid and
+ * related fields.
*
* If there's no room to fit a subtransaction XID into PGPROC, set the
* cache-overflowed flag instead. This forces readers to look in
- * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There
- * is a race-condition window, in that the new XID will not appear as
- * running until its parent link has been placed into pg_subtrans.
- * However, that will happen before anyone could possibly have a
- * reason to inquire about the status of the XID, so it seems OK.
- * (Snapshots taken during this window *will* include the parent XID,
- * so they will deliver the correct answer later on when someone does
- * have a reason to inquire.)
+ * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There is a
+ * race-condition window, in that the new XID will not appear as running
+ * until its parent link has been placed into pg_subtrans. However, that
+ * will happen before anyone could possibly have a reason to inquire about
+ * the status of the XID, so it seems OK. (Snapshots taken during this
+ * window *will* include the parent XID, so they will deliver the correct
+ * answer later on when someone does have a reason to inquire.)
*/
if (MyProc != NULL)
{
@@ -197,27 +194,26 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
xidWrapLimit += FirstNormalTransactionId;
/*
- * We'll refuse to continue assigning XIDs in interactive mode once
- * we get within 1M transactions of data loss. This leaves lots
- * of room for the DBA to fool around fixing things in a standalone
- * backend, while not being significant compared to total XID space.
- * (Note that since vacuuming requires one transaction per table
- * cleaned, we had better be sure there's lots of XIDs left...)
+ * We'll refuse to continue assigning XIDs in interactive mode once we get
+ * within 1M transactions of data loss. This leaves lots of room for the
+ * DBA to fool around fixing things in a standalone backend, while not
+ * being significant compared to total XID space. (Note that since
+ * vacuuming requires one transaction per table cleaned, we had better be
+ * sure there's lots of XIDs left...)
*/
xidStopLimit = xidWrapLimit - 1000000;
if (xidStopLimit < FirstNormalTransactionId)
xidStopLimit -= FirstNormalTransactionId;
/*
- * We'll start complaining loudly when we get within 10M transactions
- * of the stop point. This is kind of arbitrary, but if you let your
- * gas gauge get down to 1% of full, would you be looking for the
- * next gas station? We need to be fairly liberal about this number
- * because there are lots of scenarios where most transactions are
- * done by automatic clients that won't pay attention to warnings.
- * (No, we're not gonna make this configurable. If you know enough to
- * configure it, you know enough to not get in this kind of trouble in
- * the first place.)
+ * We'll start complaining loudly when we get within 10M transactions of
+ * the stop point. This is kind of arbitrary, but if you let your gas
+ * gauge get down to 1% of full, would you be looking for the next gas
+ * station? We need to be fairly liberal about this number because there
+ * are lots of scenarios where most transactions are done by automatic
+ * clients that won't pay attention to warnings. (No, we're not gonna make
+ * this configurable. If you know enough to configure it, you know enough
+ * to not get in this kind of trouble in the first place.)
*/
xidWarnLimit = xidStopLimit - 10000000;
if (xidWarnLimit < FirstNormalTransactionId)
@@ -234,16 +230,16 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
/* Log the info */
ereport(LOG,
- (errmsg("transaction ID wrap limit is %u, limited by database \"%s\"",
- xidWrapLimit, NameStr(*oldest_datname))));
+ (errmsg("transaction ID wrap limit is %u, limited by database \"%s\"",
+ xidWrapLimit, NameStr(*oldest_datname))));
/* Give an immediate warning if past the wrap warn point */
if (TransactionIdFollowsOrEquals(curXid, xidWarnLimit))
ereport(WARNING,
- (errmsg("database \"%s\" must be vacuumed within %u transactions",
- NameStr(*oldest_datname),
- xidWrapLimit - curXid),
- errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
- NameStr(*oldest_datname))));
+ (errmsg("database \"%s\" must be vacuumed within %u transactions",
+ NameStr(*oldest_datname),
+ xidWrapLimit - curXid),
+ errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
+ NameStr(*oldest_datname))));
}
@@ -272,11 +268,11 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
- * During initdb, we start the OID generator at FirstBootstrapObjectId,
- * so we only enforce wrapping to that point when in bootstrap or
- * standalone mode. The first time through this routine after normal
- * postmaster start, the counter will be forced up to FirstNormalObjectId.
- * This mechanism leaves the OIDs between FirstBootstrapObjectId and
+ * During initdb, we start the OID generator at FirstBootstrapObjectId, so we
+ * only enforce wrapping to that point when in bootstrap or standalone
+ * mode. The first time through this routine after normal postmaster
+ * start, the counter will be forced up to FirstNormalObjectId. This
+ * mechanism leaves the OIDs between FirstBootstrapObjectId and
* FirstNormalObjectId available for automatic assignment during initdb,
* while ensuring they will never conflict with user-assigned OIDs.
*/
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index eabcb117cc5..ea19e075640 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.214 2005/08/20 23:45:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,15 +110,14 @@ typedef enum TBlockState
*/
typedef struct TransactionStateData
{
- TransactionId transactionId; /* my XID, or Invalid if none */
+ TransactionId transactionId; /* my XID, or Invalid if none */
SubTransactionId subTransactionId; /* my subxact ID */
char *name; /* savepoint name, if any */
int savepointLevel; /* savepoint level */
TransState state; /* low-level state */
TBlockState blockState; /* high-level state */
int nestingLevel; /* nest depth */
- MemoryContext curTransactionContext; /* my xact-lifetime
- * context */
+ MemoryContext curTransactionContext; /* my xact-lifetime context */
ResourceOwner curTransactionOwner; /* my query resources */
List *childXids; /* subcommitted child XIDs */
Oid currentUser; /* subxact start current_user */
@@ -219,8 +218,8 @@ static void AtStart_Memory(void);
static void AtStart_ResourceOwner(void);
static void CallXactCallbacks(XactEvent event);
static void CallSubXactCallbacks(SubXactEvent event,
- SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid);
static void CleanupTransaction(void);
static void CommitTransaction(void);
static void RecordTransactionAbort(void);
@@ -349,18 +348,18 @@ AssignSubTransactionId(TransactionState s)
/*
* Generate a new Xid and record it in PG_PROC and pg_subtrans.
*
- * NB: we must make the subtrans entry BEFORE the Xid appears anywhere
- * in shared storage other than PG_PROC; because if there's no room for
- * it in PG_PROC, the subtrans entry is needed to ensure that other
- * backends see the Xid as "running". See GetNewTransactionId.
+ * NB: we must make the subtrans entry BEFORE the Xid appears anywhere in
+ * shared storage other than PG_PROC; because if there's no room for it in
+ * PG_PROC, the subtrans entry is needed to ensure that other backends see
+ * the Xid as "running". See GetNewTransactionId.
*/
s->transactionId = GetNewTransactionId(true);
SubTransSetParent(s->transactionId, s->parent->transactionId);
/*
- * Acquire lock on the transaction XID. (We assume this cannot block.)
- * We have to be sure that the lock is assigned to the transaction's
+ * Acquire lock on the transaction XID. (We assume this cannot block.) We
+ * have to be sure that the lock is assigned to the transaction's
* ResourceOwner.
*/
currentOwner = CurrentResourceOwner;
@@ -453,22 +452,22 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
- * this causes the tqual.c routines to see all tuples as committed,
- * which is what we need during bootstrap. (Bootstrap mode only inserts
- * tuples, it never updates or deletes them, so all tuples can be presumed
- * good immediately.)
+ * this causes the tqual.c routines to see all tuples as committed, which
+ * is what we need during bootstrap. (Bootstrap mode only inserts tuples,
+ * it never updates or deletes them, so all tuples can be presumed good
+ * immediately.)
*/
if (xid == BootstrapTransactionId)
return false;
/*
- * We will return true for the Xid of the current subtransaction, any
- * of its subcommitted children, any of its parents, or any of their
- * previously subcommitted children. However, a transaction being
- * aborted is no longer "current", even though it may still have an
- * entry on the state stack.
+ * We will return true for the Xid of the current subtransaction, any of
+ * its subcommitted children, any of its parents, or any of their
+ * previously subcommitted children. However, a transaction being aborted
+ * is no longer "current", even though it may still have an entry on the
+ * state stack.
*/
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@@ -498,12 +497,12 @@ void
CommandCounterIncrement(void)
{
currentCommandId += 1;
- if (currentCommandId == FirstCommandId) /* check for overflow */
+ if (currentCommandId == FirstCommandId) /* check for overflow */
{
currentCommandId -= 1;
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("cannot have more than 2^32-1 commands in a transaction")));
+ errmsg("cannot have more than 2^32-1 commands in a transaction")));
}
/* Propagate new command ID into static snapshots, if set */
@@ -607,16 +606,15 @@ AtSubStart_Memory(void)
Assert(CurTransactionContext != NULL);
/*
- * Create a CurTransactionContext, which will be used to hold data
- * that survives subtransaction commit but disappears on
- * subtransaction abort. We make it a child of the immediate parent's
- * CurTransactionContext.
+ * Create a CurTransactionContext, which will be used to hold data that
+ * survives subtransaction commit but disappears on subtransaction abort.
+ * We make it a child of the immediate parent's CurTransactionContext.
*/
CurTransactionContext = AllocSetContextCreate(CurTransactionContext,
"CurTransactionContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
s->curTransactionContext = CurTransactionContext;
/* Make the CurTransactionContext active. */
@@ -634,8 +632,8 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child
- * of the immediate parent's resource owner.
+ * Create a resource owner for the subtransaction. We make it a child of
+ * the immediate parent's resource owner.
*/
s->curTransactionOwner =
ResourceOwnerCreate(s->parent->curTransactionOwner,
@@ -666,11 +664,10 @@ RecordTransactionCommit(void)
nchildren = xactGetCommittedChildren(&children);
/*
- * If we made neither any XLOG entries nor any temp-rel updates, and
- * have no files to be deleted, we can omit recording the transaction
- * commit at all. (This test includes the effects of subtransactions,
- * so the presence of committed subxacts need not alone force a
- * write.)
+ * If we made neither any XLOG entries nor any temp-rel updates, and have
+ * no files to be deleted, we can omit recording the transaction commit at
+ * all. (This test includes the effects of subtransactions, so the
+ * presence of committed subxacts need not alone force a write.)
*/
if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate || nrels > 0)
{
@@ -684,18 +681,17 @@ RecordTransactionCommit(void)
START_CRIT_SECTION();
/*
- * If our transaction made any transaction-controlled XLOG
- * entries, we need to lock out checkpoint start between writing
- * our XLOG record and updating pg_clog. Otherwise it is possible
- * for the checkpoint to set REDO after the XLOG record but fail
- * to flush the pg_clog update to disk, leading to loss of the
- * transaction commit if we crash a little later. Slightly klugy
- * fix for problem discovered 2004-08-10.
+ * If our transaction made any transaction-controlled XLOG entries, we
+ * need to lock out checkpoint start between writing our XLOG record
+ * and updating pg_clog. Otherwise it is possible for the checkpoint
+ * to set REDO after the XLOG record but fail to flush the pg_clog
+ * update to disk, leading to loss of the transaction commit if we
+ * crash a little later. Slightly klugy fix for problem discovered
+ * 2004-08-10.
*
- * (If it made no transaction-controlled XLOG entries, its XID
- * appears nowhere in permanent storage, so no one else will ever
- * care if it committed; so it doesn't matter if we lose the
- * commit flag.)
+ * (If it made no transaction-controlled XLOG entries, its XID appears
+ * nowhere in permanent storage, so no one else will ever care if it
+ * committed; so it doesn't matter if we lose the commit flag.)
*
* Note we only need a shared lock.
*/
@@ -704,8 +700,8 @@ RecordTransactionCommit(void)
LWLockAcquire(CheckpointStartLock, LW_SHARED);
/*
- * We only need to log the commit in XLOG if the transaction made
- * any transaction-controlled XLOG entries or will delete files.
+ * We only need to log the commit in XLOG if the transaction made any
+ * transaction-controlled XLOG entries or will delete files.
*/
if (madeTCentries || nrels > 0)
{
@@ -748,26 +744,26 @@ RecordTransactionCommit(void)
}
/*
- * We must flush our XLOG entries to disk if we made any XLOG
- * entries, whether in or out of transaction control. For
- * example, if we reported a nextval() result to the client, this
- * ensures that any XLOG record generated by nextval will hit the
- * disk before we report the transaction committed.
+ * We must flush our XLOG entries to disk if we made any XLOG entries,
+ * whether in or out of transaction control. For example, if we
+ * reported a nextval() result to the client, this ensures that any
+ * XLOG record generated by nextval will hit the disk before we report
+ * the transaction committed.
*
- * Note: if we generated a commit record above, MyXactMadeXLogEntry
- * will certainly be set now.
+ * Note: if we generated a commit record above, MyXactMadeXLogEntry will
+ * certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
/*
* Sleep before flush! So we can flush more than one commit
- * records per single fsync. (The idea is some other backend
- * may do the XLogFlush while we're sleeping. This needs work
- * still, because on most Unixen, the minimum select() delay
- * is 10msec or more, which is way too long.)
+ * records per single fsync. (The idea is some other backend may
+ * do the XLogFlush while we're sleeping. This needs work still,
+ * because on most Unixen, the minimum select() delay is 10msec or
+ * more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there
- * are fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@@ -778,14 +774,13 @@ RecordTransactionCommit(void)
}
/*
- * We must mark the transaction committed in clog if its XID
- * appears either in permanent rels or in local temporary rels. We
- * test this by seeing if we made transaction-controlled entries
- * *OR* local-rel tuple updates. Note that if we made only the
- * latter, we have not emitted an XLOG record for our commit, and
- * so in the event of a crash the clog update might be lost. This
- * is okay because no one else will ever care whether we
- * committed.
+ * We must mark the transaction committed in clog if its XID appears
+ * either in permanent rels or in local temporary rels. We test this
+ * by seeing if we made transaction-controlled entries *OR* local-rel
+ * tuple updates. Note that if we made only the latter, we have not
+ * emitted an XLOG record for our commit, and so in the event of a
+ * crash the clog update might be lost. This is okay because no one
+ * else will ever care whether we committed.
*/
if (madeTCentries || MyXactMadeTempRelUpdate)
{
@@ -833,9 +828,8 @@ static void
AtCommit_Memory(void)
{
/*
- * Now that we're "out" of a transaction, have the system allocate
- * things in the top memory context instead of per-transaction
- * contexts.
+ * Now that we're "out" of a transaction, have the system allocate things
+ * in the top memory context instead of per-transaction contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
@@ -870,9 +864,9 @@ AtSubCommit_Memory(void)
/*
* Ordinarily we cannot throw away the child's CurTransactionContext,
- * since the data it contains will be needed at upper commit. However,
- * if there isn't actually anything in it, we can throw it away. This
- * avoids a small memory leak in the common case of "trivial" subxacts.
+ * since the data it contains will be needed at upper commit. However, if
+ * there isn't actually anything in it, we can throw it away. This avoids
+ * a small memory leak in the common case of "trivial" subxacts.
*/
if (MemoryContextIsEmpty(s->curTransactionContext))
{
@@ -908,9 +902,10 @@ AtSubCommit_childXids(void)
{
s->parent->childXids = list_concat(s->parent->childXids,
s->childXids);
+
/*
- * list_concat doesn't free the list header for the second list;
- * do so here to avoid memory leakage (kluge)
+ * list_concat doesn't free the list header for the second list; do so
+ * here to avoid memory leakage (kluge)
*/
pfree(s->childXids);
s->childXids = NIL;
@@ -929,14 +924,14 @@ RecordSubTransactionCommit(void)
* We do not log the subcommit in XLOG; it doesn't matter until the
* top-level transaction commits.
*
- * We must mark the subtransaction subcommitted in clog if its XID
- * appears either in permanent rels or in local temporary rels. We
- * test this by seeing if we made transaction-controlled entries *OR*
- * local-rel tuple updates. (The test here actually covers the entire
- * transaction tree so far, so it may mark subtransactions that don't
- * really need it, but it's probably not worth being tenser. Note that
- * if a prior subtransaction dirtied these variables, then
- * RecordTransactionCommit will have to do the full pushup anyway...)
+ * We must mark the subtransaction subcommitted in clog if its XID appears
+ * either in permanent rels or in local temporary rels. We test this by
+ * seeing if we made transaction-controlled entries *OR* local-rel tuple
+ * updates. (The test here actually covers the entire transaction tree so
+ * far, so it may mark subtransactions that don't really need it, but it's
+ * probably not worth being tenser. Note that if a prior subtransaction
+ * dirtied these variables, then RecordTransactionCommit will have to do
+ * the full pushup anyway...)
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
{
@@ -974,9 +969,9 @@ RecordTransactionAbort(void)
/*
* If we made neither any transaction-controlled XLOG entries nor any
- * temp-rel updates, and are not going to delete any files, we can
- * omit recording the transaction abort at all. No one will ever care
- * that it aborted. (These tests cover our whole transaction tree.)
+ * temp-rel updates, and are not going to delete any files, we can omit
+ * recording the transaction abort at all. No one will ever care that it
+ * aborted. (These tests cover our whole transaction tree.)
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate || nrels > 0)
{
@@ -992,16 +987,16 @@ RecordTransactionAbort(void)
START_CRIT_SECTION();
/*
- * We only need to log the abort in XLOG if the transaction made
- * any transaction-controlled XLOG entries or will delete files.
- * (If it made no transaction-controlled XLOG entries, its XID
- * appears nowhere in permanent storage, so no one else will ever
- * care if it committed.)
+ * We only need to log the abort in XLOG if the transaction made any
+ * transaction-controlled XLOG entries or will delete files. (If it
+ * made no transaction-controlled XLOG entries, its XID appears
+ * nowhere in permanent storage, so no one else will ever care if it
+ * committed.)
*
- * We do not flush XLOG to disk unless deleting files, since the
- * default assumption after a crash would be that we aborted,
- * anyway. For the same reason, we don't need to worry about
- * interlocking against checkpoint start.
+ * We do not flush XLOG to disk unless deleting files, since the default
+ * assumption after a crash would be that we aborted, anyway. For the
+ * same reason, we don't need to worry about interlocking against
+ * checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
@@ -1047,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
- * The ordering here isn't critical but it seems best to mark the
- * parent first. This assures an atomic transition of all the
+ * The ordering here isn't critical but it seems best to mark the parent
+ * first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
@@ -1078,8 +1073,8 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
- * TopTransactionContext...). Note that it is possible for this code
- * to be called when we aren't in a transaction at all; go directly to
+ * TopTransactionContext...). Note that it is possible for this code to
+ * be called when we aren't in a transaction at all; go directly to
* TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
@@ -1087,8 +1082,8 @@ AtAbort_Memory(void)
MemoryContextSwitchTo(TopTransactionContext);
/*
- * We do not want to destroy the transaction's global state yet,
- * so we can't free any memory here.
+ * We do not want to destroy the transaction's global state yet, so we
+ * can't free any memory here.
*/
}
else
@@ -1114,8 +1109,8 @@ static void
AtAbort_ResourceOwner(void)
{
/*
- * Make sure we have a valid ResourceOwner, if possible (else it
- * will be NULL, which is OK)
+ * Make sure we have a valid ResourceOwner, if possible (else it will be
+ * NULL, which is OK)
*/
CurrentResourceOwner = TopTransactionResourceOwner;
}
@@ -1143,7 +1138,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID lists in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the list
+ * AtSubCommit_childXids). This means we'd better free the list
* explicitly at abort to avoid leakage.
*/
list_free(s->childXids);
@@ -1168,11 +1163,11 @@ RecordSubTransactionAbort(void)
/*
* If we made neither any transaction-controlled XLOG entries nor any
- * temp-rel updates, and are not going to delete any files, we can
- * omit recording the transaction abort at all. No one will ever care
- * that it aborted. (These tests cover our whole transaction tree,
- * and therefore may mark subxacts that don't really need it, but it's
- * probably not worth being tenser.)
+ * temp-rel updates, and are not going to delete any files, we can omit
+ * recording the transaction abort at all. No one will ever care that it
+ * aborted. (These tests cover our whole transaction tree, and therefore
+ * may mark subxacts that don't really need it, but it's probably not
+ * worth being tenser.)
*
* In this case we needn't worry about marking subcommitted children as
* aborted, because they didn't mark themselves as subcommitted in the
@@ -1183,8 +1178,8 @@ RecordSubTransactionAbort(void)
START_CRIT_SECTION();
/*
- * We only need to log the abort in XLOG if the transaction made
- * any transaction-controlled XLOG entries or will delete files.
+ * We only need to log the abort in XLOG if the transaction made any
+ * transaction-controlled XLOG entries or will delete files.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
@@ -1238,11 +1233,10 @@ RecordSubTransactionAbort(void)
}
/*
- * We can immediately remove failed XIDs from PGPROC's cache of
- * running child XIDs. It's easiest to do it here while we have the
- * child XID array at hand, even though in the main-transaction case
- * the equivalent work happens just after return from
- * RecordTransactionAbort.
+ * We can immediately remove failed XIDs from PGPROC's cache of running
+ * child XIDs. It's easiest to do it here while we have the child XID
+ * array at hand, even though in the main-transaction case the equivalent
+ * work happens just after return from RecordTransactionAbort.
*/
XidCacheRemoveRunningXids(xid, nchildren, children);
@@ -1265,9 +1259,8 @@ static void
AtCleanup_Memory(void)
{
/*
- * Now that we're "out" of a transaction, have the system allocate
- * things in the top memory context instead of per-transaction
- * contexts.
+ * Now that we're "out" of a transaction, have the system allocate things
+ * in the top memory context instead of per-transaction contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
@@ -1304,9 +1297,9 @@ AtSubCleanup_Memory(void)
CurTransactionContext = s->parent->curTransactionContext;
/*
- * Delete the subxact local memory contexts. Its CurTransactionContext
- * can go too (note this also kills CurTransactionContexts from any
- * children of the subxact).
+ * Delete the subxact local memory contexts. Its CurTransactionContext can
+ * go too (note this also kills CurTransactionContexts from any children
+ * of the subxact).
*/
if (s->curTransactionContext)
MemoryContextDelete(s->curTransactionContext);
@@ -1344,11 +1337,10 @@ StartTransaction(void)
* start processing
*/
s->state = TRANS_START;
- s->transactionId = InvalidTransactionId; /* until assigned */
+ s->transactionId = InvalidTransactionId; /* until assigned */
/*
- * Make sure we've freed any old snapshot, and reset xact state
- * variables
+ * Make sure we've freed any old snapshot, and reset xact state variables
*/
FreeXactSnapshot();
XactIsoLevel = DefaultXactIsoLevel;
@@ -1386,10 +1378,10 @@ StartTransaction(void)
s->childXids = NIL;
/*
- * You might expect to see "s->currentUser = GetUserId();" here, but
- * you won't because it doesn't work during startup; the userid isn't
- * set yet during a backend's first transaction start. We only use
- * the currentUser field in sub-transaction state structs.
+ * You might expect to see "s->currentUser = GetUserId();" here, but you
+ * won't because it doesn't work during startup; the userid isn't set yet
+ * during a backend's first transaction start. We only use the
+ * currentUser field in sub-transaction state structs.
*
* prevXactReadOnly is also valid only in sub-transactions.
*/
@@ -1432,13 +1424,12 @@ CommitTransaction(void)
Assert(s->parent == NULL);
/*
- * Do pre-commit processing (most of this stuff requires database
- * access, and in fact could still cause an error...)
+ * Do pre-commit processing (most of this stuff requires database access,
+ * and in fact could still cause an error...)
*
- * It is possible for CommitHoldablePortals to invoke functions that
- * queue deferred triggers, and it's also possible that triggers create
- * holdable cursors. So we have to loop until there's nothing left to
- * do.
+ * It is possible for CommitHoldablePortals to invoke functions that queue
+ * deferred triggers, and it's also possible that triggers create holdable
+ * cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
@@ -1525,19 +1516,19 @@ CommitTransaction(void)
}
/*
- * This is all post-commit cleanup. Note that if an error is raised
- * here, it's too late to abort the transaction. This should be just
+ * This is all post-commit cleanup. Note that if an error is raised here,
+ * it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
- * The ordering of operations is not entirely random. The idea is:
- * release resources visible to other backends (eg, files, buffer
- * pins); then release locks; then release backend-local resources. We
- * want to release locks at the point where any backend waiting for us
- * will see our transaction as being fully cleaned up.
+ * The ordering of operations is not entirely random. The idea is: release
+ * resources visible to other backends (eg, files, buffer pins); then
+ * release locks; then release backend-local resources. We want to release
+ * locks at the point where any backend waiting for us will see our
+ * transaction as being fully cleaned up.
*
- * Resources that can be associated with individual queries are handled
- * by the ResourceOwner mechanism. The other calls here are for
- * backend-wide state.
+ * Resources that can be associated with individual queries are handled by
+ * the ResourceOwner mechanism. The other calls here are for backend-wide
+ * state.
*/
CallXactCallbacks(XACT_EVENT_COMMIT);
@@ -1553,12 +1544,11 @@ CommitTransaction(void)
AtEOXact_RelationCache(true);
/*
- * Make catalog changes visible to all backends. This has to happen
- * after relcache references are dropped (see comments for
- * AtEOXact_RelationCache), but before locks are released (if anyone
- * is waiting for lock on a relation we've modified, we want them to
- * know about the catalog change before they start using the
- * relation).
+ * Make catalog changes visible to all backends. This has to happen after
+ * relcache references are dropped (see comments for
+ * AtEOXact_RelationCache), but before locks are released (if anyone is
+ * waiting for lock on a relation we've modified, we want them to know
+ * about the catalog change before they start using the relation).
*/
AtEOXact_Inval(true);
@@ -1621,10 +1611,10 @@ CommitTransaction(void)
static void
PrepareTransaction(void)
{
- TransactionState s = CurrentTransactionState;
- TransactionId xid = GetCurrentTransactionId();
- GlobalTransaction gxact;
- TimestampTz prepared_at;
+ TransactionState s = CurrentTransactionState;
+ TransactionId xid = GetCurrentTransactionId();
+ GlobalTransaction gxact;
+ TimestampTz prepared_at;
ShowTransactionState("PrepareTransaction");
@@ -1637,13 +1627,12 @@ PrepareTransaction(void)
Assert(s->parent == NULL);
/*
- * Do pre-commit processing (most of this stuff requires database
- * access, and in fact could still cause an error...)
+ * Do pre-commit processing (most of this stuff requires database access,
+ * and in fact could still cause an error...)
*
- * It is possible for PrepareHoldablePortals to invoke functions that
- * queue deferred triggers, and it's also possible that triggers create
- * holdable cursors. So we have to loop until there's nothing left to
- * do.
+ * It is possible for PrepareHoldablePortals to invoke functions that queue
+ * deferred triggers, and it's also possible that triggers create holdable
+ * cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
@@ -1693,8 +1682,8 @@ PrepareTransaction(void)
BufmgrCommit();
/*
- * Reserve the GID for this transaction. This could fail if the
- * requested GID is invalid or already in use.
+ * Reserve the GID for this transaction. This could fail if the requested
+ * GID is invalid or already in use.
*/
gxact = MarkAsPreparing(xid, prepareGID, prepared_at,
GetUserId(), MyDatabaseId);
@@ -1707,14 +1696,14 @@ PrepareTransaction(void)
* want transaction abort to be able to clean up. (In particular, the
* AtPrepare routines may error out if they find cases they cannot
* handle.) State cleanup should happen in the PostPrepare routines
- * below. However, some modules can go ahead and clear state here
- * because they wouldn't do anything with it during abort anyway.
+ * below. However, some modules can go ahead and clear state here because
+ * they wouldn't do anything with it during abort anyway.
*
* Note: because the 2PC state file records will be replayed in the same
- * order they are made, the order of these calls has to match the order
- * in which we want things to happen during COMMIT PREPARED or
- * ROLLBACK PREPARED; in particular, pay attention to whether things
- * should happen before or after releasing the transaction's locks.
+ * order they are made, the order of these calls has to match the order in
+ * which we want things to happen during COMMIT PREPARED or ROLLBACK
+ * PREPARED; in particular, pay attention to whether things should happen
+ * before or after releasing the transaction's locks.
*/
StartPrepare(gxact);
@@ -1726,15 +1715,14 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
- * We have to record transaction prepares even if we didn't
- * make any updates, because the transaction manager might
- * get confused if we lose a global transaction.
+ * We have to record transaction prepares even if we didn't make any updates,
+ * because the transaction manager might get confused if we lose a global
+ * transaction.
*/
EndPrepare(gxact);
/*
- * Now we clean up backend-internal state and release internal
- * resources.
+ * Now we clean up backend-internal state and release internal resources.
*/
/* Break the chain of back-links in the XLOG records I output */
@@ -1743,9 +1731,9 @@ PrepareTransaction(void)
MyXactMadeTempRelUpdate = false;
/*
- * Let others know about no transaction in progress by me. This has
- * to be done *after* the prepared transaction has been marked valid,
- * else someone may think it is unlocked and recyclable.
+ * Let others know about no transaction in progress by me. This has to be
+ * done *after* the prepared transaction has been marked valid, else
+ * someone may think it is unlocked and recyclable.
*/
/* Lock ProcArrayLock because that's what GetSnapshotData uses. */
@@ -1762,7 +1750,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -1819,8 +1807,8 @@ PrepareTransaction(void)
s->childXids = NIL;
/*
- * done with 1st phase commit processing, set current transaction
- * state back to default
+ * done with 1st phase commit processing, set current transaction state
+ * back to default
*/
s->state = TRANS_DEFAULT;
@@ -1842,8 +1830,8 @@ AbortTransaction(void)
/*
* Release any LW locks we might be holding as quickly as possible.
* (Regular locks, however, must be held till we finish aborting.)
- * Releasing LW locks is critical since we might try to grab them
- * again while cleaning up!
+ * Releasing LW locks is critical since we might try to grab them again
+ * while cleaning up!
*/
LWLockReleaseAll();
@@ -1852,8 +1840,8 @@ AbortTransaction(void)
UnlockBuffers();
/*
- * Also clean up any open wait for lock, since the lock manager will
- * choke if we try to wait for another lock before doing this.
+ * Also clean up any open wait for lock, since the lock manager will choke
+ * if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@@ -1866,8 +1854,8 @@ AbortTransaction(void)
Assert(s->parent == NULL);
/*
- * set the current transaction state information appropriately during
- * the abort processing
+ * set the current transaction state information appropriately during the
+ * abort processing
*/
s->state = TRANS_ABORT;
@@ -1876,15 +1864,14 @@ AbortTransaction(void)
AtAbort_ResourceOwner();
/*
- * Reset user id which might have been changed transiently. We cannot
- * use s->currentUser, since it may not be set yet; instead rely on
- * internal state of miscinit.c.
+ * Reset user id which might have been changed transiently. We cannot use
+ * s->currentUser, since it may not be set yet; instead rely on internal
+ * state of miscinit.c.
*
- * (Note: it is not necessary to restore session authorization here
- * because that can only be changed via GUC, and GUC will take care of
- * rolling it back if need be. However, an error within a SECURITY
- * DEFINER function could send control here with the wrong current
- * userid.)
+ * (Note: it is not necessary to restore session authorization here because
+ * that can only be changed via GUC, and GUC will take care of rolling it
+ * back if need be. However, an error within a SECURITY DEFINER function
+ * could send control here with the wrong current userid.)
*/
AtAbort_UserId();
@@ -1898,15 +1885,15 @@ AbortTransaction(void)
AtEOXact_UpdateFlatFiles(false);
/*
- * Advertise the fact that we aborted in pg_clog (assuming that we
- * got as far as assigning an XID to advertise).
+ * Advertise the fact that we aborted in pg_clog (assuming that we got as
+ * far as assigning an XID to advertise).
*/
if (TransactionIdIsValid(s->transactionId))
RecordTransactionAbort();
/*
- * Let others know about no transaction in progress by me. Note that
- * this must be done _before_ releasing locks we hold and _after_
+ * Let others know about no transaction in progress by me. Note that this
+ * must be done _before_ releasing locks we hold and _after_
* RecordTransactionAbort.
*/
if (MyProc != NULL)
@@ -2012,8 +1999,8 @@ StartTransactionCommand(void)
switch (s->blockState)
{
/*
- * if we aren't in a transaction block, we just do our usual
- * start transaction.
+ * if we aren't in a transaction block, we just do our usual start
+ * transaction.
*/
case TBLOCK_DEFAULT:
StartTransaction();
@@ -2021,23 +2008,23 @@ StartTransactionCommand(void)
break;
/*
- * We are somewhere in a transaction block or subtransaction
- * and about to start a new command. For now we do nothing,
- * but someday we may do command-local resource initialization.
- * (Note that any needed CommandCounterIncrement was done by
- * the previous CommitTransactionCommand.)
+ * We are somewhere in a transaction block or subtransaction and
+ * about to start a new command. For now we do nothing, but
+ * someday we may do command-local resource initialization. (Note
+ * that any needed CommandCounterIncrement was done by the
+ * previous CommitTransactionCommand.)
*/
case TBLOCK_INPROGRESS:
case TBLOCK_SUBINPROGRESS:
break;
/*
- * Here we are in a failed transaction block (one of
- * the commands caused an abort) so we do nothing but remain in
- * the abort state. Eventually we will get a ROLLBACK command
- * which will get us out of this state. (It is up to other
- * code to ensure that no commands other than ROLLBACK will be
- * processed in these states.)
+ * Here we are in a failed transaction block (one of the commands
+ * caused an abort) so we do nothing but remain in the abort
+ * state. Eventually we will get a ROLLBACK command which will
+ * get us out of this state. (It is up to other code to ensure
+ * that no commands other than ROLLBACK will be processed in these
+ * states.)
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
@@ -2099,10 +2086,10 @@ CommitTransactionCommand(void)
break;
/*
- * We are completing a "BEGIN TRANSACTION" command, so we
- * change to the "transaction block in progress" state and
- * return. (We assume the BEGIN did nothing to the database,
- * so we need no CommandCounterIncrement.)
+ * We are completing a "BEGIN TRANSACTION" command, so we change
+ * to the "transaction block in progress" state and return. (We
+ * assume the BEGIN did nothing to the database, so we need no
+ * CommandCounterIncrement.)
*/
case TBLOCK_BEGIN:
s->blockState = TBLOCK_INPROGRESS;
@@ -2110,8 +2097,8 @@ CommitTransactionCommand(void)
/*
* This is the case when we have finished executing a command
- * someplace within a transaction block. We increment the
- * command counter and return.
+ * someplace within a transaction block. We increment the command
+ * counter and return.
*/
case TBLOCK_INPROGRESS:
case TBLOCK_SUBINPROGRESS:
@@ -2119,8 +2106,8 @@ CommitTransactionCommand(void)
break;
/*
- * We are completing a "COMMIT" command. Do it and return to
- * the idle state.
+ * We are completing a "COMMIT" command. Do it and return to the
+ * idle state.
*/
case TBLOCK_END:
CommitTransaction();
@@ -2128,17 +2115,17 @@ CommitTransactionCommand(void)
break;
/*
- * Here we are in the middle of a transaction block but one of
- * the commands caused an abort so we do nothing but remain in
- * the abort state. Eventually we will get a ROLLBACK comand.
+ * Here we are in the middle of a transaction block but one of the
+ * commands caused an abort so we do nothing but remain in the
+ * abort state. Eventually we will get a ROLLBACK comand.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
break;
/*
- * Here we were in an aborted transaction block and we just
- * got the ROLLBACK command from the user, so clean up the
+ * Here we were in an aborted transaction block and we just got
+ * the ROLLBACK command from the user, so clean up the
* already-aborted transaction and return to the idle state.
*/
case TBLOCK_ABORT_END:
@@ -2147,9 +2134,9 @@ CommitTransactionCommand(void)
break;
/*
- * Here we were in a perfectly good transaction block but the
- * user told us to ROLLBACK anyway. We have to abort the
- * transaction and then clean up.
+ * Here we were in a perfectly good transaction block but the user
+ * told us to ROLLBACK anyway. We have to abort the transaction
+ * and then clean up.
*/
case TBLOCK_ABORT_PENDING:
AbortTransaction();
@@ -2169,8 +2156,8 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
* Start a subtransaction. (DefineSavepoint already did
- * PushTransaction, so as to have someplace to put the
- * SUBBEGIN state.)
+ * PushTransaction, so as to have someplace to put the SUBBEGIN
+ * state.)
*/
case TBLOCK_SUBBEGIN:
StartSubTransaction();
@@ -2259,8 +2246,8 @@ CommitTransactionCommand(void)
break;
/*
- * Same as above, but the subtransaction had already failed,
- * so we don't need AbortSubTransaction.
+ * Same as above, but the subtransaction had already failed, so we
+ * don't need AbortSubTransaction.
*/
case TBLOCK_SUBABORT_RESTART:
{
@@ -2320,8 +2307,8 @@ AbortCurrentTransaction(void)
break;
/*
- * if we aren't in a transaction block, we just do the basic
- * abort & cleanup transaction.
+ * if we aren't in a transaction block, we just do the basic abort
+ * & cleanup transaction.
*/
case TBLOCK_STARTED:
AbortTransaction();
@@ -2330,11 +2317,11 @@ AbortCurrentTransaction(void)
break;
/*
- * If we are in TBLOCK_BEGIN it means something screwed up
- * right after reading "BEGIN TRANSACTION". We assume that
- * the user will interpret the error as meaning the BEGIN
- * failed to get him into a transaction block, so we should
- * abort and return to idle state.
+ * If we are in TBLOCK_BEGIN it means something screwed up right
+ * after reading "BEGIN TRANSACTION". We assume that the user
+ * will interpret the error as meaning the BEGIN failed to get him
+ * into a transaction block, so we should abort and return to idle
+ * state.
*/
case TBLOCK_BEGIN:
AbortTransaction();
@@ -2354,9 +2341,9 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
- * transaction and return to idle state (we do not want to
- * stay in the transaction).
+ * Here, we failed while trying to COMMIT. Clean up the
+ * transaction and return to idle state (we do not want to stay in
+ * the transaction).
*/
case TBLOCK_END:
AbortTransaction();
@@ -2365,9 +2352,9 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we are already in an aborted transaction state and
- * are waiting for a ROLLBACK, but for some reason we failed
- * again! So we just remain in the abort state.
+ * Here, we are already in an aborted transaction state and are
+ * waiting for a ROLLBACK, but for some reason we failed again!
+ * So we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
@@ -2375,8 +2362,8 @@ AbortCurrentTransaction(void)
/*
* We are in a failed transaction and we got the ROLLBACK command.
- * We have already aborted, we just need to cleanup and go to
- * idle state.
+ * We have already aborted, we just need to cleanup and go to idle
+ * state.
*/
case TBLOCK_ABORT_END:
CleanupTransaction();
@@ -2395,8 +2382,8 @@ AbortCurrentTransaction(void)
/*
* Here, we failed while trying to PREPARE. Clean up the
- * transaction and return to idle state (we do not want to
- * stay in the transaction).
+ * transaction and return to idle state (we do not want to stay in
+ * the transaction).
*/
case TBLOCK_PREPARE:
AbortTransaction();
@@ -2406,8 +2393,8 @@ AbortCurrentTransaction(void)
/*
* We got an error inside a subtransaction. Abort just the
- * subtransaction, and go to the persistent SUBABORT state
- * until we get ROLLBACK.
+ * subtransaction, and go to the persistent SUBABORT state until
+ * we get ROLLBACK.
*/
case TBLOCK_SUBINPROGRESS:
AbortSubTransaction();
@@ -2416,7 +2403,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -2479,15 +2466,15 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
stmtType)));
/*
- * Are we inside a function call? If the statement's parameter block
- * was allocated in QueryContext, assume it is an interactive command.
+ * Are we inside a function call? If the statement's parameter block was
+ * allocated in QueryContext, assume it is an interactive command.
* Otherwise assume it is coming from a function.
*/
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
- errmsg("%s cannot be executed from a function", stmtType)));
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT &&
@@ -2529,8 +2516,8 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
/*
- * Are we inside a function call? If the statement's parameter block
- * was allocated in QueryContext, assume it is an interactive command.
+ * Are we inside a function call? If the statement's parameter block was
+ * allocated in QueryContext, assume it is an interactive command.
* Otherwise assume it is coming from a function.
*/
if (!MemoryContextContains(QueryContext, stmtNode))
@@ -2556,8 +2543,8 @@ bool
IsInTransactionChain(void *stmtNode)
{
/*
- * Return true on same conditions that would make
- * PreventTransactionChain error out
+ * Return true on same conditions that would make PreventTransactionChain
+ * error out
*/
if (IsTransactionBlock())
return true;
@@ -2705,8 +2692,7 @@ BeginTransactionBlock(void)
switch (s->blockState)
{
/*
- * We are not inside a transaction block, so allow one to
- * begin.
+ * We are not inside a transaction block, so allow one to begin.
*/
case TBLOCK_STARTED:
s->blockState = TBLOCK_BEGIN;
@@ -2721,7 +2707,7 @@ BeginTransactionBlock(void)
case TBLOCK_SUBABORT:
ereport(WARNING,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("there is already a transaction in progress")));
+ errmsg("there is already a transaction in progress")));
break;
/* These cases are invalid. */
@@ -2759,7 +2745,7 @@ bool
PrepareTransactionBlock(char *gid)
{
TransactionState s;
- bool result;
+ bool result;
/* Set up to commit the current transaction */
result = EndTransactionBlock();
@@ -2832,8 +2818,8 @@ EndTransactionBlock(void)
break;
/*
- * We are in a live subtransaction block. Set up to subcommit
- * all open subtransactions and then commit the main transaction.
+ * We are in a live subtransaction block. Set up to subcommit all
+ * open subtransactions and then commit the main transaction.
*/
case TBLOCK_SUBINPROGRESS:
while (s->parent != NULL)
@@ -2854,9 +2840,9 @@ EndTransactionBlock(void)
break;
/*
- * Here we are inside an aborted subtransaction. Treat the
- * COMMIT as ROLLBACK: set up to abort everything and exit
- * the main transaction.
+ * Here we are inside an aborted subtransaction. Treat the COMMIT
+ * as ROLLBACK: set up to abort everything and exit the main
+ * transaction.
*/
case TBLOCK_SUBABORT:
while (s->parent != NULL)
@@ -2927,9 +2913,9 @@ UserAbortTransactionBlock(void)
switch (s->blockState)
{
/*
- * We are inside a transaction block and we got a ROLLBACK
- * command from the user, so tell CommitTransactionCommand
- * to abort and exit the transaction block.
+ * We are inside a transaction block and we got a ROLLBACK command
+ * from the user, so tell CommitTransactionCommand to abort and
+ * exit the transaction block.
*/
case TBLOCK_INPROGRESS:
s->blockState = TBLOCK_ABORT_PENDING;
@@ -2937,17 +2923,17 @@ UserAbortTransactionBlock(void)
/*
* We are inside a failed transaction block and we got a ROLLBACK
- * command from the user. Abort processing is already done,
- * so CommitTransactionCommand just has to cleanup and go back
- * to idle state.
+ * command from the user. Abort processing is already done, so
+ * CommitTransactionCommand just has to cleanup and go back to
+ * idle state.
*/
case TBLOCK_ABORT:
s->blockState = TBLOCK_ABORT_END;
break;
/*
- * We are inside a subtransaction. Mark everything
- * up to top level as exitable.
+ * We are inside a subtransaction. Mark everything up to top
+ * level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
case TBLOCK_SUBABORT:
@@ -2972,8 +2958,8 @@ UserAbortTransactionBlock(void)
break;
/*
- * The user issued ABORT when not inside a transaction. Issue
- * a WARNING and go to abort state. The upcoming call to
+ * The user issued ABORT when not inside a transaction. Issue a
+ * WARNING and go to abort state. The upcoming call to
* CommitTransactionCommand() will then put us back into the
* default state.
*/
@@ -3021,8 +3007,8 @@ DefineSavepoint(char *name)
s = CurrentTransactionState; /* changed by push */
/*
- * Savepoint names, like the TransactionState block itself,
- * live in TopTransactionContext.
+ * Savepoint names, like the TransactionState block itself, live
+ * in TopTransactionContext.
*/
if (name)
s->name = MemoryContextStrdup(TopTransactionContext, name);
@@ -3078,8 +3064,8 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only
- * valid case.
+ * We are in a non-aborted subtransaction. This is the only valid
+ * case.
*/
case TBLOCK_SUBINPROGRESS:
break;
@@ -3134,8 +3120,8 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets
- * to CommitTransactionCommand.
+ * subtransaction. The actual commits will happen when control gets to
+ * CommitTransactionCommand.
*/
xact = CurrentTransactionState;
for (;;)
@@ -3232,8 +3218,8 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets
- * to CommitTransactionCommand.
+ * subtransaction. The actual aborts will happen when control gets to
+ * CommitTransactionCommand.
*/
xact = CurrentTransactionState;
for (;;)
@@ -3284,8 +3270,8 @@ BeginInternalSubTransaction(char *name)
s = CurrentTransactionState; /* changed by push */
/*
- * Savepoint names, like the TransactionState block itself,
- * live in TopTransactionContext.
+ * Savepoint names, like the TransactionState block itself, live
+ * in TopTransactionContext.
*/
if (name)
s->name = MemoryContextStrdup(TopTransactionContext, name);
@@ -3333,7 +3319,7 @@ ReleaseCurrentSubTransaction(void)
Assert(s->state == TRANS_INPROGRESS);
MemoryContextSwitchTo(CurTransactionContext);
CommitSubTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
Assert(s->state == TRANS_INPROGRESS);
}
@@ -3433,8 +3419,7 @@ AbortOutOfAnyTransaction(void)
break;
/*
- * In a subtransaction, so clean it up and abort parent
- * too
+ * In a subtransaction, so clean it up and abort parent too
*/
case TBLOCK_SUBBEGIN:
case TBLOCK_SUBINPROGRESS:
@@ -3667,9 +3652,9 @@ CommitSubTransaction(void)
s->parent->subTransactionId);
/*
- * We need to restore the upper transaction's read-only state, in case
- * the upper is read-write while the child is read-only; GUC will
- * incorrectly think it should leave the child state in place.
+ * We need to restore the upper transaction's read-only state, in case the
+ * upper is read-write while the child is read-only; GUC will incorrectly
+ * think it should leave the child state in place.
*/
XactReadOnly = s->prevXactReadOnly;
@@ -3706,8 +3691,8 @@ AbortSubTransaction(void)
/*
* Release any LW locks we might be holding as quickly as possible.
* (Regular locks, however, must be held till we finish aborting.)
- * Releasing LW locks is critical since we might try to grab them
- * again while cleaning up!
+ * Releasing LW locks is critical since we might try to grab them again
+ * while cleaning up!
*
* FIXME This may be incorrect --- Are there some locks we should keep?
* Buffer locks, for example? I don't think so but I'm not sure.
@@ -3726,8 +3711,8 @@ AbortSubTransaction(void)
AtSubAbort_ResourceOwner();
/*
- * We can skip all this stuff if the subxact failed before creating
- * a ResourceOwner...
+ * We can skip all this stuff if the subxact failed before creating a
+ * ResourceOwner...
*/
if (s->curTransactionOwner)
{
@@ -3777,25 +3762,23 @@ AbortSubTransaction(void)
}
/*
- * Reset user id which might have been changed transiently. Here we
- * want to restore to the userid that was current at subxact entry.
- * (As in AbortTransaction, we need not worry about the session
- * userid.)
+ * Reset user id which might have been changed transiently. Here we want
+ * to restore to the userid that was current at subxact entry. (As in
+ * AbortTransaction, we need not worry about the session userid.)
*
- * Must do this after AtEOXact_GUC to handle the case where we entered
- * the subxact inside a SECURITY DEFINER function (hence current and
- * session userids were different) and then session auth was changed
- * inside the subxact. GUC will reset both current and session
- * userids to the entry-time session userid. This is right in every
- * other scenario so it seems simplest to let GUC do that and fix it
- * here.
+ * Must do this after AtEOXact_GUC to handle the case where we entered the
+ * subxact inside a SECURITY DEFINER function (hence current and session
+ * userids were different) and then session auth was changed inside the
+ * subxact. GUC will reset both current and session userids to the
+ * entry-time session userid. This is right in every other scenario so it
+ * seems simplest to let GUC do that and fix it here.
*/
SetUserId(s->currentUser);
/*
- * Restore the upper transaction's read-only state, too. This should
- * be redundant with GUC's cleanup but we may as well do it for
- * consistency with the commit case.
+ * Restore the upper transaction's read-only state, too. This should be
+ * redundant with GUC's cleanup but we may as well do it for consistency
+ * with the commit case.
*/
XactReadOnly = s->prevXactReadOnly;
@@ -3846,11 +3829,11 @@ PushTransaction(void)
{
TransactionState p = CurrentTransactionState;
TransactionState s;
- Oid currentUser;
+ Oid currentUser;
/*
- * At present, GetUserId cannot fail, but let's not assume that. Get
- * the ID before entering the critical code sequence.
+ * At present, GetUserId cannot fail, but let's not assume that. Get the
+ * ID before entering the critical code sequence.
*/
currentUser = GetUserId();
@@ -3860,6 +3843,7 @@ PushTransaction(void)
s = (TransactionState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(TransactionStateData));
+
/*
* Assign a subtransaction ID, watching out for counter wraparound.
*/
@@ -3872,11 +3856,12 @@ PushTransaction(void)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot have more than 2^32-1 subtransactions in a transaction")));
}
+
/*
* We can now stack a minimally valid subtransaction without fear of
* failure.
*/
- s->transactionId = InvalidTransactionId; /* until assigned */
+ s->transactionId = InvalidTransactionId; /* until assigned */
s->subTransactionId = currentSubTransactionId;
s->parent = p;
s->nestingLevel = p->nestingLevel + 1;
@@ -3889,10 +3874,10 @@ PushTransaction(void)
CurrentTransactionState = s;
/*
- * AbortSubTransaction and CleanupSubTransaction have to be able to
- * cope with the subtransaction from here on out; in particular they
- * should not assume that it necessarily has a transaction context,
- * resource owner, or XID.
+ * AbortSubTransaction and CleanupSubTransaction have to be able to cope
+ * with the subtransaction from here on out; in particular they should not
+ * assume that it necessarily has a transaction context, resource owner,
+ * or XID.
*/
}
@@ -3959,7 +3944,7 @@ ShowTransactionStateRec(TransactionState s)
/* use ereport to suppress computation if msg will not be printed */
ereport(DEBUG3,
(errmsg_internal("name: %s; blockState: %13s; state: %7s, xid/subid/cid: %u/%u/%u, nestlvl: %d, children: %s",
- PointerIsValid(s->name) ? s->name : "unnamed",
+ PointerIsValid(s->name) ? s->name : "unnamed",
BlockStateAsString(s->blockState),
TransStateAsString(s->state),
(unsigned int) s->transactionId,
@@ -4215,7 +4200,7 @@ xact_desc_commit(char *buf, xl_xact_commit *xlrec)
if (xlrec->nsubxacts > 0)
{
TransactionId *xacts = (TransactionId *)
- &xlrec->xnodes[xlrec->nrels];
+ &xlrec->xnodes[xlrec->nrels];
sprintf(buf + strlen(buf), "; subxacts:");
for (i = 0; i < xlrec->nsubxacts; i++)
@@ -4246,7 +4231,7 @@ xact_desc_abort(char *buf, xl_xact_abort *xlrec)
if (xlrec->nsubxacts > 0)
{
TransactionId *xacts = (TransactionId *)
- &xlrec->xnodes[xlrec->nrels];
+ &xlrec->xnodes[xlrec->nrels];
sprintf(buf + strlen(buf), "; subxacts:");
for (i = 0; i < xlrec->nsubxacts; i++)
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 878d7e21efc..7a37c656dc1 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.219 2005/10/03 00:28:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.220 2005/10/15 02:49:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@
/*
* Because O_DIRECT bypasses the kernel buffers, and because we never
* read those buffers except during crash recovery, it is a win to use
- * it in all cases where we sync on each write(). We could allow O_DIRECT
+ * it in all cases where we sync on each write(). We could allow O_DIRECT
* with fsync(), but because skipping the kernel buffer forces writes out
* quickly, it seems best just to use it for O_SYNC. It is hard to imagine
* how fsync() could be a win for O_DIRECT compared to O_SYNC and O_DIRECT.
@@ -85,14 +85,14 @@
#if O_DSYNC != BARE_OPEN_SYNC_FLAG
#define OPEN_DATASYNC_FLAG (O_DSYNC | PG_O_DIRECT)
#endif
-#else /* !defined(OPEN_SYNC_FLAG) */
+#else /* !defined(OPEN_SYNC_FLAG) */
/* Win32 only has O_DSYNC */
#define OPEN_DATASYNC_FLAG (O_DSYNC | PG_O_DIRECT)
#endif
#endif
#if defined(OPEN_DATASYNC_FLAG)
-#define DEFAULT_SYNC_METHOD_STR "open_datasync"
+#define DEFAULT_SYNC_METHOD_STR "open_datasync"
#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
#define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#elif defined(HAVE_FDATASYNC)
@@ -154,7 +154,7 @@ bool XLOG_DEBUG = false;
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
-int sync_method = DEFAULT_SYNC_METHOD;
+int sync_method = DEFAULT_SYNC_METHOD;
static int open_sync_bit = DEFAULT_SYNC_FLAGBIT;
#define XLOG_SYNC_BIT (enableFsync ? open_sync_bit : 0)
@@ -368,10 +368,9 @@ typedef struct XLogCtlData
XLogCtlWrite Write;
/*
- * These values do not change after startup, although the pointed-to
- * pages and xlblocks values certainly do. Permission to read/write
- * the pages and xlblocks values depends on WALInsertLock and
- * WALWriteLock.
+ * These values do not change after startup, although the pointed-to pages
+ * and xlblocks values certainly do. Permission to read/write the pages
+ * and xlblocks values depends on WALInsertLock and WALWriteLock.
*/
char *pages; /* buffers for unwritten XLOG pages */
XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
@@ -449,8 +448,8 @@ static char *readRecordBuf = NULL;
static uint32 readRecordBufSize = 0;
/* State information for XLOG reading */
-static XLogRecPtr ReadRecPtr; /* start of last record read */
-static XLogRecPtr EndRecPtr; /* end+1 of last record read */
+static XLogRecPtr ReadRecPtr; /* start of last record read */
+static XLogRecPtr EndRecPtr; /* end+1 of last record read */
static XLogRecord *nextRecord = NULL;
static TimeLineID lastPageTLI = 0;
@@ -467,7 +466,7 @@ static void exitArchiveRecovery(TimeLineID endTLI,
static bool recoveryStopsHere(XLogRecord *record, bool *includeThis);
static bool XLogCheckBuffer(XLogRecData *rdata,
- XLogRecPtr *lsn, BkpBlock *bkpb);
+ XLogRecPtr *lsn, BkpBlock *bkpb);
static bool AdvanceXLInsertBuffer(void);
static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible);
static int XLogFileInit(uint32 log, uint32 seg,
@@ -481,7 +480,7 @@ static bool RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize);
static int PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr,
- int *nsegsremoved, int *nsegsrecycled);
+ int *nsegsremoved, int *nsegsrecycled);
static void RemoveOldBackupHistory(void);
static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode);
static bool ValidXLOGHeader(XLogPageHeader hdr, int emode);
@@ -554,36 +553,34 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
}
/*
- * In bootstrap mode, we don't actually log anything but XLOG
- * resources; return a phony record pointer.
+ * In bootstrap mode, we don't actually log anything but XLOG resources;
+ * return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
RecPtr.xlogid = 0;
- RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt
- * record */
+ RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt record */
return (RecPtr);
}
/*
* Here we scan the rdata chain, determine which buffers must be backed
* up, and compute the CRC values for the data. Note that the record
- * header isn't added into the CRC initially since we don't know the
- * final length or info bits quite yet. Thus, the CRC will represent
- * the CRC of the whole record in the order "rdata, then backup blocks,
- * then record header".
+ * header isn't added into the CRC initially since we don't know the final
+ * length or info bits quite yet. Thus, the CRC will represent the CRC of
+ * the whole record in the order "rdata, then backup blocks, then record
+ * header".
*
- * We may have to loop back to here if a race condition is detected
- * below. We could prevent the race by doing all this work while
- * holding the insert lock, but it seems better to avoid doing CRC
- * calculations while holding the lock. This means we have to be
- * careful about modifying the rdata chain until we know we aren't
- * going to loop back again. The only change we allow ourselves to
- * make earlier is to set rdt->data = NULL in chain items we have
- * decided we will have to back up the whole buffer for. This is OK
- * because we will certainly decide the same thing again for those
- * items if we do it over; doing it here saves an extra pass over the
- * chain later.
+ * We may have to loop back to here if a race condition is detected below. We
+ * could prevent the race by doing all this work while holding the insert
+ * lock, but it seems better to avoid doing CRC calculations while holding
+ * the lock. This means we have to be careful about modifying the rdata
+ * chain until we know we aren't going to loop back again. The only
+ * change we allow ourselves to make earlier is to set rdt->data = NULL in
+ * chain items we have decided we will have to back up the whole buffer
+ * for. This is OK because we will certainly decide the same thing again
+ * for those items if we do it over; doing it here saves an extra pass
+ * over the chain later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -680,12 +677,12 @@ begin:;
}
/*
- * NOTE: the test for len == 0 here is somewhat fishy, since in theory
- * all of the rmgr data might have been suppressed in favor of backup
- * blocks. Currently, all callers of XLogInsert provide at least some
- * not-in-a-buffer data and so len == 0 should never happen, but that
- * may not be true forever. If you need to remove the len == 0 check,
- * also remove the check for xl_len == 0 in ReadRecord, below.
+ * NOTE: the test for len == 0 here is somewhat fishy, since in theory all
+ * of the rmgr data might have been suppressed in favor of backup blocks.
+ * Currently, all callers of XLogInsert provide at least some
+ * not-in-a-buffer data and so len == 0 should never happen, but that may
+ * not be true forever. If you need to remove the len == 0 check, also
+ * remove the check for xl_len == 0 in ReadRecord, below.
*/
if (len == 0)
elog(PANIC, "invalid xlog record length %u", len);
@@ -718,9 +715,9 @@ begin:;
* Since the amount of data we write here is completely optional
* anyway, tell XLogWrite it can be "flexible" and stop at a
* convenient boundary. This allows writes triggered by this
- * mechanism to synchronize with the cache boundaries, so that
- * in a long transaction we'll basically dump alternating halves
- * of the buffer array.
+ * mechanism to synchronize with the cache boundaries, so that in
+ * a long transaction we'll basically dump alternating halves of
+ * the buffer array.
*/
LogwrtResult = XLogCtl->Write.LogwrtResult;
if (XLByteLT(LogwrtResult.Write, LogwrtRqst.Write))
@@ -733,10 +730,9 @@ begin:;
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * Check to see if my RedoRecPtr is out of date. If so, may have to
- * go back and recompute everything. This can only happen just after
- * a checkpoint, so it's better to be slow in this case and fast
- * otherwise.
+ * Check to see if my RedoRecPtr is out of date. If so, may have to go
+ * back and recompute everything. This can only happen just after a
+ * checkpoint, so it's better to be slow in this case and fast otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@@ -751,8 +747,8 @@ begin:;
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
/*
- * Oops, this buffer now needs to be backed up, but we
- * didn't think so above. Start over.
+ * Oops, this buffer now needs to be backed up, but we didn't
+ * think so above. Start over.
*/
LWLockRelease(WALInsertLock);
END_CRIT_SECTION();
@@ -762,15 +758,14 @@ begin:;
}
/*
- * Make additional rdata chain entries for the backup blocks, so that
- * we don't need to special-case them in the write loop. Note that we
- * have now irrevocably changed the input rdata chain. At the exit of
- * this loop, write_len includes the backup block data.
+ * Make additional rdata chain entries for the backup blocks, so that we
+ * don't need to special-case them in the write loop. Note that we have
+ * now irrevocably changed the input rdata chain. At the exit of this
+ * loop, write_len includes the backup block data.
*
- * Also set the appropriate info bits to show which buffers were backed
- * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
- * distinct buffer value (ignoring InvalidBuffer) appearing in the
- * rdata chain.
+ * Also set the appropriate info bits to show which buffers were backed up.
+ * The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer
+ * value (ignoring InvalidBuffer) appearing in the rdata chain.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -822,8 +817,7 @@ begin:;
/*
* If there isn't enough space on the current XLOG page for a record
- * header, advance to the next page (leaving the unused space as
- * zeroes).
+ * header, advance to the next page (leaving the unused space as zeroes).
*/
updrqst = false;
freespace = INSERT_FREESPACE(Insert);
@@ -925,15 +919,15 @@ begin:;
freespace = INSERT_FREESPACE(Insert);
/*
- * The recptr I return is the beginning of the *next* record. This
- * will be stored as LSN for changed data pages...
+ * The recptr I return is the beginning of the *next* record. This will be
+ * stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
- updrqst = true; /* curridx is filled and available for
- * writing out */
+ updrqst = true; /* curridx is filled and available for writing
+ * out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@@ -975,9 +969,9 @@ XLogCheckBuffer(XLogRecData *rdata,
page = (PageHeader) BufferGetBlock(rdata->buffer);
/*
- * XXX We assume page LSN is first data on *every* page that can be
- * passed to XLogInsert, whether it otherwise has the standard page
- * layout or not.
+ * XXX We assume page LSN is first data on *every* page that can be passed
+ * to XLogInsert, whether it otherwise has the standard page layout or
+ * not.
*/
*lsn = page->pd_lsn;
@@ -1163,9 +1157,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
- * Get ending-offset of the buffer page we need to replace (this may
- * be zero if the buffer hasn't been used yet). Fall through if it's
- * already written out.
+ * Get ending-offset of the buffer page we need to replace (this may be
+ * zero if the buffer hasn't been used yet). Fall through if it's already
+ * written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@@ -1208,9 +1202,8 @@ AdvanceXLInsertBuffer(void)
else
{
/*
- * Have to write buffers while holding insert lock. This
- * is not good, so only write as much as we absolutely
- * must.
+ * Have to write buffers while holding insert lock. This is
+ * not good, so only write as much as we absolutely must.
*/
WriteRqst.Write = OldPageRqstPtr;
WriteRqst.Flush.xlogid = 0;
@@ -1223,8 +1216,8 @@ AdvanceXLInsertBuffer(void)
}
/*
- * Now the next buffer slot is free and we can set it up to be the
- * next output page.
+ * Now the next buffer slot is free and we can set it up to be the next
+ * output page.
*/
NewPageEndPtr = XLogCtl->xlblocks[Insert->curridx];
if (NewPageEndPtr.xrecoff >= XLogFileSize)
@@ -1237,24 +1230,27 @@ AdvanceXLInsertBuffer(void)
NewPageEndPtr.xrecoff += BLCKSZ;
XLogCtl->xlblocks[nextidx] = NewPageEndPtr;
NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) BLCKSZ);
+
Insert->curridx = nextidx;
Insert->currpage = NewPage;
- Insert->currpos = ((char *) NewPage) + SizeOfXLogShortPHD;
+
+ Insert->currpos = ((char *) NewPage) +SizeOfXLogShortPHD;
/*
- * Be sure to re-zero the buffer so that bytes beyond what we've
- * written will look like zeroes and not valid XLOG records...
+ * Be sure to re-zero the buffer so that bytes beyond what we've written
+ * will look like zeroes and not valid XLOG records...
*/
MemSet((char *) NewPage, 0, BLCKSZ);
/*
* Fill the new page's header
*/
- NewPage->xlp_magic = XLOG_PAGE_MAGIC;
+ NewPage ->xlp_magic = XLOG_PAGE_MAGIC;
+
/* NewPage->xlp_info = 0; */ /* done by memset */
- NewPage->xlp_tli = ThisTimeLineID;
- NewPage->xlp_pageaddr.xlogid = NewPageEndPtr.xlogid;
- NewPage->xlp_pageaddr.xrecoff = NewPageEndPtr.xrecoff - BLCKSZ;
+ NewPage ->xlp_tli = ThisTimeLineID;
+ NewPage ->xlp_pageaddr.xlogid = NewPageEndPtr.xlogid;
+ NewPage ->xlp_pageaddr.xrecoff = NewPageEndPtr.xrecoff - BLCKSZ;
/*
* If first page of an XLOG segment file, make it a long header.
@@ -1265,8 +1261,9 @@ AdvanceXLInsertBuffer(void)
NewLongPage->xlp_sysid = ControlFile->system_identifier;
NewLongPage->xlp_seg_size = XLogSegSize;
- NewPage->xlp_info |= XLP_LONG_HEADER;
- Insert->currpos = ((char *) NewPage) + SizeOfXLogLongPHD;
+ NewPage ->xlp_info |= XLP_LONG_HEADER;
+
+ Insert->currpos = ((char *) NewPage) +SizeOfXLogLongPHD;
}
return update_needed;
@@ -1298,19 +1295,18 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
Assert(CritSectionCount > 0);
/*
- * Update local LogwrtResult (caller probably did this already,
- * but...)
+ * Update local LogwrtResult (caller probably did this already, but...)
*/
LogwrtResult = Write->LogwrtResult;
/*
* Since successive pages in the xlog cache are consecutively allocated,
* we can usually gather multiple pages together and issue just one
- * write() call. npages is the number of pages we have determined can
- * be written together; startidx is the cache block index of the first
- * one, and startoffset is the file offset at which it should go.
- * The latter two variables are only valid when npages > 0, but we must
- * initialize all of them to keep the compiler quiet.
+ * write() call. npages is the number of pages we have determined can be
+ * written together; startidx is the cache block index of the first one,
+ * and startoffset is the file offset at which it should go. The latter
+ * two variables are only valid when npages > 0, but we must initialize
+ * all of them to keep the compiler quiet.
*/
npages = 0;
startidx = 0;
@@ -1320,18 +1316,17 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
* Within the loop, curridx is the cache block index of the page to
* consider writing. We advance Write->curridx only after successfully
* writing pages. (Right now, this refinement is useless since we are
- * going to PANIC if any error occurs anyway; but someday it may come
- * in useful.)
+ * going to PANIC if any error occurs anyway; but someday it may come in
+ * useful.)
*/
curridx = Write->curridx;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
/*
- * Make sure we're not ahead of the insert process. This could
- * happen if we're passed a bogus WriteRqst.Write that is past the
- * end of the last page that's been initialized by
- * AdvanceXLInsertBuffer.
+ * Make sure we're not ahead of the insert process. This could happen
+ * if we're passed a bogus WriteRqst.Write that is past the end of the
+ * last page that's been initialized by AdvanceXLInsertBuffer.
*/
if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[curridx]))
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
@@ -1355,8 +1350,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
if (close(openLogFile))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not close log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not close log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1379,13 +1374,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
UpdateControlFile();
/*
- * Signal bgwriter to start a checkpoint if it's been
- * too long since the last one. (We look at local copy of
- * RedoRecPtr which might be a little out of date, but
- * should be close enough for this purpose.)
+ * Signal bgwriter to start a checkpoint if it's been too long
+ * since the last one. (We look at local copy of RedoRecPtr
+ * which might be a little out of date, but should be close
+ * enough for this purpose.)
*
- * A straight computation of segment number could overflow
- * 32 bits. Rather than assuming we have working 64-bit
+ * A straight computation of segment number could overflow 32
+ * bits. Rather than assuming we have working 64-bit
* arithmetic, we compare the highest-order bits separately,
* and force a checkpoint immediately when they change.
*/
@@ -1434,10 +1429,10 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
npages++;
/*
- * Dump the set if this will be the last loop iteration, or if
- * we are at the last page of the cache area (since the next page
- * won't be contiguous in memory), or if we are at the end of the
- * logfile segment.
+ * Dump the set if this will be the last loop iteration, or if we are
+ * at the last page of the cache area (since the next page won't be
+ * contiguous in memory), or if we are at the end of the logfile
+ * segment.
*/
finishing_seg = !ispartialpage &&
(startoffset + npages * BLCKSZ) >= XLogSegSize;
@@ -1496,7 +1491,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
if (finishing_seg)
{
issue_xlog_fsync();
- LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
+ LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
if (XLogArchivingActive())
XLogArchiveNotifySeg(openLogId, openLogSeg);
@@ -1526,20 +1521,20 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
/*
- * Could get here without iterating above loop, in which case we
- * might have no open file or the wrong one. However, we do not
- * need to fsync more than one file.
+ * Could get here without iterating above loop, in which case we might
+ * have no open file or the wrong one. However, we do not need to
+ * fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
if (openLogFile >= 0 &&
- !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
+ !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not close log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not close log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@@ -1557,8 +1552,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
* Update shared-memory status
*
* We make sure that the shared 'request' values do not fall behind the
- * 'result' values. This is not absolutely essential, but it saves
- * some code in a couple of places.
+ * 'result' values. This is not absolutely essential, but it saves some
+ * code in a couple of places.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -1608,11 +1603,10 @@ XLogFlush(XLogRecPtr record)
/*
* Since fsync is usually a horribly expensive operation, we try to
- * piggyback as much data as we can on each fsync: if we see any more
- * data entered into the xlog buffer, we'll write and fsync that too,
- * so that the final value of LogwrtResult.Flush is as large as
- * possible. This gives us some chance of avoiding another fsync
- * immediately after.
+ * piggyback as much data as we can on each fsync: if we see any more data
+ * entered into the xlog buffer, we'll write and fsync that too, so that
+ * the final value of LogwrtResult.Flush is as large as possible. This
+ * gives us some chance of avoiding another fsync immediately after.
*/
/* initialize to given target; may increase below */
@@ -1669,31 +1663,29 @@ XLogFlush(XLogRecPtr record)
/*
* If we still haven't flushed to the request point then we have a
- * problem; most likely, the requested flush point is past end of
- * XLOG. This has been seen to occur when a disk page has a corrupted
- * LSN.
+ * problem; most likely, the requested flush point is past end of XLOG.
+ * This has been seen to occur when a disk page has a corrupted LSN.
*
- * Formerly we treated this as a PANIC condition, but that hurts the
- * system's robustness rather than helping it: we do not want to take
- * down the whole system due to corruption on one data page. In
- * particular, if the bad page is encountered again during recovery
- * then we would be unable to restart the database at all! (This
- * scenario has actually happened in the field several times with 7.1
- * releases. Note that we cannot get here while InRedo is true, but if
- * the bad page is brought in and marked dirty during recovery then
- * CreateCheckPoint will try to flush it at the end of recovery.)
+ * Formerly we treated this as a PANIC condition, but that hurts the system's
+ * robustness rather than helping it: we do not want to take down the
+ * whole system due to corruption on one data page. In particular, if the
+ * bad page is encountered again during recovery then we would be unable
+ * to restart the database at all! (This scenario has actually happened
+ * in the field several times with 7.1 releases. Note that we cannot get
+ * here while InRedo is true, but if the bad page is brought in and marked
+ * dirty during recovery then CreateCheckPoint will try to flush it at the
+ * end of recovery.)
*
- * The current approach is to ERROR under normal conditions, but only
- * WARNING during recovery, so that the system can be brought up even
- * if there's a corrupt LSN. Note that for calls from xact.c, the
- * ERROR will be promoted to PANIC since xact.c calls this routine
- * inside a critical section. However, calls from bufmgr.c are not
- * within critical sections and so we will not force a restart for a
- * bad LSN on a data page.
+ * The current approach is to ERROR under normal conditions, but only WARNING
+ * during recovery, so that the system can be brought up even if there's a
+ * corrupt LSN. Note that for calls from xact.c, the ERROR will be
+ * promoted to PANIC since xact.c calls this routine inside a critical
+ * section. However, calls from bufmgr.c are not within critical sections
+ * and so we will not force a restart for a bad LSN on a data page.
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(InRecovery ? WARNING : ERROR,
- "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
+ "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
record.xlogid, record.xrecoff,
LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
}
@@ -1734,8 +1726,7 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFilePath(path, ThisTimeLineID, log, seg);
/*
- * Try to use existent file (checkpoint maker may have created it
- * already)
+ * Try to use existent file (checkpoint maker may have created it already)
*/
if (*use_existent)
{
@@ -1754,10 +1745,10 @@ XLogFileInit(uint32 log, uint32 seg,
}
/*
- * Initialize an empty (all zeroes) segment. NOTE: it is possible
- * that another process is doing the same thing. If so, we will end
- * up pre-creating an extra log segment. That seems OK, and better
- * than holding the lock throughout this lengthy process.
+ * Initialize an empty (all zeroes) segment. NOTE: it is possible that
+ * another process is doing the same thing. If so, we will end up
+ * pre-creating an extra log segment. That seems OK, and better than
+ * holding the lock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
@@ -1772,13 +1763,13 @@ XLogFileInit(uint32 log, uint32 seg,
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that
- * all the file space has really been allocated --- on platforms that
- * allow "holes" in files, just seeking to the end doesn't allocate
- * intermediate space. This way, we know that we have all the space
- * and (after the fsync below) that all the indirect blocks are down
- * on disk. Therefore, fdatasync(2) or O_DSYNC will be sufficient to
- * sync future writes to the log file.
+ * Zero-fill the file. We have to do this the hard way to ensure that all
+ * the file space has really been allocated --- on platforms that allow
+ * "holes" in files, just seeking to the end doesn't allocate intermediate
+ * space. This way, we know that we have all the space and (after the
+ * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
+ * log file.
*/
MemSet(zbuffer, 0, sizeof(zbuffer));
for (nbytes = 0; nbytes < XLogSegSize; nbytes += sizeof(zbuffer))
@@ -1789,8 +1780,7 @@ XLogFileInit(uint32 log, uint32 seg,
int save_errno = errno;
/*
- * If we fail to make the file, delete it to release disk
- * space
+ * If we fail to make the file, delete it to release disk space
*/
unlink(tmppath);
/* if write didn't set errno, assume problem is no disk space */
@@ -1798,7 +1788,7 @@ XLogFileInit(uint32 log, uint32 seg,
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
@@ -1816,9 +1806,9 @@ XLogFileInit(uint32 log, uint32 seg,
* Now move the segment into place with its final name.
*
* If caller didn't want to use a pre-existing file, get rid of any
- * pre-existing file. Otherwise, cope with possibility that someone
- * else has created the file while we were filling ours: if so, use
- * ours to pre-create a future log segment.
+ * pre-existing file. Otherwise, cope with possibility that someone else
+ * has created the file while we were filling ours: if so, use ours to
+ * pre-create a future log segment.
*/
installed_log = log;
installed_seg = seg;
@@ -1840,8 +1830,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return (fd);
}
@@ -1908,7 +1898,7 @@ XLogFileCopy(uint32 log, uint32 seg,
errmsg("could not read file \"%s\": %m", path)));
else
ereport(ERROR,
- (errmsg("not enough data in file \"%s\"", path)));
+ (errmsg("not enough data in file \"%s\"", path)));
}
errno = 0;
if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer))
@@ -1916,8 +1906,7 @@ XLogFileCopy(uint32 log, uint32 seg,
int save_errno = errno;
/*
- * If we fail to make the file, delete it to release disk
- * space
+ * If we fail to make the file, delete it to release disk space
*/
unlink(tmppath);
/* if write didn't set errno, assume problem is no disk space */
@@ -1925,7 +1914,7 @@ XLogFileCopy(uint32 log, uint32 seg,
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
@@ -2057,8 +2046,8 @@ XLogFileOpen(uint32 log, uint32 seg)
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return fd;
}
@@ -2075,14 +2064,14 @@ XLogFileRead(uint32 log, uint32 seg, int emode)
int fd;
/*
- * Loop looking for a suitable timeline ID: we might need to read any
- * of the timelines listed in expectedTLIs.
+ * Loop looking for a suitable timeline ID: we might need to read any of
+ * the timelines listed in expectedTLIs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow
- * curFileTLI to go backwards; this prevents us from picking up the
- * wrong file when a parent timeline extends to higher segment numbers
- * than the child we want to read.
+ * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * to go backwards; this prevents us from picking up the wrong file when a
+ * parent timeline extends to higher segment numbers than the child we
+ * want to read.
*/
foreach(cell, expectedTLIs)
{
@@ -2111,8 +2100,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode)
if (errno != ENOENT) /* unexpected failure? */
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
}
/* Couldn't find it. For simplicity, complain about front timeline */
@@ -2120,8 +2109,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode)
errno = ENOENT;
ereport(emode,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return -1;
}
@@ -2152,29 +2141,27 @@ RestoreArchivedFile(char *path, const char *xlogfname,
struct stat stat_buf;
/*
- * When doing archive recovery, we always prefer an archived log file
- * even if a file of the same name exists in XLOGDIR. The reason is
- * that the file in XLOGDIR could be an old, un-filled or
- * partly-filled version that was copied and restored as part of
- * backing up $PGDATA.
+ * When doing archive recovery, we always prefer an archived log file even
+ * if a file of the same name exists in XLOGDIR. The reason is that the
+ * file in XLOGDIR could be an old, un-filled or partly-filled version
+ * that was copied and restored as part of backing up $PGDATA.
*
* We could try to optimize this slightly by checking the local copy
- * lastchange timestamp against the archived copy, but we have no API
- * to do this, nor can we guarantee that the lastchange timestamp was
- * preserved correctly when we copied to archive. Our aim is
- * robustness, so we elect not to do this.
+ * lastchange timestamp against the archived copy, but we have no API to
+ * do this, nor can we guarantee that the lastchange timestamp was
+ * preserved correctly when we copied to archive. Our aim is robustness,
+ * so we elect not to do this.
*
- * If we cannot obtain the log file from the archive, however, we will
- * try to use the XLOGDIR file if it exists. This is so that we can
- * make use of log segments that weren't yet transferred to the
- * archive.
+ * If we cannot obtain the log file from the archive, however, we will try to
+ * use the XLOGDIR file if it exists. This is so that we can make use of
+ * log segments that weren't yet transferred to the archive.
*
- * Notice that we don't actually overwrite any files when we copy back
- * from archive because the recoveryRestoreCommand may inadvertently
- * restore inappropriate xlogs, or they may be corrupt, so we may wish
- * to fallback to the segments remaining in current XLOGDIR later. The
- * copy-from-archive filename is always the same, ensuring that we
- * don't run out of disk space on long recoveries.
+ * Notice that we don't actually overwrite any files when we copy back from
+ * archive because the recoveryRestoreCommand may inadvertently restore
+ * inappropriate xlogs, or they may be corrupt, so we may wish to fallback
+ * to the segments remaining in current XLOGDIR later. The
+ * copy-from-archive filename is always the same, ensuring that we don't
+ * run out of disk space on long recoveries.
*/
snprintf(xlogpath, MAXPGPATH, XLOGDIR "/%s", recovername);
@@ -2259,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
- * XXX I made wrong-size a fatal error to ensure the DBA would notice
- * it, but is that too strong? We could try to plow ahead with a
- * local copy of the file ... but the problem is that there
- * probably isn't one, and we'd incorrectly conclude we've reached
- * the end of WAL and we're done recovering ...
+ * XXX I made wrong-size a fatal error to ensure the DBA would notice it,
+ * but is that too strong? We could try to plow ahead with a local
+ * copy of the file ... but the problem is that there probably isn't
+ * one, and we'd incorrectly conclude we've reached the end of WAL and
+ * we're done recovering ...
*/
if (stat(xlogpath, &stat_buf) == 0)
{
@@ -2296,18 +2283,17 @@ RestoreArchivedFile(char *path, const char *xlogfname,
/*
* remember, we rollforward UNTIL the restore fails so failure here is
* just part of the process... that makes it difficult to determine
- * whether the restore failed because there isn't an archive to
- * restore, or because the administrator has specified the restore
- * program incorrectly. We have to assume the former.
+ * whether the restore failed because there isn't an archive to restore,
+ * or because the administrator has specified the restore program
+ * incorrectly. We have to assume the former.
*/
ereport(DEBUG2,
- (errmsg("could not restore file \"%s\" from archive: return code %d",
- xlogfname, rc)));
+ (errmsg("could not restore file \"%s\" from archive: return code %d",
+ xlogfname, rc)));
/*
- * if an archived file is not available, there might still be a
- * version of this file in XLOGDIR, so return that as the filename to
- * open.
+ * if an archived file is not available, there might still be a version of
+ * this file in XLOGDIR, so return that as the filename to open.
*
* In many recovery scenarios we expect this to fail also, but if so that
* just means we've reached the end of WAL.
@@ -2375,8 +2361,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr,
if (xldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLOGDIR)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLOGDIR)));
XLogFileName(lastoff, ThisTimeLineID, log, seg);
@@ -2384,14 +2370,14 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr,
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
- * we won't prematurely remove a segment from a parent timeline.
- * We could probably be a little more proactive about removing
- * segments of non-parent timelines, but that would be a whole lot
- * more complicated.
+ * deciding whether a segment is still needed. This ensures that we
+ * won't prematurely remove a segment from a parent timeline. We could
+ * probably be a little more proactive about removing segments of
+ * non-parent timelines, but that would be a whole lot more
+ * complicated.
*
- * We use the alphanumeric sorting property of the filenames to
- * decide which ones are earlier than the lastoff segment.
+ * We use the alphanumeric sorting property of the filenames to decide
+ * which ones are earlier than the lastoff segment.
*/
if (strlen(xlde->d_name) == 24 &&
strspn(xlde->d_name, "0123456789ABCDEF") == 24 &&
@@ -2409,16 +2395,16 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr,
snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name);
/*
- * Before deleting the file, see if it can be recycled as
- * a future log segment.
+ * Before deleting the file, see if it can be recycled as a
+ * future log segment.
*/
if (InstallXLogFileSegment(&endlogId, &endlogSeg, path,
true, &max_advance,
true))
{
ereport(DEBUG2,
- (errmsg("recycled transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
(*nsegsrecycled)++;
/* Needn't recheck that slot on future iterations */
if (max_advance > 0)
@@ -2431,8 +2417,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr,
{
/* No need for any more future segments... */
ereport(DEBUG2,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
(*nsegsremoved)++;
}
@@ -2459,8 +2445,8 @@ RemoveOldBackupHistory(void)
if (xldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLOGDIR)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLOGDIR)));
while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
{
@@ -2473,8 +2459,8 @@ RemoveOldBackupHistory(void)
if (!XLogArchivingActive() || XLogArchiveIsDone(xlde->d_name))
{
ereport(DEBUG2,
- (errmsg("removing transaction log backup history file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log backup history file \"%s\"",
+ xlde->d_name)));
snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name);
unlink(path);
XLogArchiveCleanup(xlde->d_name);
@@ -2576,7 +2562,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
- uint32 blen;
+ uint32 blen;
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue;
@@ -2611,8 +2597,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC32(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("incorrect resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("incorrect resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return false;
}
@@ -2647,12 +2633,11 @@ ReadRecord(XLogRecPtr *RecPtr, int emode)
if (readBuf == NULL)
{
/*
- * First time through, permanently allocate readBuf. We do it
- * this way, rather than just making a static array, for two
- * reasons: (1) no need to waste the storage in most
- * instantiations of the backend; (2) a static char array isn't
- * guaranteed to have any particular alignment, whereas malloc()
- * will provide MAXALIGN'd storage.
+ * First time through, permanently allocate readBuf. We do it this
+ * way, rather than just making a static array, for two reasons: (1)
+ * no need to waste the storage in most instantiations of the backend;
+ * (2) a static char array isn't guaranteed to have any particular
+ * alignment, whereas malloc() will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
@@ -2685,11 +2670,11 @@ ReadRecord(XLogRecPtr *RecPtr, int emode)
RecPtr->xlogid, RecPtr->xrecoff)));
/*
- * Since we are going to a random position in WAL, forget any
- * prior state about what timeline we were in, and allow it to be
- * any timeline in expectedTLIs. We also set a flag to allow
- * curFileTLI to go backwards (but we can't reset that variable
- * right here, since we might not change files at all).
+ * Since we are going to a random position in WAL, forget any prior
+ * state about what timeline we were in, and allow it to be any
+ * timeline in expectedTLIs. We also set a flag to allow curFileTLI
+ * to go backwards (but we can't reset that variable right here, since
+ * we might not change files at all).
*/
lastPageTLI = 0; /* see comment in ValidXLOGHeader */
randAccess = true; /* allow curFileTLI to go backwards too */
@@ -2741,9 +2726,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode)
if (targetRecOff == 0)
{
/*
- * Can only get here in the continuing-from-prev-page case,
- * because XRecOffIsValid eliminated the zero-page-offset case
- * otherwise. Need to skip over the new page's header.
+ * Can only get here in the continuing-from-prev-page case, because
+ * XRecOffIsValid eliminated the zero-page-offset case otherwise. Need
+ * to skip over the new page's header.
*/
tmpRecPtr.xrecoff += pageHeaderSize;
targetRecOff = pageHeaderSize;
@@ -2791,14 +2776,14 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager ID %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
if (randAccess)
{
/*
- * We can't exactly verify the prev-link, but surely it should be
- * less than the record's own address.
+ * We can't exactly verify the prev-link, but surely it should be less
+ * than the record's own address.
*/
if (!XLByteLT(record->xl_prev, *RecPtr))
{
@@ -2812,9 +2797,9 @@ got_record:;
else
{
/*
- * Record's prev-link should exactly match our previous location.
- * This check guards against torn WAL pages where a stale but
- * valid-looking WAL record starts on a sector boundary.
+ * Record's prev-link should exactly match our previous location. This
+ * check guards against torn WAL pages where a stale but valid-looking
+ * WAL record starts on a sector boundary.
*/
if (!XLByteEQ(record->xl_prev, ReadRecPtr))
{
@@ -2827,11 +2812,10 @@ got_record:;
}
/*
- * Allocate or enlarge readRecordBuf as needed. To avoid useless
- * small increases, round its size to a multiple of BLCKSZ, and make
- * sure it's at least 4*BLCKSZ to start with. (That is enough for all
- * "normal" records, but very large commit or abort records might need
- * more space.)
+ * Allocate or enlarge readRecordBuf as needed. To avoid useless small
+ * increases, round its size to a multiple of BLCKSZ, and make sure it's
+ * at least 4*BLCKSZ to start with. (That is enough for all "normal"
+ * records, but very large commit or abort records might need more space.)
*/
total_len = record->xl_tot_len;
if (total_len > readRecordBufSize)
@@ -2927,7 +2911,7 @@ got_record:;
MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len))
{
nextRecord = (XLogRecord *) ((char *) contrecord +
- MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len));
+ MAXALIGN(SizeOfXLogContRecord + contrecord->xl_rem_len));
}
EndRecPtr.xlogid = readId;
EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff +
@@ -2991,8 +2975,8 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode)
char sysident_str[32];
/*
- * Format sysids separately to keep platform-dependent format
- * code out of the translatable message string.
+ * Format sysids separately to keep platform-dependent format code
+ * out of the translatable message string.
*/
snprintf(fhdrident_str, sizeof(fhdrident_str), UINT64_FORMAT,
longhdr->xlp_sysid);
@@ -3000,15 +2984,15 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode)
ControlFile->system_identifier);
ereport(emode,
(errmsg("WAL file is from different system"),
- errdetail("WAL file SYSID is %s, pg_control SYSID is %s",
- fhdrident_str, sysident_str)));
+ errdetail("WAL file SYSID is %s, pg_control SYSID is %s",
+ fhdrident_str, sysident_str)));
return false;
}
if (longhdr->xlp_seg_size != XLogSegSize)
{
ereport(emode,
(errmsg("WAL file is from different system"),
- errdetail("Incorrect XLOG_SEG_SIZE in page header.")));
+ errdetail("Incorrect XLOG_SEG_SIZE in page header.")));
return false;
}
}
@@ -3018,7 +3002,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@@ -3040,9 +3024,9 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode)
* immediate parent's TLI, we should never see TLI go backwards across
* successive pages of a consistent WAL sequence.
*
- * Of course this check should only be applied when advancing
- * sequentially across pages; therefore ReadRecord resets lastPageTLI
- * to zero when going to a random page.
+ * Of course this check should only be applied when advancing sequentially
+ * across pages; therefore ReadRecord resets lastPageTLI to zero when
+ * going to a random page.
*/
if (hdr->xlp_tli < lastPageTLI)
{
@@ -3123,7 +3107,7 @@ readTimeLineHistory(TimeLineID targetTLI)
tli <= (TimeLineID) linitial_int(result))
ereport(FATAL,
(errmsg("invalid data in history file: %s", fline),
- errhint("Timeline IDs must be in increasing sequence.")));
+ errhint("Timeline IDs must be in increasing sequence.")));
/* Build list with newest item first */
result = lcons_int((int) tli, result);
@@ -3137,7 +3121,7 @@ readTimeLineHistory(TimeLineID targetTLI)
targetTLI <= (TimeLineID) linitial_int(result))
ereport(FATAL,
(errmsg("invalid data in history file \"%s\"", path),
- errhint("Timeline IDs must be less than child timeline's ID.")));
+ errhint("Timeline IDs must be less than child timeline's ID.")));
result = lcons_int((int) targetTLI, result);
@@ -3196,8 +3180,8 @@ findNewestTimeLine(TimeLineID startTLI)
TimeLineID probeTLI;
/*
- * The algorithm is just to probe for the existence of timeline
- * history files. XXX is it useful to allow gaps in the sequence?
+ * The algorithm is just to probe for the existence of timeline history
+ * files. XXX is it useful to allow gaps in the sequence?
*/
newestTLI = startTLI;
@@ -3302,14 +3286,13 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
unlink(tmppath);
/*
- * if write didn't set errno, assume problem is no disk
- * space
+ * if write didn't set errno, assume problem is no disk space
*/
errno = save_errno ? save_errno : ENOSPC;
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
close(srcfd);
@@ -3454,11 +3437,11 @@ WriteControlFile(void)
FIN_CRC32(ControlFile->crc);
/*
- * We write out BLCKSZ bytes into pg_control, zero-padding the excess
- * over sizeof(ControlFileData). This reduces the odds of
- * premature-EOF errors when reading pg_control. We'll still fail
- * when we check the contents of the file, but hopefully with a more
- * specific error than "couldn't read pg_control".
+ * We write out BLCKSZ bytes into pg_control, zero-padding the excess over
+ * sizeof(ControlFileData). This reduces the odds of premature-EOF errors
+ * when reading pg_control. We'll still fail when we check the contents
+ * of the file, but hopefully with a more specific error than "couldn't
+ * read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
ereport(PANIC,
@@ -3524,17 +3507,17 @@ ReadControlFile(void)
close(fd);
/*
- * Check for expected pg_control format version. If this is wrong,
- * the CRC check will likely fail because we'll be checking the wrong
- * number of bytes. Complaining about wrong version will probably be
- * more enlightening than complaining about wrong CRC.
+ * Check for expected pg_control format version. If this is wrong, the
+ * CRC check will likely fail because we'll be checking the wrong number
+ * of bytes. Complaining about wrong version will probably be more
+ * enlightening than complaining about wrong CRC.
*/
if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
- " but the server was compiled with PG_CONTROL_VERSION %d.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC32(crc);
@@ -3548,31 +3531,30 @@ ReadControlFile(void)
(errmsg("incorrect checksum in control file")));
/*
- * Do compatibility checking immediately. We do this here for 2
- * reasons:
+ * Do compatibility checking immediately. We do this here for 2 reasons:
*
- * (1) if the database isn't compatible with the backend executable, we
- * want to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we want
+ * to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
- * propagate to forked backends, which aren't going to read this file
- * for themselves. (These locale settings are considered critical
+ * propagate to forked backends, which aren't going to read this file for
+ * themselves. (These locale settings are considered critical
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
- " but the server was compiled with CATALOG_VERSION_NO %d.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->maxAlign != MAXIMUM_ALIGNOF)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with MAXALIGN %d,"
- " but the server was compiled with MAXALIGN %d.",
- ControlFile->maxAlign, MAXIMUM_ALIGNOF),
- errhint("It looks like you need to initdb.")));
+ errdetail("The database cluster was initialized with MAXALIGN %d,"
+ " but the server was compiled with MAXALIGN %d.",
+ ControlFile->maxAlign, MAXIMUM_ALIGNOF),
+ errhint("It looks like you need to initdb.")));
if (ControlFile->floatFormat != FLOATFORMAT_VALUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
@@ -3581,76 +3563,76 @@ ReadControlFile(void)
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with BLCKSZ %d,"
- " but the server was compiled with BLCKSZ %d.",
- ControlFile->blcksz, BLCKSZ),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
- " but the server was compiled with RELSEG_SIZE %d.",
- ControlFile->relseg_size, RELSEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
+ " but the server was compiled with RELSEG_SIZE %d.",
+ ControlFile->relseg_size, RELSEG_SIZE),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->xlog_seg_size != XLOG_SEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d,"
- " but the server was compiled with XLOG_SEG_SIZE %d.",
+ " but the server was compiled with XLOG_SEG_SIZE %d.",
ControlFile->xlog_seg_size, XLOG_SEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with NAMEDATALEN %d,"
- " but the server was compiled with NAMEDATALEN %d.",
- ControlFile->nameDataLen, NAMEDATALEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with NAMEDATALEN %d,"
+ " but the server was compiled with NAMEDATALEN %d.",
+ ControlFile->nameDataLen, NAMEDATALEN),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->indexMaxKeys != INDEX_MAX_KEYS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with INDEX_MAX_KEYS %d,"
- " but the server was compiled with INDEX_MAX_KEYS %d.",
+ " but the server was compiled with INDEX_MAX_KEYS %d.",
ControlFile->indexMaxKeys, INDEX_MAX_KEYS),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
- " but the server was compiled with HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
- " but the server was compiled without HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
- " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_collate),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_ctype),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
@@ -3719,9 +3701,9 @@ XLOGShmemSize(void)
size = add_size(size, mul_size(BLCKSZ, XLOGbuffers));
/*
- * Note: we don't count ControlFileData, it comes out of the "slop
- * factor" added by CreateSharedMemoryAndSemaphores. This lets us
- * use this routine again below to compute the actual allocation size.
+ * Note: we don't count ControlFileData, it comes out of the "slop factor"
+ * added by CreateSharedMemoryAndSemaphores. This lets us use this
+ * routine again below to compute the actual allocation size.
*/
return size;
@@ -3749,9 +3731,9 @@ XLOGShmemInit(void)
memset(XLogCtl, 0, sizeof(XLogCtlData));
/*
- * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
- * a multiple of the alignment for same, so no extra alignment padding
- * is needed here.
+ * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a
+ * multiple of the alignment for same, so no extra alignment padding is
+ * needed here.
*/
allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData);
XLogCtl->xlblocks = (XLogRecPtr *) allocptr;
@@ -3766,18 +3748,19 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, (Size) BLCKSZ * XLOGbuffers);
/*
- * Do basic initialization of XLogCtl shared data. (StartupXLOG will
- * fill in additional info.)
+ * Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
+ * in additional info.)
*/
- XLogCtl->XLogCacheByte = (Size) BLCKSZ * XLOGbuffers;
+ XLogCtl->XLogCacheByte = (Size) BLCKSZ *XLOGbuffers;
+
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
XLogCtl->Insert.currpage = (XLogPageHeader) (XLogCtl->pages);
SpinLockInit(&XLogCtl->info_lck);
/*
- * If we are not in bootstrap mode, pg_control should already exist.
- * Read and validate it immediately (see comments in ReadControlFile()
- * for the reasons why).
+ * If we are not in bootstrap mode, pg_control should already exist. Read
+ * and validate it immediately (see comments in ReadControlFile() for the
+ * reasons why).
*/
if (!IsBootstrapProcessingMode())
ReadControlFile();
@@ -3801,17 +3784,16 @@ BootStrapXLOG(void)
pg_crc32 crc;
/*
- * Select a hopefully-unique system identifier code for this
- * installation. We use the result of gettimeofday(), including the
- * fractional seconds field, as being about as unique as we can easily
- * get. (Think not to use random(), since it hasn't been seeded and
- * there's no portable way to seed it other than the system clock
- * value...) The upper half of the uint64 value is just the tv_sec
- * part, while the lower half is the XOR of tv_sec and tv_usec. This
- * is to ensure that we don't lose uniqueness unnecessarily if
- * "uint64" is really only 32 bits wide. A person knowing this
- * encoding can determine the initialization time of the installation,
- * which could perhaps be useful sometimes.
+ * Select a hopefully-unique system identifier code for this installation.
+ * We use the result of gettimeofday(), including the fractional seconds
+ * field, as being about as unique as we can easily get. (Think not to
+ * use random(), since it hasn't been seeded and there's no portable way
+ * to seed it other than the system clock value...) The upper half of the
+ * uint64 value is just the tv_sec part, while the lower half is the XOR
+ * of tv_sec and tv_usec. This is to ensure that we don't lose uniqueness
+ * unnecessarily if "uint64" is really only 32 bits wide. A person
+ * knowing this encoding can determine the initialization time of the
+ * installation, which could perhaps be useful sometimes.
*/
gettimeofday(&tv, NULL);
sysidentifier = ((uint64) tv.tv_sec) << 32;
@@ -3821,7 +3803,7 @@ BootStrapXLOG(void)
ThisTimeLineID = 1;
/* page buffer must be aligned suitably for O_DIRECT */
- buffer = (char *) palloc(BLCKSZ + ALIGNOF_XLOG_BUFFER);
+ buffer = (char *) palloc(BLCKSZ + ALIGNOF_XLOG_BUFFER);
page = (XLogPageHeader) TYPEALIGN(ALIGNOF_XLOG_BUFFER, buffer);
memset(page, 0, BLCKSZ);
@@ -3882,18 +3864,18 @@ BootStrapXLOG(void)
errno = ENOSPC;
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not write bootstrap transaction log file: %m")));
+ errmsg("could not write bootstrap transaction log file: %m")));
}
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync bootstrap transaction log file: %m")));
+ errmsg("could not fsync bootstrap transaction log file: %m")));
if (close(openLogFile))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not close bootstrap transaction log file: %m")));
+ errmsg("could not close bootstrap transaction log file: %m")));
openLogFile = -1;
@@ -4036,8 +4018,8 @@ readRecoveryCommandFile(void)
recoveryTargetXid = (TransactionId) strtoul(tok2, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
ereport(FATAL,
- (errmsg("recovery_target_xid is not a valid number: \"%s\"",
- tok2)));
+ (errmsg("recovery_target_xid is not a valid number: \"%s\"",
+ tok2)));
ereport(LOG,
(errmsg("recovery_target_xid = %u",
recoveryTargetXid)));
@@ -4056,17 +4038,17 @@ readRecoveryCommandFile(void)
recoveryTargetExact = false;
/*
- * Convert the time string given by the user to the time_t
- * format. We use type abstime's input converter because we
- * know abstime has the same representation as time_t.
+ * Convert the time string given by the user to the time_t format.
+ * We use type abstime's input converter because we know abstime
+ * has the same representation as time_t.
*/
recoveryTargetTime = (time_t)
DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(tok2)));
+ CStringGetDatum(tok2)));
ereport(LOG,
(errmsg("recovery_target_time = %s",
- DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime))))));
+ DatumGetCString(DirectFunctionCall1(abstimeout,
+ AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime))))));
}
else if (strcmp(tok1, "recovery_target_inclusive") == 0)
{
@@ -4095,7 +4077,7 @@ readRecoveryCommandFile(void)
ereport(FATAL,
(errmsg("syntax error in recovery command file: %s",
cmdline),
- errhint("Lines should have the format parameter = 'value'.")));
+ errhint("Lines should have the format parameter = 'value'.")));
/* Check that required parameters were supplied */
if (recoveryRestoreCommand == NULL)
@@ -4107,10 +4089,10 @@ readRecoveryCommandFile(void)
InArchiveRecovery = true;
/*
- * If user specified recovery_target_timeline, validate it or compute
- * the "latest" value. We can't do this until after we've gotten the
- * restore command and set InArchiveRecovery, because we need to fetch
- * timeline history files from the archive.
+ * If user specified recovery_target_timeline, validate it or compute the
+ * "latest" value. We can't do this until after we've gotten the restore
+ * command and set InArchiveRecovery, because we need to fetch timeline
+ * history files from the archive.
*/
if (rtliGiven)
{
@@ -4119,8 +4101,8 @@ readRecoveryCommandFile(void)
/* Timeline 1 does not have a history file, all else should */
if (rtli != 1 && !existsTimeLineHistory(rtli))
ereport(FATAL,
- (errmsg("recovery_target_timeline %u does not exist",
- rtli)));
+ (errmsg("recovery_target_timeline %u does not exist",
+ rtli)));
recoveryTargetTLI = rtli;
}
else
@@ -4146,9 +4128,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
InArchiveRecovery = false;
/*
- * We should have the ending log segment currently open. Verify, and
- * then close it (to avoid problems on Windows with trying to rename
- * or delete an open file).
+ * We should have the ending log segment currently open. Verify, and then
+ * close it (to avoid problems on Windows with trying to rename or delete
+ * an open file).
*/
Assert(readFile >= 0);
Assert(readId == endLogId);
@@ -4158,17 +4140,17 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
readFile = -1;
/*
- * If the segment was fetched from archival storage, we want to
- * replace the existing xlog segment (if any) with the archival
- * version. This is because whatever is in XLOGDIR is very possibly
- * older than what we have from the archives, since it could have come
- * from restoring a PGDATA backup. In any case, the archival version
- * certainly is more descriptive of what our current database state
- * is, because that is what we replayed from.
+ * If the segment was fetched from archival storage, we want to replace
+ * the existing xlog segment (if any) with the archival version. This is
+ * because whatever is in XLOGDIR is very possibly older than what we have
+ * from the archives, since it could have come from restoring a PGDATA
+ * backup. In any case, the archival version certainly is more
+ * descriptive of what our current database state is, because that is what
+ * we replayed from.
*
- * Note that if we are establishing a new timeline, ThisTimeLineID is
- * already set to the new value, and so we will create a new file
- * instead of overwriting any existing file.
+ * Note that if we are establishing a new timeline, ThisTimeLineID is already
+ * set to the new value, and so we will create a new file instead of
+ * overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
@@ -4195,9 +4177,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
unlink(recoveryPath); /* ignore any error */
/*
- * If we are establishing a new timeline, we have to copy data
- * from the last WAL segment of the old timeline to create a
- * starting WAL segment for the new timeline.
+ * If we are establishing a new timeline, we have to copy data from
+ * the last WAL segment of the old timeline to create a starting WAL
+ * segment for the new timeline.
*/
if (endTLI != ThisTimeLineID)
XLogFileCopy(endLogId, endLogSeg,
@@ -4205,8 +4187,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
}
/*
- * Let's just make real sure there are not .ready or .done flags
- * posted for the new segment.
+ * Let's just make real sure there are not .ready or .done flags posted
+ * for the new segment.
*/
XLogFileName(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
XLogArchiveCleanup(xlogpath);
@@ -4216,8 +4198,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
unlink(recoveryPath); /* ignore any error */
/*
- * Rename the config file out of the way, so that we don't
- * accidentally re-enter archive recovery mode in a subsequent crash.
+ * Rename the config file out of the way, so that we don't accidentally
+ * re-enter archive recovery mode in a subsequent crash.
*/
unlink(RECOVERY_COMMAND_DONE);
if (rename(RECOVERY_COMMAND_FILE, RECOVERY_COMMAND_DONE) != 0)
@@ -4278,9 +4260,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
* transactionid
*
* when testing for an xid, we MUST test for equality only, since
- * transactions are numbered in the order they start, not the
- * order they complete. A higher numbered xid will complete before
- * you about 50% of the time...
+ * transactions are numbered in the order they start, not the order
+ * they complete. A higher numbered xid will complete before you about
+ * 50% of the time...
*/
stopsHere = (record->xl_xid == recoveryTargetXid);
if (stopsHere)
@@ -4289,9 +4271,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
else
{
/*
- * there can be many transactions that share the same commit time,
- * so we stop after the last one, if we are inclusive, or stop at
- * the first one if we are exclusive
+ * there can be many transactions that share the same commit time, so
+ * we stop after the last one, if we are inclusive, or stop at the
+ * first one if we are exclusive
*/
if (recoveryTargetInclusive)
stopsHere = (recordXtime > recoveryTargetTime);
@@ -4312,22 +4294,22 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
if (recoveryStopAfter)
ereport(LOG,
(errmsg("recovery stopping after commit of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
else
ereport(LOG,
(errmsg("recovery stopping before commit of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
}
else
{
if (recoveryStopAfter)
ereport(LOG,
(errmsg("recovery stopping after abort of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
else
ereport(LOG,
(errmsg("recovery stopping before abort of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
}
}
@@ -4359,8 +4341,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we need
- * not do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need not
+ * do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -4381,10 +4363,10 @@ StartupXLOG(void)
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
- errhint("This probably means that some data is corrupted and"
- " you will have to use the last backup for recovery.")));
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -4397,8 +4379,8 @@ StartupXLOG(void)
#endif
/*
- * Initialize on the assumption we want to recover to the same
- * timeline that's active according to pg_control.
+ * Initialize on the assumption we want to recover to the same timeline
+ * that's active according to pg_control.
*/
recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID;
@@ -4417,7 +4399,7 @@ StartupXLOG(void)
* timeline.
*/
if (!list_member_int(expectedTLIs,
- (int) ControlFile->checkPointCopy.ThisTimeLineID))
+ (int) ControlFile->checkPointCopy.ThisTimeLineID))
ereport(FATAL,
(errmsg("requested timeline %u is not a child of database system timeline %u",
recoveryTargetTLI,
@@ -4426,30 +4408,29 @@ StartupXLOG(void)
if (read_backup_label(&checkPointLoc))
{
/*
- * When a backup_label file is present, we want to roll forward
- * from the checkpoint it identifies, rather than using
- * pg_control.
+ * When a backup_label file is present, we want to roll forward from
+ * the checkpoint it identifies, rather than using pg_control.
*/
record = ReadCheckpointRecord(checkPointLoc, 0);
if (record != NULL)
{
ereport(LOG,
(errmsg("checkpoint record is at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
{
ereport(PANIC,
- (errmsg("could not locate required checkpoint record"),
- errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
+ (errmsg("could not locate required checkpoint record"),
+ errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
}
}
else
{
/*
- * Get the last valid checkpoint record. If the latest one
- * according to pg_control is broken, try the next-to-last one.
+ * Get the last valid checkpoint record. If the latest one according
+ * to pg_control is broken, try the next-to-last one.
*/
checkPointLoc = ControlFile->checkPoint;
record = ReadCheckpointRecord(checkPointLoc, 1);
@@ -4457,7 +4438,7 @@ StartupXLOG(void)
{
ereport(LOG,
(errmsg("checkpoint record is at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
}
else
{
@@ -4466,14 +4447,13 @@ StartupXLOG(void)
if (record != NULL)
{
ereport(LOG,
- (errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
- InRecovery = true; /* force recovery even if
- * SHUTDOWNED */
+ (errmsg("using previous checkpoint record at %X/%X",
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
}
@@ -4482,10 +4462,10 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(LOG,
- (errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
- checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
- checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
- wasShutdown ? "TRUE" : "FALSE")));
+ (errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
+ checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+ checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(LOG,
(errmsg("next transaction ID: %u; next OID: %u",
checkPoint.nextXid, checkPoint.nextOid)));
@@ -4502,9 +4482,9 @@ StartupXLOG(void)
MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
/*
- * We must replay WAL entries using the same TimeLineID they were
- * created under, so temporarily adopt the TLI indicated by the
- * checkpoint (see also xlog_redo()).
+ * We must replay WAL entries using the same TimeLineID they were created
+ * under, so temporarily adopt the TLI indicated by the checkpoint (see
+ * also xlog_redo()).
*/
ThisTimeLineID = checkPoint.ThisTimeLineID;
@@ -4518,15 +4498,15 @@ StartupXLOG(void)
/*
* Check whether we need to force recovery from WAL. If it appears to
- * have been a clean shutdown and we did not have a recovery.conf
- * file, then assume no recovery needed.
+ * have been a clean shutdown and we did not have a recovery.conf file,
+ * then assume no recovery needed.
*/
if (XLByteLT(checkPoint.undo, RecPtr) ||
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo/undo record in shutdown checkpoint")));
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -4563,8 +4543,8 @@ StartupXLOG(void)
}
/*
- * Find the first record that logically follows the checkpoint ---
- * it might physically precede it, though.
+ * Find the first record that logically follows the checkpoint --- it
+ * might physically precede it, though.
*/
if (XLByteLT(checkPoint.redo, RecPtr))
{
@@ -4603,7 +4583,7 @@ StartupXLOG(void)
xlog_outrec(buf, record);
strcat(buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(buf,
- record->xl_info, XLogRecGetData(record));
+ record->xl_info, XLogRecGetData(record));
elog(LOG, "%s", buf);
}
#endif
@@ -4621,7 +4601,7 @@ StartupXLOG(void)
/* nextXid must be beyond record's xid */
if (TransactionIdFollowsOrEquals(record->xl_xid,
- ShmemVariableCache->nextXid))
+ ShmemVariableCache->nextXid))
{
ShmemVariableCache->nextXid = record->xl_xid;
TransactionIdAdvance(ShmemVariableCache->nextXid);
@@ -4655,8 +4635,8 @@ StartupXLOG(void)
}
/*
- * Re-fetch the last valid or last applied record, so we can identify
- * the exact endpoint of what we consider the valid portion of WAL.
+ * Re-fetch the last valid or last applied record, so we can identify the
+ * exact endpoint of what we consider the valid portion of WAL.
*/
record = ReadRecord(&LastRec, PANIC);
EndOfLog = EndRecPtr;
@@ -4682,8 +4662,8 @@ StartupXLOG(void)
*
* If we stopped short of the end of WAL during recovery, then we are
* generating a new timeline and must assign it a unique new ID.
- * Otherwise, we can just extend the timeline we were in when we ran
- * out of WAL.
+ * Otherwise, we can just extend the timeline we were in when we ran out
+ * of WAL.
*/
if (needNewTimeLine)
{
@@ -4698,10 +4678,10 @@ StartupXLOG(void)
XLogCtl->ThisTimeLineID = ThisTimeLineID;
/*
- * We are now done reading the old WAL. Turn off archive fetching if
- * it was active, and make a writable copy of the last WAL segment.
- * (Note that we also have a copy of the last block of the old WAL in
- * readBuf; we will use that below.)
+ * We are now done reading the old WAL. Turn off archive fetching if it
+ * was active, and make a writable copy of the last WAL segment. (Note
+ * that we also have a copy of the last block of the old WAL in readBuf;
+ * we will use that below.)
*/
if (InArchiveRecovery)
exitArchiveRecovery(curFileTLI, endLogId, endLogSeg);
@@ -4724,9 +4704,9 @@ StartupXLOG(void)
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
/*
- * Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block is
- * indeed the one we want to use.
+ * Tricky point here: readBuf contains the *last* block that the LastRec
+ * record spans, not the one it starts in. The last block is indeed the
+ * one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -4752,9 +4732,8 @@ StartupXLOG(void)
else
{
/*
- * Whenever Write.LogwrtResult points to exactly the end of a
- * page, Write.curridx must point to the *next* page (see
- * XLogWrite()).
+ * Whenever Write.LogwrtResult points to exactly the end of a page,
+ * Write.curridx must point to the *next* page (see XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
* this is sufficient. The first actual attempt to insert a log
@@ -4785,17 +4764,16 @@ StartupXLOG(void)
pgstat_reset_all();
/*
- * Perform a new checkpoint to update our recovery activity to
- * disk.
+ * Perform a new checkpoint to update our recovery activity to disk.
*
- * Note that we write a shutdown checkpoint rather than an on-line
- * one. This is not particularly critical, but since we may be
- * assigning a new TLI, using a shutdown checkpoint allows us to
- * have the rule that TLI only changes in shutdown checkpoints,
- * which allows some extra error checking in xlog_redo.
+ * Note that we write a shutdown checkpoint rather than an on-line one.
+ * This is not particularly critical, but since we may be assigning a
+ * new TLI, using a shutdown checkpoint allows us to have the rule
+ * that TLI only changes in shutdown checkpoints, which allows some
+ * extra error checking in xlog_redo.
*
- * In case we had to use the secondary checkpoint, make sure that it
- * will still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it will
+ * still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -4810,8 +4788,8 @@ StartupXLOG(void)
XLogCloseRelationCache();
/*
- * Now that we've checkpointed the recovery, it's safe to flush
- * old backup_label, if present.
+ * Now that we've checkpointed the recovery, it's safe to flush old
+ * backup_label, if present.
*/
remove_backup_label();
}
@@ -4878,7 +4856,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt)
{
case 1:
ereport(LOG,
- (errmsg("invalid primary checkpoint link in control file")));
+ (errmsg("invalid primary checkpoint link in control file")));
break;
case 2:
ereport(LOG,
@@ -4886,7 +4864,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt)
break;
default:
ereport(LOG,
- (errmsg("invalid checkpoint link in backup_label file")));
+ (errmsg("invalid checkpoint link in backup_label file")));
break;
}
return NULL;
@@ -4927,7 +4905,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt)
break;
default:
ereport(LOG,
- (errmsg("invalid resource manager ID in checkpoint record")));
+ (errmsg("invalid resource manager ID in checkpoint record")));
break;
}
return NULL;
@@ -4939,11 +4917,11 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt)
{
case 1:
ereport(LOG,
- (errmsg("invalid xl_info in primary checkpoint record")));
+ (errmsg("invalid xl_info in primary checkpoint record")));
break;
case 2:
ereport(LOG,
- (errmsg("invalid xl_info in secondary checkpoint record")));
+ (errmsg("invalid xl_info in secondary checkpoint record")));
break;
default:
ereport(LOG,
@@ -4959,11 +4937,11 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, int whichChkpt)
{
case 1:
ereport(LOG,
- (errmsg("invalid length of primary checkpoint record")));
+ (errmsg("invalid length of primary checkpoint record")));
break;
case 2:
ereport(LOG,
- (errmsg("invalid length of secondary checkpoint record")));
+ (errmsg("invalid length of secondary checkpoint record")));
break;
default:
ereport(LOG,
@@ -5084,10 +5062,10 @@ CreateCheckPoint(bool shutdown, bool force)
int nsegsrecycled = 0;
/*
- * Acquire CheckpointLock to ensure only one checkpoint happens at a
- * time. (This is just pro forma, since in the present system
- * structure there is only one process that is allowed to issue
- * checkpoints at any given time.)
+ * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
+ * (This is just pro forma, since in the present system structure there is
+ * only one process that is allowed to issue checkpoints at any given
+ * time.)
*/
LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
@@ -5108,10 +5086,10 @@ CreateCheckPoint(bool shutdown, bool force)
checkPoint.time = time(NULL);
/*
- * We must hold CheckpointStartLock while determining the checkpoint
- * REDO pointer. This ensures that any concurrent transaction commits
- * will be either not yet logged, or logged and recorded in pg_clog.
- * See notes in RecordTransactionCommit().
+ * We must hold CheckpointStartLock while determining the checkpoint REDO
+ * pointer. This ensures that any concurrent transaction commits will be
+ * either not yet logged, or logged and recorded in pg_clog. See notes in
+ * RecordTransactionCommit().
*/
LWLockAcquire(CheckpointStartLock, LW_EXCLUSIVE);
@@ -5119,20 +5097,19 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * If this isn't a shutdown or forced checkpoint, and we have not
- * inserted any XLOG records since the start of the last checkpoint,
- * skip the checkpoint. The idea here is to avoid inserting duplicate
- * checkpoints when the system is idle. That wastes log space, and
- * more importantly it exposes us to possible loss of both current and
- * previous checkpoint records if the machine crashes just as we're
- * writing the update. (Perhaps it'd make even more sense to
- * checkpoint only when the previous checkpoint record is in a
- * different xlog page?)
+ * If this isn't a shutdown or forced checkpoint, and we have not inserted
+ * any XLOG records since the start of the last checkpoint, skip the
+ * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * when the system is idle. That wastes log space, and more importantly it
+ * exposes us to possible loss of both current and previous checkpoint
+ * records if the machine crashes just as we're writing the update.
+ * (Perhaps it'd make even more sense to checkpoint only when the previous
+ * checkpoint record is in a different xlog page?)
*
- * We have to make two tests to determine that nothing has happened since
- * the start of the last checkpoint: current insertion point must
- * match the end of the last checkpoint record, and its redo pointer
- * must point to itself.
+ * We have to make two tests to determine that nothing has happened since the
+ * start of the last checkpoint: current insertion point must match the
+ * end of the last checkpoint record, and its redo pointer must point to
+ * itself.
*/
if (!shutdown && !force)
{
@@ -5158,10 +5135,10 @@ CreateCheckPoint(bool shutdown, bool force)
/*
* Compute new REDO record ptr = location of next XLOG record.
*
- * NB: this is NOT necessarily where the checkpoint record itself will
- * be, since other backends may insert more XLOG records while we're
- * off doing the buffer flush work. Those XLOG records are logically
- * after the checkpoint, even though physically before it. Got that?
+ * NB: this is NOT necessarily where the checkpoint record itself will be,
+ * since other backends may insert more XLOG records while we're off doing
+ * the buffer flush work. Those XLOG records are logically after the
+ * checkpoint, even though physically before it. Got that?
*/
freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord)
@@ -5173,16 +5150,15 @@ CreateCheckPoint(bool shutdown, bool force)
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
/*
- * Here we update the shared RedoRecPtr for future XLogInsert calls;
- * this must be done while holding the insert lock AND the info_lck.
+ * Here we update the shared RedoRecPtr for future XLogInsert calls; this
+ * must be done while holding the insert lock AND the info_lck.
*
* Note: if we fail to complete the checkpoint, RedoRecPtr will be left
- * pointing past where it really needs to point. This is okay; the
- * only consequence is that XLogInsert might back up whole buffers
- * that it didn't really need to. We can't postpone advancing
- * RedoRecPtr because XLogInserts that happen while we are dumping
- * buffers must assume that their buffer changes are not included in
- * the checkpoint.
+ * pointing past where it really needs to point. This is okay; the only
+ * consequence is that XLogInsert might back up whole buffers that it
+ * didn't really need to. We can't postpone advancing RedoRecPtr because
+ * XLogInserts that happen while we are dumping buffers must assume that
+ * their buffer changes are not included in the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -5219,15 +5195,15 @@ CreateCheckPoint(bool shutdown, bool force)
&checkPoint.nextMultiOffset);
/*
- * Having constructed the checkpoint record, ensure all shmem disk
- * buffers and commit-log buffers are flushed to disk.
+ * Having constructed the checkpoint record, ensure all shmem disk buffers
+ * and commit-log buffers are flushed to disk.
*
- * This I/O could fail for various reasons. If so, we will fail to
- * complete the checkpoint, but there is no reason to force a system
- * panic. Accordingly, exit critical section while doing it. (If
- * we are doing a shutdown checkpoint, we probably *should* panic ---
- * but that will happen anyway because we'll still be inside the
- * critical section established by ShutdownXLOG.)
+ * This I/O could fail for various reasons. If so, we will fail to complete
+ * the checkpoint, but there is no reason to force a system panic.
+ * Accordingly, exit critical section while doing it. (If we are doing a
+ * shutdown checkpoint, we probably *should* panic --- but that will
+ * happen anyway because we'll still be inside the critical section
+ * established by ShutdownXLOG.)
*/
END_CRIT_SECTION();
@@ -5260,8 +5236,8 @@ CreateCheckPoint(bool shutdown, bool force)
XLogFlush(recptr);
/*
- * We now have ProcLastRecPtr = start of actual checkpoint record,
- * recptr = end of actual checkpoint record.
+ * We now have ProcLastRecPtr = start of actual checkpoint record, recptr
+ * = end of actual checkpoint record.
*/
if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
ereport(PANIC,
@@ -5287,8 +5263,8 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockRelease(ControlFileLock);
/*
- * We are now done with critical updates; no need for system panic if
- * we have trouble while fooling with offline log segments.
+ * We are now done with critical updates; no need for system panic if we
+ * have trouble while fooling with offline log segments.
*/
END_CRIT_SECTION();
@@ -5304,19 +5280,18 @@ CreateCheckPoint(bool shutdown, bool force)
}
/*
- * Make more log segments if needed. (Do this after deleting offline
- * log segments, to avoid having peak disk space usage higher than
- * necessary.)
+ * Make more log segments if needed. (Do this after deleting offline log
+ * segments, to avoid having peak disk space usage higher than necessary.)
*/
if (!shutdown)
nsegsadded = PreallocXlogFiles(recptr);
/*
- * Truncate pg_subtrans if possible. We can throw away all data
- * before the oldest XMIN of any running transaction. No future
- * transaction will attempt to reference any pg_subtrans entry older
- * than that (see Asserts in subtrans.c). During recovery, though, we
- * mustn't do this because StartupSUBTRANS hasn't been called yet.
+ * Truncate pg_subtrans if possible. We can throw away all data before
+ * the oldest XMIN of any running transaction. No future transaction will
+ * attempt to reference any pg_subtrans entry older than that (see Asserts
+ * in subtrans.c). During recovery, though, we mustn't do this because
+ * StartupSUBTRANS hasn't been called yet.
*/
if (!InRecovery)
TruncateSUBTRANS(GetOldestXmin(true));
@@ -5342,13 +5317,14 @@ XLogPutNextOid(Oid nextOid)
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
+
/*
* We need not flush the NEXTOID record immediately, because any of the
- * just-allocated OIDs could only reach disk as part of a tuple insert
- * or update that would have its own XLOG record that must follow the
- * NEXTOID record. Therefore, the standard buffer LSN interlock applied
- * to those records will ensure no such OID reaches disk before the
- * NEXTOID record does.
+ * just-allocated OIDs could only reach disk as part of a tuple insert or
+ * update that would have its own XLOG record that must follow the NEXTOID
+ * record. Therefore, the standard buffer LSN interlock applied to those
+ * records will ensure no such OID reaches disk before the NEXTOID record
+ * does.
*/
}
@@ -5384,8 +5360,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
checkPoint.nextMultiOffset);
/*
- * TLI may change in a shutdown checkpoint, but it shouldn't
- * decrease
+ * TLI may change in a shutdown checkpoint, but it shouldn't decrease
*/
if (checkPoint.ThisTimeLineID != ThisTimeLineID)
{
@@ -5394,7 +5369,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
(int) checkPoint.ThisTimeLineID))
ereport(PANIC,
(errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- checkPoint.ThisTimeLineID, ThisTimeLineID)));
+ checkPoint.ThisTimeLineID, ThisTimeLineID)));
/* Following WAL records should be run with new TLI */
ThisTimeLineID = checkPoint.ThisTimeLineID;
}
@@ -5441,7 +5416,7 @@ xlog_desc(char *buf, uint8 xl_info, char *rec)
checkpoint->nextOid,
checkpoint->nextMulti,
checkpoint->nextMultiOffset,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
@@ -5535,23 +5510,23 @@ assign_xlog_sync_method(const char *method, bool doit, GucSource source)
/*
* To ensure that no blocks escape unsynced, force an fsync on the
* currently open log segment (if any). Also, if the open flag is
- * changing, close the log file so it will be reopened (with new
- * flag bit) at next use.
+ * changing, close the log file so it will be reopened (with new flag
+ * bit) at next use.
*/
if (openLogFile >= 0)
{
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not fsync log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not close log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not close log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
@@ -5575,16 +5550,16 @@ issue_xlog_fsync(void)
if (pg_fsync_no_writethrough(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not fsync log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FSYNC_WRITETHROUGH
case SYNC_METHOD_FSYNC_WRITETHROUGH:
if (pg_fsync_writethrough(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync write-through log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not fsync write-through log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
break;
#endif
#ifdef HAVE_FDATASYNC
@@ -5592,8 +5567,8 @@ issue_xlog_fsync(void)
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fdatasync log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ errmsg("could not fdatasync log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
@@ -5640,25 +5615,25 @@ pg_start_backup(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
(errmsg("WAL archiving is not active"),
- (errhint("archive_command must be defined before "
- "online backups can be made safely.")))));
+ (errhint("archive_command must be defined before "
+ "online backups can be made safely.")))));
backupidstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(backupid)));
+ PointerGetDatum(backupid)));
/*
- * Force a CHECKPOINT. This is not strictly necessary, but it seems
- * like a good idea to minimize the amount of past WAL needed to use
- * the backup. Also, this guarantees that two successive backup runs
- * will have different checkpoint positions and hence different
- * history file names, even if nothing happened in between.
+ * Force a CHECKPOINT. This is not strictly necessary, but it seems like
+ * a good idea to minimize the amount of past WAL needed to use the
+ * backup. Also, this guarantees that two successive backup runs will
+ * have different checkpoint positions and hence different history file
+ * names, even if nothing happened in between.
*/
RequestCheckpoint(true, false);
/*
- * Now we need to fetch the checkpoint record location, and also its
- * REDO pointer. The oldest point in WAL that would be needed to
- * restore starting from the checkpoint is precisely the REDO pointer.
+ * Now we need to fetch the checkpoint record location, and also its REDO
+ * pointer. The oldest point in WAL that would be needed to restore
+ * starting from the checkpoint is precisely the REDO pointer.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
checkpointloc = ControlFile->checkPoint;
@@ -5669,10 +5644,10 @@ pg_start_backup(PG_FUNCTION_ARGS)
XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg);
/*
- * We deliberately use strftime/localtime not the src/timezone
- * functions, so that backup labels will consistently be recorded in
- * the same timezone regardless of TimeZone setting. This matches
- * elog.c's practice.
+ * We deliberately use strftime/localtime not the src/timezone functions,
+ * so that backup labels will consistently be recorded in the same
+ * timezone regardless of TimeZone setting. This matches elog.c's
+ * practice.
*/
stamp_time = time(NULL);
strftime(strfbuf, sizeof(strfbuf),
@@ -5680,8 +5655,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
localtime(&stamp_time));
/*
- * Check for existing backup label --- implies a backup is already
- * running
+ * Check for existing backup label --- implies a backup is already running
*/
if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
{
@@ -5725,7 +5699,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
snprintf(xlogfilename, sizeof(xlogfilename), "%X/%X",
startpoint.xlogid, startpoint.xrecoff);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(xlogfilename)));
+ CStringGetDatum(xlogfilename)));
PG_RETURN_TEXT_P(result);
}
@@ -5762,8 +5736,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
(errmsg("must be superuser to run a backup"))));
/*
- * Get the current end-of-WAL position; it will be unsafe to use this
- * dump to restore to a point in advance of this time.
+ * Get the current end-of-WAL position; it will be unsafe to use this dump
+ * to restore to a point in advance of this time.
*/
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
INSERT_RECPTR(stoppoint, Insert, Insert->curridx);
@@ -5773,10 +5747,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
XLogFileName(stopxlogfilename, ThisTimeLineID, _logId, _logSeg);
/*
- * We deliberately use strftime/localtime not the src/timezone
- * functions, so that backup labels will consistently be recorded in
- * the same timezone regardless of TimeZone setting. This matches
- * elog.c's practice.
+ * We deliberately use strftime/localtime not the src/timezone functions,
+ * so that backup labels will consistently be recorded in the same
+ * timezone regardless of TimeZone setting. This matches elog.c's
+ * practice.
*/
stamp_time = time(NULL);
strftime(strfbuf, sizeof(strfbuf),
@@ -5800,9 +5774,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
}
/*
- * Read and parse the START WAL LOCATION line (this code is pretty
- * crude, but we are not expecting any variability in the file
- * format).
+ * Read and parse the START WAL LOCATION line (this code is pretty crude,
+ * but we are not expecting any variability in the file format).
*/
if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %24s)%c",
&startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
@@ -5869,7 +5842,7 @@ pg_stop_backup(PG_FUNCTION_ARGS)
snprintf(stopxlogfilename, sizeof(stopxlogfilename), "%X/%X",
stoppoint.xlogid, stoppoint.xrecoff);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(stopxlogfilename)));
+ CStringGetDatum(stopxlogfilename)));
PG_RETURN_TEXT_P(result);
}
@@ -5921,9 +5894,9 @@ read_backup_label(XLogRecPtr *checkPointLoc)
}
/*
- * Read and parse the START WAL LOCATION and CHECKPOINT lines (this
- * code is pretty crude, but we are not expecting any variability in
- * the file format).
+ * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code
+ * is pretty crude, but we are not expecting any variability in the file
+ * format).
*/
if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c",
&startpoint.xlogid, &startpoint.xrecoff, &tli,
@@ -5963,17 +5936,17 @@ read_backup_label(XLogRecPtr *checkPointLoc)
* Parse history file to identify stop point.
*/
if (fscanf(fp, "START WAL LOCATION: %X/%X (file %24s)%c",
- &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
+ &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
&ch) != 4 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", histfilename)));
+ errmsg("invalid data in file \"%s\"", histfilename)));
if (fscanf(fp, "STOP WAL LOCATION: %X/%X (file %24s)%c",
- &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename,
+ &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename,
&ch) != 4 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", histfilename)));
+ errmsg("invalid data in file \"%s\"", histfilename)));
recoveryMinXlogOffset = stoppoint;
if (ferror(fp) || FreeFile(fp))
ereport(FATAL,
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 55caf84a04d..485aa52474d 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.38 2005/06/06 17:01:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.39 2005/10/15 02:49:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,7 +121,7 @@ _xl_remove_hash_entry(XLogRelDesc *rdesc)
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
- (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
+ (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
if (hentry == NULL)
elog(PANIC, "_xl_remove_hash_entry: file was not found in cache");
@@ -211,11 +211,11 @@ XLogOpenRelation(RelFileNode rnode)
res->reldata.rd_node = rnode;
/*
- * We set up the lockRelId in case anything tries to lock the
- * dummy relation. Note that this is fairly bogus since relNode
- * may be different from the relation's OID. It shouldn't really
- * matter though, since we are presumably running by ourselves and
- * can't have any lock conflicts ...
+ * We set up the lockRelId in case anything tries to lock the dummy
+ * relation. Note that this is fairly bogus since relNode may be
+ * different from the relation's OID. It shouldn't really matter
+ * though, since we are presumably running by ourselves and can't have
+ * any lock conflicts ...
*/
res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode;
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
@@ -233,13 +233,13 @@ XLogOpenRelation(RelFileNode rnode)
RelationOpenSmgr(&(res->reldata));
/*
- * Create the target file if it doesn't already exist. This lets
- * us cope if the replay sequence contains writes to a relation
- * that is later deleted. (The original coding of this routine
- * would instead return NULL, causing the writes to be suppressed.
- * But that seems like it risks losing valuable data if the
- * filesystem loses an inode during a crash. Better to write the
- * data until we are actually told to delete the file.)
+ * Create the target file if it doesn't already exist. This lets us
+ * cope if the replay sequence contains writes to a relation that is
+ * later deleted. (The original coding of this routine would instead
+ * return NULL, causing the writes to be suppressed. But that seems
+ * like it risks losing valuable data if the filesystem loses an inode
+ * during a crash. Better to write the data until we are actually
+ * told to delete the file.)
*/
smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true);
}
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 6f74ceaed72..9ea3d741112 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.206 2005/08/08 03:11:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.207 2005/10/15 02:49:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -379,9 +379,8 @@ BootstrapMain(int argc, char *argv[])
BaseInit();
/*
- * We aren't going to do the full InitPostgres pushups, but there
- * are a couple of things that need to get lit up even in a dummy
- * process.
+ * We aren't going to do the full InitPostgres pushups, but there are a
+ * couple of things that need to get lit up even in a dummy process.
*/
if (IsUnderPostmaster)
{
@@ -445,8 +444,8 @@ BootstrapMain(int argc, char *argv[])
/*
* In NOP mode, all we really want to do is create shared memory and
- * semaphores (just to prove we can do it with the current GUC
- * settings). So, quit now.
+ * semaphores (just to prove we can do it with the current GUC settings).
+ * So, quit now.
*/
if (xlogop == BS_XLOG_NOP)
proc_exit(0);
@@ -465,8 +464,8 @@ BootstrapMain(int argc, char *argv[])
/*
* Process bootstrap input.
*
- * the sed script boot.sed renamed yyparse to Int_yyparse for the
- * bootstrap parser to avoid conflicts with the normal SQL parser
+ * the sed script boot.sed renamed yyparse to Int_yyparse for the bootstrap
+ * parser to avoid conflicts with the normal SQL parser
*/
Int_yyparse();
@@ -537,8 +536,7 @@ bootstrap_signals(void)
pqsignal(SIGWINCH, SIG_DFL);
/*
- * Unblock signals (they were blocked when the postmaster forked
- * us)
+ * Unblock signals (they were blocked when the postmaster forked us)
*/
PG_SETMASK(&UnBlockSig);
}
@@ -733,12 +731,12 @@ DefineAttr(char *name, char *type, int attnum)
attrtypes[attnum]->attislocal = true;
/*
- * Mark as "not null" if type is fixed-width and prior columns are
- * too. This corresponds to case where column can be accessed directly
- * via C struct declaration.
+ * Mark as "not null" if type is fixed-width and prior columns are too.
+ * This corresponds to case where column can be accessed directly via C
+ * struct declaration.
*
- * oidvector and int2vector are also treated as not-nullable, even
- * though they are no longer fixed-width.
+ * oidvector and int2vector are also treated as not-nullable, even though
+ * they are no longer fixed-width.
*/
#define MARKNOTNULL(att) \
((att)->attlen > 0 || \
@@ -1005,8 +1003,7 @@ MapArrayTypeName(char *s)
{
int i,
j;
- static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN
- * long */
+ static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN long */
if (s == NULL || s[0] == '\0')
return s;
@@ -1095,8 +1092,8 @@ FindStr(char *str, int length, hashnode *mderef)
while (node != NULL)
{
/*
- * We must differentiate between string constants that might have
- * the same value as a identifier and the identifier itself.
+ * We must differentiate between string constants that might have the
+ * same value as a identifier and the identifier itself.
*/
if (!strcmp(str, strtable[node->strnum]))
{
@@ -1131,11 +1128,11 @@ AddStr(char *str, int strlength, int mderef)
elog(FATAL, "bootstrap string table overflow");
/*
- * Some of the utilites (eg, define type, create relation) assume that
- * the string they're passed is a NAMEDATALEN. We get array bound
- * read violations from purify if we don't allocate at least
- * NAMEDATALEN bytes for strings of this sort. Because we're lazy, we
- * allocate at least NAMEDATALEN bytes all the time.
+ * Some of the utilites (eg, define type, create relation) assume that the
+ * string they're passed is a NAMEDATALEN. We get array bound read
+ * violations from purify if we don't allocate at least NAMEDATALEN bytes
+ * for strings of this sort. Because we're lazy, we allocate at least
+ * NAMEDATALEN bytes all the time.
*/
if ((len = strlength + 1) < NAMEDATALEN)
@@ -1191,8 +1188,8 @@ index_register(Oid heap,
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
- * bootstrap time. we'll declare the indices now, but want to create
- * them later.
+ * bootstrap time. we'll declare the indices now, but want to create them
+ * later.
*/
if (nogc == NULL)
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 689a2ff8196..15a197af81b 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.119 2005/10/10 18:49:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.120 2005/10/15 02:49:12 momjian Exp $
*
* NOTES
* See acl.h.
@@ -65,7 +65,7 @@ dumpacl(Acl *acl)
for (i = 0; i < ACL_NUM(acl); ++i)
elog(DEBUG2, " acl[%d]: %s", i,
DatumGetCString(DirectFunctionCall1(aclitemout,
- PointerGetDatum(aip + i))));
+ PointerGetDatum(aip + i))));
}
#endif /* ACLDEBUG */
@@ -101,9 +101,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
Acl *newer_acl;
if (grantee->rolname)
- aclitem.ai_grantee = get_roleid_checked(grantee->rolname);
+ aclitem.ai_grantee = get_roleid_checked(grantee->rolname);
+
else
- aclitem.ai_grantee = ACL_ID_PUBLIC;
+ aclitem.ai_grantee = ACL_ID_PUBLIC;
/*
* Grant options can only be granted to individual roles, not PUBLIC.
@@ -116,19 +117,18 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to roles")));
- aclitem.ai_grantor = grantorId;
+ aclitem.ai_grantor = grantorId;
/*
* The asymmetry in the conditions here comes from the spec. In
- * GRANT, the grant_option flag signals WITH GRANT OPTION, which
- * means to grant both the basic privilege and its grant option.
- * But in REVOKE, plain revoke revokes both the basic privilege
- * and its grant option, while REVOKE GRANT OPTION revokes only
- * the option.
+ * GRANT, the grant_option flag signals WITH GRANT OPTION, which means
+ * to grant both the basic privilege and its grant option. But in
+ * REVOKE, plain revoke revokes both the basic privilege and its grant
+ * option, while REVOKE GRANT OPTION revokes only the option.
*/
ACLITEM_SET_PRIVS_GOPTIONS(aclitem,
- (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
- (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS);
+ (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
+ (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS);
newer_acl = aclupdate(new_acl, &aclitem, modechg, ownerId, behavior);
@@ -221,8 +221,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_class];
char nulls[Natts_pg_class];
@@ -257,8 +257,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
relvar->relname)));
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*/
ownerId = pg_class_tuple->relowner;
aclDatum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relacl,
@@ -275,8 +275,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -289,12 +289,12 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -323,8 +323,8 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -411,8 +411,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_database];
char nulls[Natts_pg_database];
@@ -436,8 +436,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
pg_database_tuple = (Form_pg_database) GETSTRUCT(tuple);
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*/
ownerId = pg_database_tuple->datdba;
aclDatum = heap_getattr(tuple, Anum_pg_database_datacl,
@@ -454,8 +454,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -468,12 +468,12 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -502,8 +502,8 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -589,8 +589,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_proc];
char nulls[Natts_pg_proc];
@@ -611,8 +611,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
pg_proc_tuple = (Form_pg_proc) GETSTRUCT(tuple);
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*/
ownerId = pg_proc_tuple->proowner;
aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl,
@@ -629,8 +629,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -643,12 +643,12 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -677,8 +677,8 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -763,8 +763,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_language];
char nulls[Natts_pg_language];
@@ -788,14 +788,14 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("language \"%s\" is not trusted", langname),
- errhint("Only superusers may use untrusted languages.")));
+ errhint("Only superusers may use untrusted languages.")));
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*
- * Note: for now, languages are treated as owned by the bootstrap
- * user. We should add an owner column to pg_language instead.
+ * Note: for now, languages are treated as owned by the bootstrap user.
+ * We should add an owner column to pg_language instead.
*/
ownerId = BOOTSTRAP_SUPERUSERID;
aclDatum = SysCacheGetAttr(LANGNAME, tuple, Anum_pg_language_lanacl,
@@ -812,8 +812,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -826,12 +826,12 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -860,8 +860,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -946,8 +946,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_namespace];
char nulls[Natts_pg_namespace];
@@ -968,8 +968,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple);
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*/
ownerId = pg_namespace_tuple->nspowner;
aclDatum = SysCacheGetAttr(NAMESPACENAME, tuple,
@@ -987,8 +987,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -1001,12 +1001,12 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -1035,8 +1035,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -1103,8 +1103,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE))
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("invalid privilege type %s for tablespace",
- privilege_to_string(priv))));
+ errmsg("invalid privilege type %s for tablespace",
+ privilege_to_string(priv))));
privileges |= priv;
}
}
@@ -1123,8 +1123,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
AclMode this_privileges;
Acl *old_acl;
Acl *new_acl;
- Oid grantorId;
- Oid ownerId;
+ Oid grantorId;
+ Oid ownerId;
HeapTuple newtuple;
Datum values[Natts_pg_tablespace];
char nulls[Natts_pg_tablespace];
@@ -1144,12 +1144,12 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", spcname)));
+ errmsg("tablespace \"%s\" does not exist", spcname)));
pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple);
/*
- * Get owner ID and working copy of existing ACL.
- * If there's no ACL, substitute the proper default.
+ * Get owner ID and working copy of existing ACL. If there's no ACL,
+ * substitute the proper default.
*/
ownerId = pg_tablespace_tuple->spcowner;
aclDatum = heap_getattr(tuple, Anum_pg_tablespace_spcacl,
@@ -1166,8 +1166,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
/*
* If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object
- * will get you by here.
+ * error. Per spec, having any privilege at all on the object will
+ * get you by here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -1180,12 +1180,12 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
}
/*
- * Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't
- * quite what the spec says to do: the spec seems to want a
- * warning only if no privilege bits actually change in the ACL.
- * In practice that behavior seems much too noisy, as well as
- * inconsistent with the GRANT case.)
+ * Restrict the operation to what we can actually grant or revoke, and
+ * issue a warning if appropriate. (For REVOKE this isn't quite what
+ * the spec says to do: the spec seems to want a warning only if no
+ * privilege bits actually change in the ACL. In practice that
+ * behavior seems much too noisy, as well as inconsistent with the
+ * GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (stmt->is_grant)
@@ -1214,8 +1214,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt)
/*
* Generate new ACL.
*
- * We need the members of both old and new ACLs so we can correct
- * the shared dependency information.
+ * We need the members of both old and new ACLs so we can correct the
+ * shared dependency information.
*/
noldmembers = aclmembers(old_acl, &oldmembers);
@@ -1449,7 +1449,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/*
* Must get the relation's tuple from pg_class
@@ -1467,8 +1467,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
/*
* Deny anyone permission to update a system catalog unless
* pg_authid.rolcatupdate is set. (This is to let superusers protect
- * themselves from themselves.) Also allow it if
- * allowSystemTableMods.
+ * themselves from themselves.) Also allow it if allowSystemTableMods.
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
@@ -1543,7 +1542,7 @@ pg_database_aclmask(Oid db_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -1607,7 +1606,7 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -1622,7 +1621,7 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
ownerId = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;
@@ -1663,7 +1662,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -1678,7 +1677,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("language with OID %u does not exist", lang_oid)));
+ errmsg("language with OID %u does not exist", lang_oid)));
/* XXX pg_language should have an owner column, but doesn't */
ownerId = BOOTSTRAP_SUPERUSERID;
@@ -1720,30 +1719,30 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
return mask;
/*
- * If we have been assigned this namespace as a temp namespace, check
- * to make sure we have CREATE TEMP permission on the database, and if
- * so act as though we have all standard (but not GRANT OPTION)
- * permissions on the namespace. If we don't have CREATE TEMP, act as
- * though we have only USAGE (and not CREATE) rights.
+ * If we have been assigned this namespace as a temp namespace, check to
+ * make sure we have CREATE TEMP permission on the database, and if so act
+ * as though we have all standard (but not GRANT OPTION) permissions on
+ * the namespace. If we don't have CREATE TEMP, act as though we have
+ * only USAGE (and not CREATE) rights.
*
- * This may seem redundant given the check in InitTempTableNamespace, but
- * it really isn't since current user ID may have changed since then.
- * The upshot of this behavior is that a SECURITY DEFINER function can
- * create temp tables that can then be accessed (if permission is
- * granted) by code in the same session that doesn't have permissions
- * to create temp tables.
+ * This may seem redundant given the check in InitTempTableNamespace, but it
+ * really isn't since current user ID may have changed since then. The
+ * upshot of this behavior is that a SECURITY DEFINER function can create
+ * temp tables that can then be accessed (if permission is granted) by
+ * code in the same session that doesn't have permissions to create temp
+ * tables.
*
* XXX Would it be safe to ereport a special error message as
* InitTempTableNamespace does? Returning zero here means we'll get a
- * generic "permission denied for schema pg_temp_N" message, which is
- * not remarkably user-friendly.
+ * generic "permission denied for schema pg_temp_N" message, which is not
+ * remarkably user-friendly.
*/
if (isTempNamespace(nsp_oid))
{
@@ -1807,7 +1806,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
Datum aclDatum;
bool isNull;
Acl *acl;
- Oid ownerId;
+ Oid ownerId;
/*
* Only shared relations can be stored in global space; don't let even
@@ -1835,7 +1834,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace with OID %u does not exist", spc_oid)));
+ errmsg("tablespace with OID %u does not exist", spc_oid)));
ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner;
@@ -1951,7 +1950,7 @@ bool
pg_class_ownercheck(Oid class_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -1963,7 +1962,7 @@ pg_class_ownercheck(Oid class_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", class_oid)));
+ errmsg("relation with OID %u does not exist", class_oid)));
ownerId = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
@@ -1979,7 +1978,7 @@ bool
pg_type_ownercheck(Oid type_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2007,7 +2006,7 @@ bool
pg_oper_ownercheck(Oid oper_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2019,7 +2018,7 @@ pg_oper_ownercheck(Oid oper_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator with OID %u does not exist", oper_oid)));
+ errmsg("operator with OID %u does not exist", oper_oid)));
ownerId = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner;
@@ -2035,7 +2034,7 @@ bool
pg_proc_ownercheck(Oid proc_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2047,7 +2046,7 @@ pg_proc_ownercheck(Oid proc_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
ownerId = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;
@@ -2063,7 +2062,7 @@ bool
pg_namespace_ownercheck(Oid nsp_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2094,7 +2093,7 @@ pg_tablespace_ownercheck(Oid spc_oid, Oid roleid)
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple spctuple;
- Oid spcowner;
+ Oid spcowner;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2113,7 +2112,7 @@ pg_tablespace_ownercheck(Oid spc_oid, Oid roleid)
if (!HeapTupleIsValid(spctuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace with OID %u does not exist", spc_oid)));
+ errmsg("tablespace with OID %u does not exist", spc_oid)));
spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner;
@@ -2130,7 +2129,7 @@ bool
pg_opclass_ownercheck(Oid opc_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2162,7 +2161,7 @@ pg_database_ownercheck(Oid db_oid, Oid roleid)
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple dbtuple;
- Oid dba;
+ Oid dba;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2198,7 +2197,7 @@ bool
pg_conversion_ownercheck(Oid conv_oid, Oid roleid)
{
HeapTuple tuple;
- Oid ownerId;
+ Oid ownerId;
/* Superusers bypass all permission checking. */
if (superuser_arg(roleid))
@@ -2210,7 +2209,7 @@ pg_conversion_ownercheck(Oid conv_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("conversion with OID %u does not exist", conv_oid)));
+ errmsg("conversion with OID %u does not exist", conv_oid)));
ownerId = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner;
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 0648b578e9f..69313ea86a2 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.63 2005/08/12 01:35:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.64 2005/10/15 02:49:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -233,7 +233,7 @@ IsReservedName(const char *name)
* Since the OID is not immediately inserted into the table, there is a
* race condition here; but a problem could occur only if someone else
* managed to cycle through 2^32 OIDs and generate the same OID before we
- * finish inserting our row. This seems unlikely to be a problem. Note
+ * finish inserting our row. This seems unlikely to be a problem. Note
* that if we had to *commit* the row to end the race condition, the risk
* would be rather higher; therefore we use SnapshotDirty in the test,
* so that we will see uncommitted rows.
@@ -259,9 +259,9 @@ GetNewOid(Relation relation)
if (!OidIsValid(oidIndex))
{
/*
- * System catalogs that have OIDs should *always* have a unique
- * OID index; we should only take this path for user tables.
- * Give a warning if it looks like somebody forgot an index.
+ * System catalogs that have OIDs should *always* have a unique OID
+ * index; we should only take this path for user tables. Give a
+ * warning if it looks like somebody forgot an index.
*/
if (IsSystemRelation(relation))
elog(WARNING, "generating possibly-non-unique OID for \"%s\"",
@@ -338,7 +338,7 @@ GetNewOidWithIndex(Relation relation, Relation indexrel)
Oid
GetNewRelFileNode(Oid reltablespace, bool relisshared, Relation pg_class)
{
- RelFileNode rnode;
+ RelFileNode rnode;
char *rpath;
int fd;
bool collides;
@@ -369,14 +369,14 @@ GetNewRelFileNode(Oid reltablespace, bool relisshared, Relation pg_class)
{
/*
* Here we have a little bit of a dilemma: if errno is something
- * other than ENOENT, should we declare a collision and loop?
- * In particular one might think this advisable for, say, EPERM.
+ * other than ENOENT, should we declare a collision and loop? In
+ * particular one might think this advisable for, say, EPERM.
* However there really shouldn't be any unreadable files in a
* tablespace directory, and if the EPERM is actually complaining
* that we can't read the directory itself, we'd be in an infinite
* loop. In practice it seems best to go ahead regardless of the
- * errno. If there is a colliding file we will get an smgr failure
- * when we attempt to create the new relation file.
+ * errno. If there is a colliding file we will get an smgr
+ * failure when we attempt to create the new relation file.
*/
collides = false;
}
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 8060055ff72..92d72af0f9c 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.46 2005/10/02 23:50:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47 2005/10/15 02:49:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,9 +155,9 @@ performDeletion(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted
- * silently, even if the actual deletion pass first reaches one of
- * them via a non-auto dependency.
+ * dependencies from the target object. These should be deleted silently,
+ * even if the actual deletion pass first reaches one of them via a
+ * non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -167,9 +167,9 @@ performDeletion(const ObjectAddress *object,
NULL, &oktodelete, depRel))
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because other objects depend on it",
- objDescription),
- errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
+ errmsg("cannot drop %s because other objects depend on it",
+ objDescription),
+ errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
term_object_addresses(&oktodelete);
@@ -209,17 +209,17 @@ deleteWhatDependsOn(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted
- * silently, even if the actual deletion pass first reaches one of
- * them via a non-auto dependency.
+ * dependencies from the target object. These should be deleted silently,
+ * even if the actual deletion pass first reaches one of them via a
+ * non-auto dependency.
*/
init_object_addresses(&oktodelete);
findAutoDeletableObjects(object, &oktodelete, depRel);
/*
- * Now invoke only step 2 of recursiveDeletion: just recurse to the
- * stuff dependent on the given object.
+ * Now invoke only step 2 of recursiveDeletion: just recurse to the stuff
+ * dependent on the given object.
*/
if (!deleteDependentObjects(object, objDescription,
DROP_CASCADE,
@@ -263,9 +263,9 @@ findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddress otherObject;
/*
- * If this object is already in oktodelete, then we already visited
- * it; don't do so again (this prevents infinite recursion if there's
- * a loop in pg_depend). Otherwise, add it.
+ * If this object is already in oktodelete, then we already visited it;
+ * don't do so again (this prevents infinite recursion if there's a loop
+ * in pg_depend). Otherwise, add it.
*/
if (object_address_present(object, oktodelete))
return;
@@ -273,11 +273,11 @@ findAutoDeletableObjects(const ObjectAddress *object,
/*
* Scan pg_depend records that link to this object, showing the things
- * that depend on it. For each one that is AUTO or INTERNAL, visit
- * the referencing object.
+ * that depend on it. For each one that is AUTO or INTERNAL, visit the
+ * referencing object.
*
- * When dropping a whole object (subId = 0), find pg_depend records for
- * its sub-objects too.
+ * When dropping a whole object (subId = 0), find pg_depend records for its
+ * sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
@@ -322,8 +322,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
/*
* For a PIN dependency we just ereport immediately; there
- * won't be any others to examine, and we aren't ever
- * going to let the user delete it.
+ * won't be any others to examine, and we aren't ever going to
+ * let the user delete it.
*/
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
@@ -406,13 +406,13 @@ recursiveDeletion(const ObjectAddress *object,
objDescription = getObjectDescription(object);
/*
- * Step 1: find and remove pg_depend records that link from this
- * object to others. We have to do this anyway, and doing it first
- * ensures that we avoid infinite recursion in the case of cycles.
- * Also, some dependency types require extra processing here.
+ * Step 1: find and remove pg_depend records that link from this object to
+ * others. We have to do this anyway, and doing it first ensures that we
+ * avoid infinite recursion in the case of cycles. Also, some dependency
+ * types require extra processing here.
*
- * When dropping a whole object (subId = 0), remove all pg_depend records
- * for its sub-objects too.
+ * When dropping a whole object (subId = 0), remove all pg_depend records for
+ * its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
@@ -456,41 +456,41 @@ recursiveDeletion(const ObjectAddress *object,
* This object is part of the internal implementation of
* another object. We have three cases:
*
- * 1. At the outermost recursion level, disallow the DROP.
- * (We just ereport here, rather than proceeding, since no
- * other dependencies are likely to be interesting.)
+ * 1. At the outermost recursion level, disallow the DROP. (We
+ * just ereport here, rather than proceeding, since no other
+ * dependencies are likely to be interesting.)
*/
if (callingObject == NULL)
{
char *otherObjDesc = getObjectDescription(&otherObject);
ereport(ERROR,
- (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because %s requires it",
- objDescription, otherObjDesc),
- errhint("You may drop %s instead.",
- otherObjDesc)));
+ (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
+ errmsg("cannot drop %s because %s requires it",
+ objDescription, otherObjDesc),
+ errhint("You may drop %s instead.",
+ otherObjDesc)));
}
/*
- * 2. When recursing from the other end of this
- * dependency, it's okay to continue with the deletion.
- * This holds when recursing from a whole object that
- * includes the nominal other end as a component, too.
+ * 2. When recursing from the other end of this dependency,
+ * it's okay to continue with the deletion. This holds when
+ * recursing from a whole object that includes the nominal
+ * other end as a component, too.
*/
if (callingObject->classId == otherObject.classId &&
callingObject->objectId == otherObject.objectId &&
- (callingObject->objectSubId == otherObject.objectSubId ||
- callingObject->objectSubId == 0))
+ (callingObject->objectSubId == otherObject.objectSubId ||
+ callingObject->objectSubId == 0))
break;
/*
* 3. When recursing from anyplace else, transform this
- * deletion request into a delete of the other object.
- * (This will be an error condition iff RESTRICT mode.) In
- * this case we finish deleting my dependencies except for
- * the INTERNAL link, which will be needed to cause the
- * owning object to recurse back to me.
+ * deletion request into a delete of the other object. (This
+ * will be an error condition iff RESTRICT mode.) In this case
+ * we finish deleting my dependencies except for the INTERNAL
+ * link, which will be needed to cause the owning object to
+ * recurse back to me.
*/
if (amOwned) /* shouldn't happen */
elog(ERROR, "multiple INTERNAL dependencies for %s",
@@ -502,8 +502,8 @@ recursiveDeletion(const ObjectAddress *object,
case DEPENDENCY_PIN:
/*
- * Should not happen; PIN dependencies should have zeroes
- * in the depender fields...
+ * Should not happen; PIN dependencies should have zeroes in
+ * the depender fields...
*/
elog(ERROR, "incorrect use of PIN dependency with %s",
objDescription);
@@ -521,10 +521,10 @@ recursiveDeletion(const ObjectAddress *object,
systable_endscan(scan);
/*
- * CommandCounterIncrement here to ensure that preceding changes are
- * all visible; in particular, that the above deletions of pg_depend
- * entries are visible. That prevents infinite recursion in case of a
- * dependency loop (which is perfectly legal).
+ * CommandCounterIncrement here to ensure that preceding changes are all
+ * visible; in particular, that the above deletions of pg_depend entries
+ * are visible. That prevents infinite recursion in case of a dependency
+ * loop (which is perfectly legal).
*/
CommandCounterIncrement();
@@ -562,11 +562,11 @@ recursiveDeletion(const ObjectAddress *object,
}
/*
- * Step 2: scan pg_depend records that link to this object, showing
- * the things that depend on it. Recursively delete those things.
- * Note it's important to delete the dependent objects before the
- * referenced one, since the deletion routines might do things like
- * try to update the pg_class record when deleting a check constraint.
+ * Step 2: scan pg_depend records that link to this object, showing the
+ * things that depend on it. Recursively delete those things. Note it's
+ * important to delete the dependent objects before the referenced one,
+ * since the deletion routines might do things like try to update the
+ * pg_class record when deleting a check constraint.
*/
if (!deleteDependentObjects(object, objDescription,
behavior, msglevel,
@@ -584,23 +584,21 @@ recursiveDeletion(const ObjectAddress *object,
doDeletion(object);
/*
- * Delete any comments associated with this object. (This is a
- * convenient place to do it instead of having every object type know
- * to do it.)
+ * Delete any comments associated with this object. (This is a convenient
+ * place to do it instead of having every object type know to do it.)
*/
DeleteComments(object->objectId, object->classId, object->objectSubId);
/*
- * Delete shared dependency references related to this object.
- * Sub-objects (columns) don't have dependencies on global objects,
- * so skip them.
+ * Delete shared dependency references related to this object. Sub-objects
+ * (columns) don't have dependencies on global objects, so skip them.
*/
if (object->objectSubId == 0)
deleteSharedDependencyRecordsFor(object->classId, object->objectId);
/*
- * CommandCounterIncrement here to ensure that preceding changes are
- * all visible.
+ * CommandCounterIncrement here to ensure that preceding changes are all
+ * visible.
*/
CommandCounterIncrement();
@@ -691,10 +689,10 @@ deleteDependentObjects(const ObjectAddress *object,
case DEPENDENCY_NORMAL:
/*
- * Perhaps there was another dependency path that would
- * have allowed silent deletion of the otherObject, had we
- * only taken that path first. In that case, act like this
- * link is AUTO, too.
+ * Perhaps there was another dependency path that would have
+ * allowed silent deletion of the otherObject, had we only
+ * taken that path first. In that case, act like this link is
+ * AUTO, too.
*/
if (object_address_present(&otherObject, oktodelete))
ereport(DEBUG2,
@@ -1023,7 +1021,7 @@ find_expr_references_walker(Node *node,
var->varattno > list_length(rte->joinaliasvars))
elog(ERROR, "invalid varattno %d", var->varattno);
find_expr_references_walker((Node *) list_nth(rte->joinaliasvars,
- var->varattno - 1),
+ var->varattno - 1),
context);
list_free(context->rtables);
context->rtables = save_rtables;
@@ -1037,9 +1035,9 @@ find_expr_references_walker(Node *node,
/*
* If it's a regclass or similar literal referring to an existing
- * object, add a reference to that object. (Currently, only the
- * regclass case has any likely use, but we may as well handle all
- * the OID-alias datatypes consistently.)
+ * object, add a reference to that object. (Currently, only the
+ * regclass case has any likely use, but we may as well handle all the
+ * OID-alias datatypes consistently.)
*/
if (!con->constisnull)
{
@@ -1156,11 +1154,10 @@ find_expr_references_walker(Node *node,
bool result;
/*
- * Add whole-relation refs for each plain relation mentioned in
- * the subquery's rtable. (Note: query_tree_walker takes care of
- * recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
- * to do that here. But keep it from looking at join alias
- * lists.)
+ * Add whole-relation refs for each plain relation mentioned in the
+ * subquery's rtable. (Note: query_tree_walker takes care of
+ * recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need to do
+ * that here. But keep it from looking at join alias lists.)
*/
foreach(rtable, query->rtable)
{
@@ -1215,11 +1212,11 @@ eliminate_duplicate_dependencies(ObjectAddresses *addrs)
continue; /* identical, so drop thisobj */
/*
- * If we have a whole-object reference and a reference to a
- * part of the same object, we don't need the whole-object
- * reference (for example, we don't need to reference both
- * table foo and column foo.bar). The whole-object reference
- * will always appear first in the sorted list.
+ * If we have a whole-object reference and a reference to a part
+ * of the same object, we don't need the whole-object reference
+ * (for example, we don't need to reference both table foo and
+ * column foo.bar). The whole-object reference will always appear
+ * first in the sorted list.
*/
if (priorobj->objectSubId == 0)
{
@@ -1469,8 +1466,8 @@ getObjectDescription(const ObjectAddress *object)
getRelationDescription(&buffer, object->objectId);
if (object->objectSubId != 0)
appendStringInfo(&buffer, _(" column %s"),
- get_relid_attribute_name(object->objectId,
- object->objectSubId));
+ get_relid_attribute_name(object->objectId,
+ object->objectSubId));
break;
case OCLASS_PROC:
@@ -1566,13 +1563,13 @@ getObjectDescription(const ObjectAddress *object)
HeapTuple conTup;
conTup = SearchSysCache(CONOID,
- ObjectIdGetDatum(object->objectId),
+ ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(conTup))
elog(ERROR, "cache lookup failed for conversion %u",
object->objectId);
appendStringInfo(&buffer, _("conversion %s"),
- NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname));
ReleaseSysCache(conTup);
break;
}
@@ -1621,13 +1618,13 @@ getObjectDescription(const ObjectAddress *object)
HeapTuple langTup;
langTup = SearchSysCache(LANGOID,
- ObjectIdGetDatum(object->objectId),
+ ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(langTup))
elog(ERROR, "cache lookup failed for language %u",
object->objectId);
appendStringInfo(&buffer, _("language %s"),
- NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname));
+ NameStr(((Form_pg_language) GETSTRUCT(langTup))->lanname));
ReleaseSysCache(langTup);
break;
}
@@ -1646,7 +1643,7 @@ getObjectDescription(const ObjectAddress *object)
char *nspname;
opcTup = SearchSysCache(CLAOID,
- ObjectIdGetDatum(object->objectId),
+ ObjectIdGetDatum(object->objectId),
0, 0, 0);
if (!HeapTupleIsValid(opcTup))
elog(ERROR, "cache lookup failed for opclass %u",
@@ -1669,7 +1666,7 @@ getObjectDescription(const ObjectAddress *object)
appendStringInfo(&buffer, _("operator class %s for access method %s"),
quote_qualified_identifier(nspname,
- NameStr(opcForm->opcname)),
+ NameStr(opcForm->opcname)),
NameStr(amForm->amname));
ReleaseSysCache(amTup);
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index f5f030695be..15c0129c613 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.290 2005/08/26 03:07:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.291 2005/10/15 02:49:12 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -67,7 +67,7 @@ static void AddNewRelationTuple(Relation pg_class_desc,
Oid new_rel_oid, Oid new_type_oid,
Oid relowner,
char relkind);
-static Oid AddNewRelationType(const char *typeName,
+static Oid AddNewRelationType(const char *typeName,
Oid typeNamespace,
Oid new_rel_oid,
char new_rel_kind);
@@ -217,23 +217,24 @@ heap_create(const char *relname,
* sanity checks
*/
if (!allow_system_table_mods &&
- (IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) &&
+ (IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace)) &&
IsNormalProcessingMode())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create \"%s.%s\"",
get_namespace_name(relnamespace), relname),
- errdetail("System catalog modifications are currently disallowed.")));
+ errdetail("System catalog modifications are currently disallowed.")));
/*
- * Decide if we need storage or not, and handle a couple other
- * special cases for particular relkinds.
+ * Decide if we need storage or not, and handle a couple other special
+ * cases for particular relkinds.
*/
switch (relkind)
{
case RELKIND_VIEW:
case RELKIND_COMPOSITE_TYPE:
create_storage = false;
+
/*
* Force reltablespace to zero if the relation has no physical
* storage. This is mainly just for cleanliness' sake.
@@ -242,6 +243,7 @@ heap_create(const char *relname,
break;
case RELKIND_SEQUENCE:
create_storage = true;
+
/*
* Force reltablespace to zero for sequences, since we don't
* support moving them around into different tablespaces.
@@ -257,8 +259,8 @@ heap_create(const char *relname,
* Never allow a pg_class entry to explicitly specify the database's
* default tablespace in reltablespace; force it to zero instead. This
* ensures that if the database is cloned with a different default
- * tablespace, the pg_class entry will still match where CREATE
- * DATABASE will put the physically copied relation.
+ * tablespace, the pg_class entry will still match where CREATE DATABASE
+ * will put the physically copied relation.
*
* Yes, this is a bit of a hack.
*/
@@ -276,8 +278,7 @@ heap_create(const char *relname,
shared_relation);
/*
- * have the storage manager create the relation's disk file, if
- * needed.
+ * have the storage manager create the relation's disk file, if needed.
*/
if (create_storage)
{
@@ -453,8 +454,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
indstate = CatalogOpenIndexes(rel);
/*
- * First we add the user attributes. This is also a convenient place
- * to add dependencies on their datatypes.
+ * First we add the user attributes. This is also a convenient place to
+ * add dependencies on their datatypes.
*/
dpp = tupdesc->attrs;
for (i = 0; i < natts; i++)
@@ -488,10 +489,9 @@ AddNewAttributeTuples(Oid new_rel_oid,
}
/*
- * Next we add the system attributes. Skip OID if rel has no OIDs.
- * Skip all for a view or type relation. We don't bother with making
- * datatype dependencies here, since presumably all these types are
- * pinned.
+ * Next we add the system attributes. Skip OID if rel has no OIDs. Skip
+ * all for a view or type relation. We don't bother with making datatype
+ * dependencies here, since presumably all these types are pinned.
*/
if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE)
{
@@ -563,8 +563,8 @@ AddNewRelationTuple(Relation pg_class_desc,
HeapTuple tup;
/*
- * first we update some of the information in our uncataloged
- * relation's relation descriptor.
+ * first we update some of the information in our uncataloged relation's
+ * relation descriptor.
*/
new_rel_reltup = new_rel_desc->rd_rel;
@@ -632,28 +632,28 @@ AddNewRelationType(const char *typeName,
char new_rel_kind)
{
return
- TypeCreate(typeName, /* type name */
- typeNamespace, /* type namespace */
- new_rel_oid, /* relation oid */
+ TypeCreate(typeName, /* type name */
+ typeNamespace, /* type namespace */
+ new_rel_oid, /* relation oid */
new_rel_kind, /* relation kind */
- -1, /* internal size (varlena) */
- 'c', /* type-type (complex) */
- ',', /* default array delimiter */
- F_RECORD_IN, /* input procedure */
+ -1, /* internal size (varlena) */
+ 'c', /* type-type (complex) */
+ ',', /* default array delimiter */
+ F_RECORD_IN, /* input procedure */
F_RECORD_OUT, /* output procedure */
- F_RECORD_RECV, /* receive procedure */
- F_RECORD_SEND, /* send procedure */
- InvalidOid, /* analyze procedure - default */
- InvalidOid, /* array element type - irrelevant */
- InvalidOid, /* domain base type - irrelevant */
- NULL, /* default value - none */
- NULL, /* default binary representation */
- false, /* passed by reference */
- 'd', /* alignment - must be the largest! */
- 'x', /* fully TOASTable */
- -1, /* typmod */
- 0, /* array dimensions for typBaseType */
- false); /* Type NOT NULL */
+ F_RECORD_RECV, /* receive procedure */
+ F_RECORD_SEND, /* send procedure */
+ InvalidOid, /* analyze procedure - default */
+ InvalidOid, /* array element type - irrelevant */
+ InvalidOid, /* domain base type - irrelevant */
+ NULL, /* default value - none */
+ NULL, /* default binary representation */
+ false, /* passed by reference */
+ 'd', /* alignment - must be the largest! */
+ 'x', /* fully TOASTable */
+ -1, /* typmod */
+ 0, /* array dimensions for typBaseType */
+ false); /* Type NOT NULL */
}
/* --------------------------------
@@ -697,17 +697,17 @@ heap_create_with_catalog(const char *relname,
/*
* Allocate an OID for the relation, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't
- * collide with either pg_class OIDs or existing physical files.
+ * The OID will be the relfilenode as well, so make sure it doesn't collide
+ * with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
relid = GetNewRelFileNode(reltablespace, shared_relation,
pg_class_desc);
/*
- * Create the relcache entry (mostly dummy at this point) and the
- * physical disk file. (If we fail further down, it's the smgr's
- * responsibility to remove the disk file again.)
+ * Create the relcache entry (mostly dummy at this point) and the physical
+ * disk file. (If we fail further down, it's the smgr's responsibility to
+ * remove the disk file again.)
*/
new_rel_desc = heap_create(relname,
relnamespace,
@@ -724,8 +724,8 @@ heap_create_with_catalog(const char *relname,
* since defining a relation also defines a complex type, we add a new
* system type corresponding to the new relation.
*
- * NOTE: we could get a unique-index failure here, in case the same name
- * has already been used for a type.
+ * NOTE: we could get a unique-index failure here, in case the same name has
+ * already been used for a type.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
@@ -735,9 +735,9 @@ heap_create_with_catalog(const char *relname,
/*
* now create an entry in pg_class for the relation.
*
- * NOTE: we could get a unique-index failure here, in case someone else
- * is creating the same relation name in parallel but hadn't committed
- * yet when we checked for a duplicate name above.
+ * NOTE: we could get a unique-index failure here, in case someone else is
+ * creating the same relation name in parallel but hadn't committed yet
+ * when we checked for a duplicate name above.
*/
AddNewRelationTuple(pg_class_desc,
new_rel_desc,
@@ -747,8 +747,7 @@ heap_create_with_catalog(const char *relname,
relkind);
/*
- * now add tuples to pg_attribute for the attributes in our new
- * relation.
+ * now add tuples to pg_attribute for the attributes in our new relation.
*/
AddNewAttributeTuples(relid, new_rel_desc->rd_att, relkind,
oidislocal, oidinhcount);
@@ -779,10 +778,9 @@ heap_create_with_catalog(const char *relname,
/*
* store constraints and defaults passed in the tupdesc, if any.
*
- * NB: this may do a CommandCounterIncrement and rebuild the relcache
- * entry, so the relation must be valid and self-consistent at this
- * point. In particular, there are not yet constraints and defaults
- * anywhere.
+ * NB: this may do a CommandCounterIncrement and rebuild the relcache entry,
+ * so the relation must be valid and self-consistent at this point. In
+ * particular, there are not yet constraints and defaults anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
@@ -793,8 +791,8 @@ heap_create_with_catalog(const char *relname,
register_on_commit_action(relid, oncommit);
/*
- * ok, the relation has been cataloged, so close our relations and
- * return the OID of the newly created relation.
+ * ok, the relation has been cataloged, so close our relations and return
+ * the OID of the newly created relation.
*/
heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */
heap_close(pg_class_desc, RowExclusiveLock);
@@ -923,11 +921,11 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
char newattname[NAMEDATALEN];
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction. (In the simple case where we are
- * directly dropping this column, AlterTableDropColumn already did
- * this ... but when cascading from a drop of some other object, we
- * may not have any lock.)
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction. (In the simple case where we are directly
+ * dropping this column, AlterTableDropColumn already did this ... but
+ * when cascading from a drop of some other object, we may not have any
+ * lock.)
*/
rel = relation_open(relid, AccessExclusiveLock);
@@ -957,12 +955,12 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
/*
* Set the type OID to invalid. A dropped attribute's type link
- * cannot be relied on (once the attribute is dropped, the type
- * might be too). Fortunately we do not need the type row --- the
- * only really essential information is the type's typlen and
- * typalign, which are preserved in the attribute's attlen and
- * attalign. We set atttypid to zero here as a means of catching
- * code that incorrectly expects it to be valid.
+ * cannot be relied on (once the attribute is dropped, the type might
+ * be too). Fortunately we do not need the type row --- the only
+ * really essential information is the type's typlen and typalign,
+ * which are preserved in the attribute's attlen and attalign. We set
+ * atttypid to zero here as a means of catching code that incorrectly
+ * expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
@@ -973,8 +971,7 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
attStruct->attstattarget = 0;
/*
- * Change the column name to something that isn't likely to
- * conflict
+ * Change the column name to something that isn't likely to conflict
*/
snprintf(newattname, sizeof(newattname),
"........pg.dropped.%d........", attnum);
@@ -987,9 +984,9 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
}
/*
- * Because updating the pg_attribute row will trigger a relcache flush
- * for the target relation, we need not do anything else to notify
- * other backends of the change.
+ * Because updating the pg_attribute row will trigger a relcache flush for
+ * the target relation, we need not do anything else to notify other
+ * backends of the change.
*/
heap_close(attr_rel, RowExclusiveLock);
@@ -1118,8 +1115,8 @@ RemoveAttrDefaultById(Oid attrdefId)
CatalogUpdateIndexes(attr_rel, tuple);
/*
- * Our update of the pg_attribute row will force a relcache rebuild,
- * so there's nothing else to do here.
+ * Our update of the pg_attribute row will force a relcache rebuild, so
+ * there's nothing else to do here.
*/
heap_close(attr_rel, RowExclusiveLock);
@@ -1157,9 +1154,9 @@ heap_drop_with_catalog(Oid relid)
}
/*
- * Close relcache entry, but *keep* AccessExclusiveLock on the
- * relation until transaction commit. This ensures no one else will
- * try to do something with the doomed relation.
+ * Close relcache entry, but *keep* AccessExclusiveLock on the relation
+ * until transaction commit. This ensures no one else will try to do
+ * something with the doomed relation.
*/
relation_close(rel, NoLock);
@@ -1170,10 +1167,10 @@ heap_drop_with_catalog(Oid relid)
/*
* Flush the relation from the relcache. We want to do this before
- * starting to remove catalog entries, just to be certain that no
- * relcache entry rebuild will happen partway through. (That should
- * not really matter, since we don't do CommandCounterIncrement here,
- * but let's be safe.)
+ * starting to remove catalog entries, just to be certain that no relcache
+ * entry rebuild will happen partway through. (That should not really
+ * matter, since we don't do CommandCounterIncrement here, but let's be
+ * safe.)
*/
RelationForgetRelation(relid);
@@ -1228,8 +1225,8 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin)
* deparse it
*/
adsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false, false);
/*
@@ -1238,9 +1235,9 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin)
values[Anum_pg_attrdef_adrelid - 1] = RelationGetRelid(rel);
values[Anum_pg_attrdef_adnum - 1] = attnum;
values[Anum_pg_attrdef_adbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adbin));
+ CStringGetDatum(adbin));
values[Anum_pg_attrdef_adsrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adsrc));
+ CStringGetDatum(adsrc));
adrel = heap_open(AttrDefaultRelationId, RowExclusiveLock);
@@ -1285,8 +1282,8 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin)
heap_freetuple(atttup);
/*
- * Make a dependency so that the pg_attrdef entry goes away if the
- * column (or whole table) is deleted.
+ * Make a dependency so that the pg_attrdef entry goes away if the column
+ * (or whole table) is deleted.
*/
colobject.classId = RelationRelationId;
colobject.objectId = RelationGetRelid(rel);
@@ -1325,16 +1322,15 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
* deparse it
*/
ccsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false, false);
/*
* Find columns of rel that are used in ccbin
*
- * NB: pull_var_clause is okay here only because we don't allow
- * subselects in check constraints; it would fail to examine the
- * contents of subselects.
+ * NB: pull_var_clause is okay here only because we don't allow subselects in
+ * check constraints; it would fail to examine the contents of subselects.
*/
varList = pull_var_clause(expr, false);
keycount = list_length(varList);
@@ -1405,10 +1401,9 @@ StoreConstraints(Relation rel, TupleDesc tupdesc)
return; /* nothing to do */
/*
- * Deparsing of constraint expressions will fail unless the
- * just-created pg_attribute tuples for this relation are made
- * visible. So, bump the command counter. CAUTION: this will cause a
- * relcache entry rebuild.
+ * Deparsing of constraint expressions will fail unless the just-created
+ * pg_attribute tuples for this relation are made visible. So, bump the
+ * command counter. CAUTION: this will cause a relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -1483,8 +1478,8 @@ AddRelationRawConstraints(Relation rel,
}
/*
- * Create a dummy ParseState and insert the target relation as its
- * sole rangetable entry. We need a ParseState for transformExpr.
+ * Create a dummy ParseState and insert the target relation as its sole
+ * rangetable entry. We need a ParseState for transformExpr.
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
@@ -1546,8 +1541,8 @@ AddRelationRawConstraints(Relation rel,
if (list_length(pstate->p_rtable) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("only table \"%s\" can be referenced in check constraint",
- RelationGetRelationName(rel))));
+ errmsg("only table \"%s\" can be referenced in check constraint",
+ RelationGetRelationName(rel))));
/*
* No subplans or aggregates, either...
@@ -1559,7 +1554,7 @@ AddRelationRawConstraints(Relation rel,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in check constraint")));
+ errmsg("cannot use aggregate function in check constraint")));
/*
* Check name uniqueness, or generate a name if none was given.
@@ -1576,8 +1571,8 @@ AddRelationRawConstraints(Relation rel,
ccname))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" already exists",
- ccname, RelationGetRelationName(rel))));
+ errmsg("constraint \"%s\" for relation \"%s\" already exists",
+ ccname, RelationGetRelationName(rel))));
/* Check against other new constraints */
/* Needed because we don't do CommandCounterIncrement in loop */
foreach(cell2, checknames)
@@ -1585,20 +1580,19 @@ AddRelationRawConstraints(Relation rel,
if (strcmp((char *) lfirst(cell2), ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("check constraint \"%s\" already exists",
- ccname)));
+ errmsg("check constraint \"%s\" already exists",
+ ccname)));
}
}
else
{
/*
- * When generating a name, we want to create "tab_col_check"
- * for a column constraint and "tab_check" for a table
- * constraint. We no longer have any info about the syntactic
- * positioning of the constraint phrase, so we approximate
- * this by seeing whether the expression references more than
- * one column. (If the user played by the rules, the result
- * is the same...)
+ * When generating a name, we want to create "tab_col_check" for a
+ * column constraint and "tab_check" for a table constraint. We
+ * no longer have any info about the syntactic positioning of the
+ * constraint phrase, so we approximate this by seeing whether the
+ * expression references more than one column. (If the user
+ * played by the rules, the result is the same...)
*
* Note: pull_var_clause() doesn't descend into sublinks, but we
* eliminated those above; and anyway this only needs to be an
@@ -1644,11 +1638,11 @@ AddRelationRawConstraints(Relation rel,
}
/*
- * Update the count of constraints in the relation's pg_class tuple.
- * We do this even if there was no change, in order to ensure that an
- * SI update message is sent out for the pg_class tuple, which will
- * force other backends to rebuild their relcache entries for the rel.
- * (This is critical if we added defaults but not constraints.)
+ * Update the count of constraints in the relation's pg_class tuple. We do
+ * this even if there was no change, in order to ensure that an SI update
+ * message is sent out for the pg_class tuple, which will force other
+ * backends to rebuild their relcache entries for the rel. (This is
+ * critical if we added defaults but not constraints.)
*/
SetRelationNumChecks(rel, numchecks);
@@ -1734,7 +1728,7 @@ cookDefault(ParseState *pstate,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use column references in default expression")));
+ errmsg("cannot use column references in default expression")));
/*
* It can't return a set either.
@@ -1754,12 +1748,12 @@ cookDefault(ParseState *pstate,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in default expression")));
+ errmsg("cannot use aggregate function in default expression")));
/*
- * Coerce the expression to the correct type and typmod, if given.
- * This should match the parser's processing of non-defaulted
- * expressions --- see updateTargetListEntry().
+ * Coerce the expression to the correct type and typmod, if given. This
+ * should match the parser's processing of non-defaulted expressions ---
+ * see updateTargetListEntry().
*/
if (OidIsValid(atttypid))
{
@@ -1777,7 +1771,7 @@ cookDefault(ParseState *pstate,
attname,
format_type_be(atttypid),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return expr;
@@ -1930,9 +1924,9 @@ RelationTruncateIndexes(Oid heapId)
index_build(heapRelation, currentIndex, indexInfo);
/*
- * index_build will close both the heap and index relations (but
- * not give up the locks we hold on them). We're done with this
- * index, but we must re-open the heap rel.
+ * index_build will close both the heap and index relations (but not
+ * give up the locks we hold on them). We're done with this index,
+ * but we must re-open the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
}
@@ -1947,7 +1941,7 @@ RelationTruncateIndexes(Oid heapId)
* This routine deletes all data within all the specified relations.
*
* This is not transaction-safe! There is another, transaction-safe
- * implementation in commands/tablecmds.c. We now use this only for
+ * implementation in commands/tablecmds.c. We now use this only for
* ON COMMIT truncation of temporary tables, where it doesn't matter.
*/
void
@@ -2039,8 +2033,8 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. Right now, it is a seqscan
- * because there is no available index on confrelid.
+ * Otherwise, must scan pg_constraint. Right now, it is a seqscan because
+ * there is no available index on confrelid.
*/
fkeyRel = heap_open(ConstraintRelationId, AccessShareLock);
@@ -2056,16 +2050,16 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
continue;
/* Not for one of our list of tables */
- if (! list_member_oid(oids, con->confrelid))
+ if (!list_member_oid(oids, con->confrelid))
continue;
/* The referencer should be in our list too */
- if (! list_member_oid(oids, con->conrelid))
+ if (!list_member_oid(oids, con->conrelid))
{
if (tempTables)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unsupported ON COMMIT and foreign key combination"),
+ errmsg("unsupported ON COMMIT and foreign key combination"),
errdetail("Table \"%s\" references \"%s\" via foreign key constraint \"%s\", but they do not have the same ON COMMIT setting.",
get_rel_name(con->conrelid),
get_rel_name(con->confrelid),
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 3d543fa06c6..a25f34b85e0 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.260 2005/08/26 03:07:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261 2005/10/15 02:49:12 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -91,9 +91,9 @@ ConstructTupleDescriptor(Relation heapRelation,
indexTupDesc = CreateTemplateTupleDesc(numatts, false);
/*
- * For simple index columns, we copy the pg_attribute row from the
- * parent relation and modify it as necessary. For expressions we
- * have to cons up a pg_attribute row the hard way.
+ * For simple index columns, we copy the pg_attribute row from the parent
+ * relation and modify it as necessary. For expressions we have to cons
+ * up a pg_attribute row the hard way.
*/
for (i = 0; i < numatts; i++)
{
@@ -114,7 +114,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* here we are indexing on a system attribute (-1...-n)
*/
from = SystemAttributeDefinition(atnum,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
}
else
{
@@ -127,8 +127,8 @@ ConstructTupleDescriptor(Relation heapRelation,
}
/*
- * now that we've determined the "from", let's copy the tuple
- * desc data...
+ * now that we've determined the "from", let's copy the tuple desc
+ * data...
*/
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
@@ -158,14 +158,13 @@ ConstructTupleDescriptor(Relation heapRelation,
indexpr_item = lnext(indexpr_item);
/*
- * Make the attribute's name "pg_expresssion_nnn" (maybe think
- * of something better later)
+ * Make the attribute's name "pg_expresssion_nnn" (maybe think of
+ * something better later)
*/
sprintf(NameStr(to->attname), "pg_expression_%d", i + 1);
/*
- * Lookup the expression type in pg_type for the type length
- * etc.
+ * Lookup the expression type in pg_type for the type length etc.
*/
keyType = exprType(indexkey);
tuple = SearchSysCache(TYPEOID,
@@ -193,15 +192,15 @@ ConstructTupleDescriptor(Relation heapRelation,
}
/*
- * We do not yet have the correct relation OID for the index, so
- * just set it invalid for now. InitializeAttributeOids() will
- * fix it later.
+ * We do not yet have the correct relation OID for the index, so just
+ * set it invalid for now. InitializeAttributeOids() will fix it
+ * later.
*/
to->attrelid = InvalidOid;
/*
- * Check the opclass to see if it provides a keytype (overriding
- * the attribute type).
+ * Check the opclass to see if it provides a keytype (overriding the
+ * attribute type).
*/
tuple = SearchSysCache(CLAOID,
ObjectIdGetDatum(classObjectId[i]),
@@ -311,8 +310,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
for (i = 0; i < numatts; i++)
{
/*
- * There used to be very grotty code here to set these fields, but
- * I think it's unnecessary. They should be set already.
+ * There used to be very grotty code here to set these fields, but I
+ * think it's unnecessary. They should be set already.
*/
Assert(indexTupDesc->attrs[i]->attnum == i + 1);
Assert(indexTupDesc->attrs[i]->attcacheoff == -1);
@@ -380,8 +379,8 @@ UpdateIndexRelation(Oid indexoid,
exprsDatum = (Datum) 0;
/*
- * Convert the index predicate (if any) to a text datum. Note we
- * convert implicit-AND format to normal explicit-AND for storage.
+ * Convert the index predicate (if any) to a text datum. Note we convert
+ * implicit-AND format to normal explicit-AND for storage.
*/
if (indexInfo->ii_Predicate != NIL)
{
@@ -442,7 +441,7 @@ UpdateIndexRelation(Oid indexoid,
* index_create
*
* indexRelationId is normally InvalidOid to let this routine
- * generate an OID for the index. During bootstrap it may be
+ * generate an OID for the index. During bootstrap it may be
* nonzero to specify a preselected OID.
*
* Returns OID of the created index.
@@ -500,15 +499,14 @@ index_create(Oid heapRelationId,
* We cannot allow indexing a shared relation after initdb (because
* there's no way to make the entry in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting of shared rels happens after the
- * bootstrap phase, so checking IsBootstrapProcessingMode() won't
- * work). However, we can at least prevent this mistake under normal
- * multi-user operation.
+ * standalone backend (toasting of shared rels happens after the bootstrap
+ * phase, so checking IsBootstrapProcessingMode() won't work). However,
+ * we can at least prevent this mistake under normal multi-user operation.
*/
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared indexes cannot be created after initdb")));
+ errmsg("shared indexes cannot be created after initdb")));
if (get_relname_relid(indexRelationName, namespaceId))
ereport(ERROR,
@@ -526,17 +524,17 @@ index_create(Oid heapRelationId,
/*
* Allocate an OID for the index, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't
- * collide with either pg_class OIDs or existing physical files.
+ * The OID will be the relfilenode as well, so make sure it doesn't collide
+ * with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
indexRelationId = GetNewRelFileNode(tableSpaceId, shared_relation,
pg_class);
/*
- * create the index relation's relcache entry and physical disk file.
- * (If we fail further down, it's the smgr's responsibility to remove
- * the disk file again.)
+ * create the index relation's relcache entry and physical disk file. (If
+ * we fail further down, it's the smgr's responsibility to remove the disk
+ * file again.)
*/
indexRelation = heap_create(indexRelationName,
namespaceId,
@@ -557,8 +555,8 @@ index_create(Oid heapRelationId,
LockRelation(indexRelation, AccessExclusiveLock);
/*
- * Fill in fields of the index's pg_class entry that are not set
- * correctly by heap_create.
+ * Fill in fields of the index's pg_class entry that are not set correctly
+ * by heap_create.
*
* XXX should have a cleaner way to create cataloged indexes
*/
@@ -602,16 +600,16 @@ index_create(Oid heapRelationId,
/*
* Register constraint and dependencies for the index.
*
- * If the index is from a CONSTRAINT clause, construct a pg_constraint
- * entry. The index is then linked to the constraint, which in turn
- * is linked to the table. If it's not a CONSTRAINT, make the
- * dependency directly on the table.
+ * If the index is from a CONSTRAINT clause, construct a pg_constraint entry.
+ * The index is then linked to the constraint, which in turn is linked to
+ * the table. If it's not a CONSTRAINT, make the dependency directly on
+ * the table.
*
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
- * During bootstrap we can't register any dependencies, and we don't try
- * to make a constraint either.
+ * During bootstrap we can't register any dependencies, and we don't try to
+ * make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
@@ -697,7 +695,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Expressions)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Expressions,
+ (Node *) indexInfo->ii_Expressions,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -707,7 +705,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Predicate)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Predicate,
+ (Node *) indexInfo->ii_Predicate,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -721,10 +719,10 @@ index_create(Oid heapRelationId,
CommandCounterIncrement();
/*
- * In bootstrap mode, we have to fill in the index strategy structure
- * with information from the catalogs. If we aren't bootstrapping,
- * then the relcache entry has already been rebuilt thanks to sinval
- * update during CommandCounterIncrement.
+ * In bootstrap mode, we have to fill in the index strategy structure with
+ * information from the catalogs. If we aren't bootstrapping, then the
+ * relcache entry has already been rebuilt thanks to sinval update during
+ * CommandCounterIncrement.
*/
if (IsBootstrapProcessingMode())
RelationInitIndexAccessInfo(indexRelation);
@@ -732,17 +730,16 @@ index_create(Oid heapRelationId,
Assert(indexRelation->rd_indexcxt != NULL);
/*
- * If this is bootstrap (initdb) time, then we don't actually fill in
- * the index yet. We'll be creating more indexes and classes later,
- * so we delay filling them in until just before we're done with
- * bootstrapping. Similarly, if the caller specified skip_build then
- * filling the index is delayed till later (ALTER TABLE can save work
- * in some cases with this). Otherwise, we call the AM routine that
- * constructs the index.
+ * If this is bootstrap (initdb) time, then we don't actually fill in the
+ * index yet. We'll be creating more indexes and classes later, so we
+ * delay filling them in until just before we're done with bootstrapping.
+ * Similarly, if the caller specified skip_build then filling the index is
+ * delayed till later (ALTER TABLE can save work in some cases with this).
+ * Otherwise, we call the AM routine that constructs the index.
*
- * In normal processing mode, the heap and index relations are closed,
- * but we continue to hold the ShareLock on the heap and the exclusive
- * lock on the index that we acquired above, until end of transaction.
+ * In normal processing mode, the heap and index relations are closed, but we
+ * continue to hold the ShareLock on the heap and the exclusive lock on
+ * the index that we acquired above, until end of transaction.
*/
if (IsBootstrapProcessingMode())
{
@@ -784,13 +781,12 @@ index_drop(Oid indexId)
* To drop an index safely, we must grab exclusive lock on its parent
* table; otherwise there could be other backends using the index!
* Exclusive lock on the index alone is insufficient because another
- * backend might be in the midst of devising a query plan that will
- * use the index. The parser and planner take care to hold an
- * appropriate lock on the parent table while working, but having them
- * hold locks on all the indexes too seems overly expensive. We do grab
- * exclusive lock on the index too, just to be safe. Both locks must
- * be held till end of transaction, else other backends will still see
- * this index in pg_index.
+ * backend might be in the midst of devising a query plan that will use
+ * the index. The parser and planner take care to hold an appropriate
+ * lock on the parent table while working, but having them hold locks on
+ * all the indexes too seems overly expensive. We do grab exclusive lock
+ * on the index too, just to be safe. Both locks must be held till end of
+ * transaction, else other backends will still see this index in pg_index.
*/
heapId = IndexGetRelation(indexId);
userHeapRelation = heap_open(heapId, AccessExclusiveLock);
@@ -806,9 +802,9 @@ index_drop(Oid indexId)
userIndexRelation->rd_istemp);
/*
- * Close and flush the index's relcache entry, to ensure relcache
- * doesn't try to rebuild it while we're deleting catalog entries. We
- * keep the lock though.
+ * Close and flush the index's relcache entry, to ensure relcache doesn't
+ * try to rebuild it while we're deleting catalog entries. We keep the
+ * lock though.
*/
index_close(userIndexRelation);
@@ -833,8 +829,8 @@ index_drop(Oid indexId)
heap_close(indexRelation, RowExclusiveLock);
/*
- * if it has any expression columns, we might have stored statistics
- * about them.
+ * if it has any expression columns, we might have stored statistics about
+ * them.
*/
if (hasexprs)
RemoveStatistics(indexId, 0);
@@ -850,12 +846,11 @@ index_drop(Oid indexId)
DeleteRelationTuple(indexId);
/*
- * We are presently too lazy to attempt to compute the new correct
- * value of relhasindex (the next VACUUM will fix it if necessary). So
- * there is no need to update the pg_class tuple for the owning
- * relation. But we must send out a shared-cache-inval notice on the
- * owning relation to ensure other backends update their relcache
- * lists of indexes.
+ * We are presently too lazy to attempt to compute the new correct value
+ * of relhasindex (the next VACUUM will fix it if necessary). So there is
+ * no need to update the pg_class tuple for the owning relation. But we
+ * must send out a shared-cache-inval notice on the owning relation to
+ * ensure other backends update their relcache lists of indexes.
*/
CacheInvalidateRelcache(userHeapRelation);
@@ -926,7 +921,7 @@ BuildIndexInfo(Relation index)
* context must point to the heap tuple passed in.
*
* Notice we don't actually call index_form_tuple() here; we just prepare
- * its input arrays values[] and isnull[]. This is because the index AM
+ * its input arrays values[] and isnull[]. This is because the index AM
* may wish to alter the data before storage.
* ----------------
*/
@@ -974,7 +969,7 @@ FormIndexDatum(IndexInfo *indexInfo,
if (indexpr_item == NULL)
elog(ERROR, "wrong number of index expressions");
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexpr_item),
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
indexpr_item = lnext(indexpr_item);
@@ -1013,9 +1008,9 @@ setRelhasindex(Oid relid, bool hasindex, bool isprimary, Oid reltoastidxid)
HeapScanDesc pg_class_scan = NULL;
/*
- * Find the tuple to update in pg_class. In bootstrap mode we can't
- * use heap_update, so cheat and overwrite the tuple in-place. In
- * normal processing, make a copy to scribble on.
+ * Find the tuple to update in pg_class. In bootstrap mode we can't use
+ * heap_update, so cheat and overwrite the tuple in-place. In normal
+ * processing, make a copy to scribble on.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
@@ -1135,7 +1130,7 @@ setNewRelfilenode(Relation relation)
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "could not find tuple for relation %u",
@@ -1178,7 +1173,7 @@ setNewRelfilenode(Relation relation)
* advantage of the opportunity to update pg_class to ensure that the
* planner takes advantage of the index we just created. But, only
* update statistics during normal index definitions, not for indices
- * on system catalogs created during bootstrap processing. We must
+ * on system catalogs created during bootstrap processing. We must
* close the relations before updating statistics to guarantee that
* the relcache entries are flushed when we increment the command
* counter in UpdateStats(). But we do not release any locks on the
@@ -1188,8 +1183,8 @@ void
IndexCloseAndUpdateStats(Relation heap, double heapTuples,
Relation index, double indexTuples)
{
- Oid hrelid = RelationGetRelid(heap);
- Oid irelid = RelationGetRelid(index);
+ Oid hrelid = RelationGetRelid(heap);
+ Oid irelid = RelationGetRelid(index);
if (!IsNormalProcessingMode())
return;
@@ -1222,9 +1217,9 @@ UpdateStats(Oid relid, double reltuples)
/*
* This routine handles updates for both the heap and index relation
- * statistics. In order to guarantee that we're able to *see* the
- * index relation tuple, we bump the command counter id here. The
- * index relation tuple was created in the current transaction.
+ * statistics. In order to guarantee that we're able to *see* the index
+ * relation tuple, we bump the command counter id here. The index
+ * relation tuple was created in the current transaction.
*/
CommandCounterIncrement();
@@ -1232,8 +1227,8 @@ UpdateStats(Oid relid, double reltuples)
* CommandCounterIncrement() flushes invalid cache entries, including
* those for the heap and index relations for which we're updating
* statistics. Now that the cache is flushed, it's safe to open the
- * relation again. We need the relation open in order to figure out
- * how many blocks it contains.
+ * relation again. We need the relation open in order to figure out how
+ * many blocks it contains.
*/
/*
@@ -1242,17 +1237,17 @@ UpdateStats(Oid relid, double reltuples)
whichRel = relation_open(relid, ShareLock);
/*
- * Find the tuple to update in pg_class. Normally we make a copy of
- * the tuple using the syscache, modify it, and apply heap_update. But
- * in bootstrap mode we can't use heap_update, so we cheat and
- * overwrite the tuple in-place. (Note: as of PG 8.0 this isn't called
- * during bootstrap, but leave the code here for possible future use.)
+ * Find the tuple to update in pg_class. Normally we make a copy of the
+ * tuple using the syscache, modify it, and apply heap_update. But in
+ * bootstrap mode we can't use heap_update, so we cheat and overwrite the
+ * tuple in-place. (Note: as of PG 8.0 this isn't called during
+ * bootstrap, but leave the code here for possible future use.)
*
- * We also must cheat if reindexing pg_class itself, because the target
- * index may presently not be part of the set of indexes that
- * CatalogUpdateIndexes would update (see reindex_relation). In this
- * case the stats updates will not be WAL-logged and so could be lost
- * in a crash. This seems OK considering VACUUM does the same thing.
+ * We also must cheat if reindexing pg_class itself, because the target index
+ * may presently not be part of the set of indexes that
+ * CatalogUpdateIndexes would update (see reindex_relation). In this case
+ * the stats updates will not be WAL-logged and so could be lost in a
+ * crash. This seems OK considering VACUUM does the same thing.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
@@ -1284,9 +1279,9 @@ UpdateStats(Oid relid, double reltuples)
/*
* Update statistics in pg_class, if they changed. (Avoiding an
- * unnecessary update is not just a tiny performance improvement; it
- * also reduces the window wherein concurrent CREATE INDEX commands
- * may conflict.)
+ * unnecessary update is not just a tiny performance improvement; it also
+ * reduces the window wherein concurrent CREATE INDEX commands may
+ * conflict.)
*/
relpages = RelationGetNumberOfBlocks(whichRel);
@@ -1320,10 +1315,10 @@ UpdateStats(Oid relid, double reltuples)
heap_freetuple(tuple);
/*
- * We shouldn't have to do this, but we do... Modify the reldesc in
- * place with the new values so that the cache contains the latest
- * copy. (XXX is this really still necessary? The relcache will get
- * fixed at next CommandCounterIncrement, so why bother here?)
+ * We shouldn't have to do this, but we do... Modify the reldesc in place
+ * with the new values so that the cache contains the latest copy. (XXX
+ * is this really still necessary? The relcache will get fixed at next
+ * CommandCounterIncrement, so why bother here?)
*/
whichRel->rd_rel->relpages = (int32) relpages;
whichRel->rd_rel->reltuples = (float4) reltuples;
@@ -1405,8 +1400,8 @@ IndexBuildHeapScan(Relation heapRelation,
Assert(OidIsValid(indexRelation->rd_rel->relam));
/*
- * Need an EState for evaluation of index expressions and
- * partial-index predicates. Also a slot to hold the current tuple.
+ * Need an EState for evaluation of index expressions and partial-index
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1421,9 +1416,8 @@ IndexBuildHeapScan(Relation heapRelation,
estate);
/*
- * Ok, begin our scan of the base relation. We use SnapshotAny
- * because we must retrieve all tuples and do our own time qual
- * checks.
+ * Ok, begin our scan of the base relation. We use SnapshotAny because we
+ * must retrieve all tuples and do our own time qual checks.
*/
if (IsBootstrapProcessingMode())
{
@@ -1487,16 +1481,16 @@ IndexBuildHeapScan(Relation heapRelation,
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
- * Since caller should hold ShareLock or better, we
- * should not see any tuples inserted by open
- * transactions --- unless it's our own transaction.
- * (Consider INSERT followed by CREATE INDEX within a
- * transaction.) An exception occurs when reindexing
- * a system catalog, because we often release lock on
- * system catalogs before committing.
+ * Since caller should hold ShareLock or better, we should
+ * not see any tuples inserted by open transactions ---
+ * unless it's our own transaction. (Consider INSERT
+ * followed by CREATE INDEX within a transaction.) An
+ * exception occurs when reindexing a system catalog,
+ * because we often release lock on system catalogs before
+ * committing.
*/
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmin(heapTuple->t_data))
+ HeapTupleHeaderGetXmin(heapTuple->t_data))
&& !IsSystemRelation(heapRelation))
elog(ERROR, "concurrent insert in progress");
indexIt = true;
@@ -1505,17 +1499,17 @@ IndexBuildHeapScan(Relation heapRelation,
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
- * Since caller should hold ShareLock or better, we
- * should not see any tuples deleted by open
- * transactions --- unless it's our own transaction.
- * (Consider DELETE followed by CREATE INDEX within a
- * transaction.) An exception occurs when reindexing
- * a system catalog, because we often release lock on
- * system catalogs before committing.
+ * Since caller should hold ShareLock or better, we should
+ * not see any tuples deleted by open transactions ---
+ * unless it's our own transaction. (Consider DELETE
+ * followed by CREATE INDEX within a transaction.) An
+ * exception occurs when reindexing a system catalog,
+ * because we often release lock on system catalogs before
+ * committing.
*/
Assert(!(heapTuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmax(heapTuple->t_data))
+ HeapTupleHeaderGetXmax(heapTuple->t_data))
&& !IsSystemRelation(heapRelation))
elog(ERROR, "concurrent delete in progress");
indexIt = true;
@@ -1547,9 +1541,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* In a partial index, discard tuples that don't satisfy the
- * predicate. We can also discard recently-dead tuples, since
- * VACUUM doesn't complain about tuple count mismatch for partial
- * indexes.
+ * predicate. We can also discard recently-dead tuples, since VACUUM
+ * doesn't complain about tuple count mismatch for partial indexes.
*/
if (predicate != NIL)
{
@@ -1560,9 +1553,9 @@ IndexBuildHeapScan(Relation heapRelation,
}
/*
- * For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
- * evaluation of any expressions needed.
+ * For the current heap tuple, extract all the attributes we use in
+ * this index, and note which are null. This also performs evaluation
+ * of any expressions needed.
*/
FormIndexDatum(indexInfo,
slot,
@@ -1571,9 +1564,9 @@ IndexBuildHeapScan(Relation heapRelation,
isnull);
/*
- * You'd think we should go ahead and build the index tuple here,
- * but some index AMs want to do further processing on the data
- * first. So pass the values[] and isnull[] arrays, instead.
+ * You'd think we should go ahead and build the index tuple here, but
+ * some index AMs want to do further processing on the data first. So
+ * pass the values[] and isnull[] arrays, instead.
*/
/* Call the AM's callback routine to process the tuple */
@@ -1631,27 +1624,27 @@ reindex_index(Oid indexId)
bool inplace;
/*
- * Open and lock the parent heap relation. ShareLock is sufficient
- * since we only need to be sure no schema or data changes are going on.
+ * Open and lock the parent heap relation. ShareLock is sufficient since
+ * we only need to be sure no schema or data changes are going on.
*/
heapId = IndexGetRelation(indexId);
heapRelation = heap_open(heapId, ShareLock);
/*
- * Open the target index relation and get an exclusive lock on it,
- * to ensure that no one else is touching this particular index.
+ * Open the target index relation and get an exclusive lock on it, to
+ * ensure that no one else is touching this particular index.
*/
iRel = index_open(indexId);
LockRelation(iRel, AccessExclusiveLock);
/*
- * If it's a shared index, we must do inplace processing (because we
- * have no way to update relfilenode in other databases). Otherwise
- * we can do it the normal transaction-safe way.
+ * If it's a shared index, we must do inplace processing (because we have
+ * no way to update relfilenode in other databases). Otherwise we can do
+ * it the normal transaction-safe way.
*
* Since inplace processing isn't crash-safe, we only allow it in a
- * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE
- * cases, the caller should have detected this.)
+ * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases,
+ * the caller should have detected this.)
*/
inplace = iRel->rd_rel->relisshared;
@@ -1688,8 +1681,8 @@ reindex_index(Oid indexId)
index_build(heapRelation, iRel, indexInfo);
/*
- * index_build will close both the heap and index relations (but
- * not give up the locks we hold on them). So we're done.
+ * index_build will close both the heap and index relations (but not
+ * give up the locks we hold on them). So we're done.
*/
}
PG_CATCH();
@@ -1721,8 +1714,8 @@ reindex_relation(Oid relid, bool toast_too)
ListCell *indexId;
/*
- * Open and lock the relation. ShareLock is sufficient since we only
- * need to prevent schema and data changes in it.
+ * Open and lock the relation. ShareLock is sufficient since we only need
+ * to prevent schema and data changes in it.
*/
rel = heap_open(relid, ShareLock);
@@ -1736,26 +1729,25 @@ reindex_relation(Oid relid, bool toast_too)
indexIds = RelationGetIndexList(rel);
/*
- * reindex_index will attempt to update the pg_class rows for the
- * relation and index. If we are processing pg_class itself, we want
- * to make sure that the updates do not try to insert index entries
- * into indexes we have not processed yet. (When we are trying to
- * recover from corrupted indexes, that could easily cause a crash.)
- * We can accomplish this because CatalogUpdateIndexes will use the
- * relcache's index list to know which indexes to update. We just
- * force the index list to be only the stuff we've processed.
+ * reindex_index will attempt to update the pg_class rows for the relation
+ * and index. If we are processing pg_class itself, we want to make sure
+ * that the updates do not try to insert index entries into indexes we
+ * have not processed yet. (When we are trying to recover from corrupted
+ * indexes, that could easily cause a crash.) We can accomplish this
+ * because CatalogUpdateIndexes will use the relcache's index list to know
+ * which indexes to update. We just force the index list to be only the
+ * stuff we've processed.
*
- * It is okay to not insert entries into the indexes we have not
- * processed yet because all of this is transaction-safe. If we fail
- * partway through, the updated rows are dead and it doesn't matter
- * whether they have index entries. Also, a new pg_class index will
- * be created with an entry for its own pg_class row because we do
- * setNewRelfilenode() before we do index_build().
+ * It is okay to not insert entries into the indexes we have not processed
+ * yet because all of this is transaction-safe. If we fail partway
+ * through, the updated rows are dead and it doesn't matter whether they
+ * have index entries. Also, a new pg_class index will be created with an
+ * entry for its own pg_class row because we do setNewRelfilenode() before
+ * we do index_build().
*
- * Note that we also clear pg_class's rd_oidindex until the loop is done,
- * so that that index can't be accessed either. This means we cannot
- * safely generate new relation OIDs while in the loop; shouldn't be a
- * problem.
+ * Note that we also clear pg_class's rd_oidindex until the loop is done, so
+ * that that index can't be accessed either. This means we cannot safely
+ * generate new relation OIDs while in the loop; shouldn't be a problem.
*/
is_pg_class = (RelationGetRelid(rel) == RelationRelationId);
doneIndexes = NIL;
@@ -1787,8 +1779,8 @@ reindex_relation(Oid relid, bool toast_too)
result = (indexIds != NIL);
/*
- * If the relation has a secondary toast rel, reindex that too while
- * we still hold the lock on the master table.
+ * If the relation has a secondary toast rel, reindex that too while we
+ * still hold the lock on the master table.
*/
if (toast_too && OidIsValid(toast_relid))
result |= reindex_relation(toast_relid, false);
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 905d99d9469..ad193dd7ffb 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/indexing.c,v 1.109 2005/03/21 01:24:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/indexing.c,v 1.110 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,8 +109,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
Assert(indexInfo->ii_Predicate == NIL);
/*
- * FormIndexDatum fills in its values and isnull parameters with
- * the appropriate values for the column(s) of the index.
+ * FormIndexDatum fills in its values and isnull parameters with the
+ * appropriate values for the column(s) of the index.
*/
FormIndexDatum(indexInfo,
slot,
@@ -122,8 +122,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* The index AM does the rest.
*/
index_insert(relationDescs[i], /* index relation */
- values, /* array of index Datums */
- isnull, /* is-null flags */
+ values, /* array of index Datums */
+ isnull, /* is-null flags */
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation,
relationDescs[i]->rd_index->indisunique);
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index dc627e42880..0cafa9f9faf 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.78 2005/10/06 22:43:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,7 +108,7 @@ static bool namespaceSearchPathValid = true;
* command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
- * current subtransaction. The flag propagates up the subtransaction tree,
+ * current subtransaction. The flag propagates up the subtransaction tree,
* so the main transaction will correctly recognize the flag if all
* intermediate subtransactions commit. When it is InvalidSubTransactionId,
* we either haven't made the TEMP namespace yet, or have successfully
@@ -225,7 +225,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- newRelation->catalogname, newRelation->schemaname,
+ newRelation->catalogname, newRelation->schemaname,
newRelation->relname)));
}
@@ -235,7 +235,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("temporary tables may not specify a schema name")));
+ errmsg("temporary tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@@ -246,7 +246,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
{
/* use exact schema given */
namespaceId = GetSysCacheOid(NAMESPACENAME,
- CStringGetDatum(newRelation->schemaname),
+ CStringGetDatum(newRelation->schemaname),
0, 0, 0);
if (!OidIsValid(namespaceId))
ereport(ERROR,
@@ -322,9 +322,9 @@ RelationIsVisible(Oid relid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
relnamespace = relform->relnamespace;
if (relnamespace != PG_CATALOG_NAMESPACE &&
@@ -333,9 +333,9 @@ RelationIsVisible(Oid relid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another relation of the same name earlier in the
- * path. So we must do a slow check for conflicting relations.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another relation of the same name earlier in the path. So
+ * we must do a slow check for conflicting relations.
*/
char *relname = NameStr(relform->relname);
ListCell *l;
@@ -420,9 +420,9 @@ TypeIsVisible(Oid typid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
typnamespace = typform->typnamespace;
if (typnamespace != PG_CATALOG_NAMESPACE &&
@@ -431,9 +431,9 @@ TypeIsVisible(Oid typid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another type of the same name earlier in the path.
- * So we must do a slow check for conflicting types.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another type of the same name earlier in the path. So we
+ * must do a slow check for conflicting types.
*/
char *typname = NameStr(typform->typname);
ListCell *l;
@@ -545,14 +545,14 @@ FuncnameGetCandidates(List *names, int nargs)
/*
* Okay, it's in the search path, but does it have the same
- * arguments as something we already accepted? If so, keep
- * only the one that appears earlier in the search path.
+ * arguments as something we already accepted? If so, keep only
+ * the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the normal
- * case), then any conflicting proc must immediately adjoin
- * this one in the list, so we only need to look at the newest
- * result item. If we have an unordered list, we have to scan
- * the whole result list.
+ * case), then any conflicting proc must immediately adjoin this
+ * one in the list, so we only need to look at the newest result
+ * item. If we have an unordered list, we have to scan the whole
+ * result list.
*/
if (resultList)
{
@@ -575,9 +575,9 @@ FuncnameGetCandidates(List *names, int nargs)
prevResult = prevResult->next)
{
if (pronargs == prevResult->nargs &&
- memcmp(procform->proargtypes.values,
- prevResult->args,
- pronargs * sizeof(Oid)) == 0)
+ memcmp(procform->proargtypes.values,
+ prevResult->args,
+ pronargs * sizeof(Oid)) == 0)
break;
}
}
@@ -640,9 +640,9 @@ FunctionIsVisible(Oid funcid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
pronamespace = procform->pronamespace;
if (pronamespace != PG_CATALOG_NAMESPACE &&
@@ -651,10 +651,10 @@ FunctionIsVisible(Oid funcid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another proc of the same name and arguments
- * earlier in the path. So we must do a slow check to see if this
- * is the same proc that would be found by FuncnameGetCandidates.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another proc of the same name and arguments earlier in
+ * the path. So we must do a slow check to see if this is the same
+ * proc that would be found by FuncnameGetCandidates.
*/
char *proname = NameStr(procform->proname);
int nargs = procform->pronargs;
@@ -733,13 +733,12 @@ OpernameGetCandidates(List *names, char oprkind)
/*
* In typical scenarios, most if not all of the operators found by the
- * catcache search will end up getting returned; and there can be
- * quite a few, for common operator names such as '=' or '+'. To
- * reduce the time spent in palloc, we allocate the result space as an
- * array large enough to hold all the operators. The original coding
- * of this routine did a separate palloc for each operator, but
- * profiling revealed that the pallocs used an unreasonably large
- * fraction of parsing time.
+ * catcache search will end up getting returned; and there can be quite a
+ * few, for common operator names such as '=' or '+'. To reduce the time
+ * spent in palloc, we allocate the result space as an array large enough
+ * to hold all the operators. The original coding of this routine did a
+ * separate palloc for each operator, but profiling revealed that the
+ * pallocs used an unreasonably large fraction of parsing time.
*/
#define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid))
@@ -780,14 +779,14 @@ OpernameGetCandidates(List *names, char oprkind)
/*
* Okay, it's in the search path, but does it have the same
- * arguments as something we already accepted? If so, keep
- * only the one that appears earlier in the search path.
+ * arguments as something we already accepted? If so, keep only
+ * the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the normal
- * case), then any conflicting oper must immediately adjoin
- * this one in the list, so we only need to look at the newest
- * result item. If we have an unordered list, we have to scan
- * the whole result list.
+ * case), then any conflicting oper must immediately adjoin this
+ * one in the list, so we only need to look at the newest result
+ * item. If we have an unordered list, we have to scan the whole
+ * result list.
*/
if (resultList)
{
@@ -870,9 +869,9 @@ OperatorIsVisible(Oid oprid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
oprnamespace = oprform->oprnamespace;
if (oprnamespace != PG_CATALOG_NAMESPACE &&
@@ -881,11 +880,10 @@ OperatorIsVisible(Oid oprid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another operator of the same name and arguments
- * earlier in the path. So we must do a slow check to see if this
- * is the same operator that would be found by
- * OpernameGetCandidates.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another operator of the same name and arguments earlier
+ * in the path. So we must do a slow check to see if this is the same
+ * operator that would be found by OpernameGetCandidates.
*/
char *oprname = NameStr(oprform->oprname);
FuncCandidateList clist;
@@ -956,15 +954,14 @@ OpclassGetCandidates(Oid amid)
continue; /* opclass is not in search path */
/*
- * Okay, it's in the search path, but does it have the same name
- * as something we already accepted? If so, keep only the one
- * that appears earlier in the search path.
+ * Okay, it's in the search path, but does it have the same name as
+ * something we already accepted? If so, keep only the one that
+ * appears earlier in the search path.
*
- * If we have an ordered list from SearchSysCacheList (the normal
- * case), then any conflicting opclass must immediately adjoin
- * this one in the list, so we only need to look at the newest
- * result item. If we have an unordered list, we have to scan the
- * whole result list.
+ * If we have an ordered list from SearchSysCacheList (the normal case),
+ * then any conflicting opclass must immediately adjoin this one in
+ * the list, so we only need to look at the newest result item. If we
+ * have an unordered list, we have to scan the whole result list.
*/
if (resultList)
{
@@ -1083,9 +1080,9 @@ OpclassIsVisible(Oid opcid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
opcnamespace = opcform->opcnamespace;
if (opcnamespace != PG_CATALOG_NAMESPACE &&
@@ -1094,10 +1091,10 @@ OpclassIsVisible(Oid opcid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another opclass of the same name earlier in the
- * path. So we must do a slow check to see if this opclass would
- * be found by OpclassnameGetOpcid.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another opclass of the same name earlier in the path. So
+ * we must do a slow check to see if this opclass would be found by
+ * OpclassnameGetOpcid.
*/
char *opcname = NameStr(opcform->opcname);
@@ -1164,9 +1161,9 @@ ConversionIsVisible(Oid conid)
recomputeNamespacePath();
/*
- * Quick check: if it ain't in the path at all, it ain't visible.
- * Items in the system namespace are surely in the path and so we
- * needn't even do list_member_oid() for them.
+ * Quick check: if it ain't in the path at all, it ain't visible. Items in
+ * the system namespace are surely in the path and so we needn't even do
+ * list_member_oid() for them.
*/
connamespace = conform->connamespace;
if (connamespace != PG_CATALOG_NAMESPACE &&
@@ -1175,10 +1172,10 @@ ConversionIsVisible(Oid conid)
else
{
/*
- * If it is in the path, it might still not be visible; it could
- * be hidden by another conversion of the same name earlier in the
- * path. So we must do a slow check to see if this conversion
- * would be found by ConversionGetConid.
+ * If it is in the path, it might still not be visible; it could be
+ * hidden by another conversion of the same name earlier in the path.
+ * So we must do a slow check to see if this conversion would be found
+ * by ConversionGetConid.
*/
char *conname = NameStr(conform->conname);
@@ -1226,14 +1223,14 @@ DeconstructQualifiedName(List *names,
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented: %s",
- NameListToString(names))));
+ errmsg("cross-database references are not implemented: %s",
+ NameListToString(names))));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1373,8 +1370,8 @@ makeRangeVarFromNameList(List *names)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper relation name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper relation name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1574,7 +1571,7 @@ FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
static void
recomputeNamespacePath(void)
{
- Oid roleid = GetUserId();
+ Oid roleid = GetUserId();
char *rawname;
List *namelist;
List *oidlist;
@@ -1602,9 +1599,9 @@ recomputeNamespacePath(void)
/*
* Convert the list of names to a list of OIDs. If any names are not
- * recognizable or we don't have read access, just leave them out of
- * the list. (We can't raise an error, since the search_path setting
- * has already been accepted.) Don't make duplicate entries, either.
+ * recognizable or we don't have read access, just leave them out of the
+ * list. (We can't raise an error, since the search_path setting has
+ * already been accepted.) Don't make duplicate entries, either.
*/
oidlist = NIL;
foreach(l, namelist)
@@ -1659,8 +1656,8 @@ recomputeNamespacePath(void)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go
- * on the front, not the back; also notice that we do not check USAGE
+ * Add any implicitly-searched namespaces to the list. Note these go on
+ * the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
if (!list_member_oid(oidlist, PG_CATALOG_NAMESPACE))
@@ -1675,8 +1672,8 @@ recomputeNamespacePath(void)
oidlist = lcons_oid(mySpecialNamespace, oidlist);
/*
- * Now that we've successfully built the new list of namespace OIDs,
- * save it in permanent storage.
+ * Now that we've successfully built the new list of namespace OIDs, save
+ * it in permanent storage.
*/
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
newpath = list_copy(oidlist);
@@ -1717,14 +1714,13 @@ InitTempTableNamespace(void)
/*
* First, do permission check to see if we are authorized to make temp
- * tables. We use a nonstandard error message here since
- * "databasename: permission denied" might be a tad cryptic.
+ * tables. We use a nonstandard error message here since "databasename:
+ * permission denied" might be a tad cryptic.
*
- * Note that ACL_CREATE_TEMP rights are rechecked in
- * pg_namespace_aclmask; that's necessary since current user ID could
- * change during the session. But there's no need to make the
- * namespace in the first place until a temp table creation request is
- * made by someone with appropriate rights.
+ * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
+ * that's necessary since current user ID could change during the session.
+ * But there's no need to make the namespace in the first place until a
+ * temp table creation request is made by someone with appropriate rights.
*/
if (pg_database_aclcheck(MyDatabaseId, GetUserId(),
ACL_CREATE_TEMP) != ACLCHECK_OK)
@@ -1741,13 +1737,12 @@ InitTempTableNamespace(void)
if (!OidIsValid(namespaceId))
{
/*
- * First use of this temp namespace in this database; create it.
- * The temp namespaces are always owned by the superuser. We
- * leave their permissions at default --- i.e., no access except
- * to superuser --- to ensure that unprivileged users can't peek
- * at other backends' temp tables. This works because the places
- * that access the temp namespace for my own backend skip
- * permissions checks on it.
+ * First use of this temp namespace in this database; create it. The
+ * temp namespaces are always owned by the superuser. We leave their
+ * permissions at default --- i.e., no access except to superuser ---
+ * to ensure that unprivileged users can't peek at other backends'
+ * temp tables. This works because the places that access the temp
+ * namespace for my own backend skip permissions checks on it.
*/
namespaceId = NamespaceCreate(namespaceName, BOOTSTRAP_SUPERUSERID);
/* Advance command counter to make namespace visible */
@@ -1756,16 +1751,16 @@ InitTempTableNamespace(void)
else
{
/*
- * If the namespace already exists, clean it out (in case the
- * former owner crashed without doing so).
+ * If the namespace already exists, clean it out (in case the former
+ * owner crashed without doing so).
*/
RemoveTempRelations(namespaceId);
}
/*
- * Okay, we've prepared the temp namespace ... but it's not committed
- * yet, so all our work could be undone by transaction rollback. Set
- * flag for AtEOXact_Namespace to know what to do.
+ * Okay, we've prepared the temp namespace ... but it's not committed yet,
+ * so all our work could be undone by transaction rollback. Set flag for
+ * AtEOXact_Namespace to know what to do.
*/
myTempNamespace = namespaceId;
@@ -1784,11 +1779,11 @@ AtEOXact_Namespace(bool isCommit)
{
/*
* If we abort the transaction in which a temp namespace was selected,
- * we'll have to do any creation or cleanout work over again. So,
- * just forget the namespace entirely until next time. On the other
- * hand, if we commit then register an exit callback to clean out the
- * temp tables at backend shutdown. (We only want to register the
- * callback once per session, so this is a good place to do it.)
+ * we'll have to do any creation or cleanout work over again. So, just
+ * forget the namespace entirely until next time. On the other hand, if
+ * we commit then register an exit callback to clean out the temp tables
+ * at backend shutdown. (We only want to register the callback once per
+ * session, so this is a good place to do it.)
*/
if (myTempNamespaceSubID != InvalidSubTransactionId)
{
@@ -1852,9 +1847,9 @@ RemoveTempRelations(Oid tempNamespaceId)
ObjectAddress object;
/*
- * We want to get rid of everything in the target namespace, but not
- * the namespace itself (deleting it only to recreate it later would
- * be a waste of cycles). We do this by finding everything that has a
+ * We want to get rid of everything in the target namespace, but not the
+ * namespace itself (deleting it only to recreate it later would be a
+ * waste of cycles). We do this by finding everything that has a
* dependency on the namespace.
*/
object.classId = NamespaceRelationId;
@@ -1916,15 +1911,13 @@ assign_search_path(const char *newval, bool doit, GucSource source)
/*
* Verify that all the names are either valid namespace names or
* "$user". We do not require $user to correspond to a valid
- * namespace. We do not check for USAGE rights, either; should
- * we?
+ * namespace. We do not check for USAGE rights, either; should we?
*
- * When source == PGC_S_TEST, we are checking the argument of an
- * ALTER DATABASE SET or ALTER USER SET command. It could be that
- * the intended use of the search path is for some other database,
- * so we should not error out if it mentions schemas not present
- * in the current database. We reduce the message to NOTICE
- * instead.
+ * When source == PGC_S_TEST, we are checking the argument of an ALTER
+ * DATABASE SET or ALTER USER SET command. It could be that the
+ * intended use of the search path is for some other database, so we
+ * should not error out if it mentions schemas not present in the
+ * current database. We reduce the message to NOTICE instead.
*/
foreach(l, namelist)
{
@@ -1937,7 +1930,7 @@ assign_search_path(const char *newval, bool doit, GucSource source)
0, 0, 0))
ereport((source == PGC_S_TEST) ? NOTICE : ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", curname)));
+ errmsg("schema \"%s\" does not exist", curname)));
}
}
@@ -1945,9 +1938,9 @@ assign_search_path(const char *newval, bool doit, GucSource source)
list_free(namelist);
/*
- * We mark the path as needing recomputation, but don't do anything
- * until it's needed. This avoids trying to do database access during
- * GUC initialization.
+ * We mark the path as needing recomputation, but don't do anything until
+ * it's needed. This avoids trying to do database access during GUC
+ * initialization.
*/
if (doit)
namespaceSearchPathValid = false;
@@ -1967,8 +1960,7 @@ InitializeSearchPath(void)
{
/*
* In bootstrap mode, the search path must be 'pg_catalog' so that
- * tables are created in the proper namespace; ignore the GUC
- * setting.
+ * tables are created in the proper namespace; ignore the GUC setting.
*/
MemoryContext oldcxt;
@@ -1983,8 +1975,8 @@ InitializeSearchPath(void)
else
{
/*
- * In normal mode, arrange for a callback on any syscache
- * invalidation of pg_namespace rows.
+ * In normal mode, arrange for a callback on any syscache invalidation
+ * of pg_namespace rows.
*/
CacheRegisterSyscacheCallback(NAMESPACEOID,
NamespaceCallback,
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 26491e22a15..fb7562e3062 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.75 2005/04/14 20:03:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,16 +76,16 @@ AggregateCreate(const char *aggName,
elog(ERROR, "aggregate must have a transition function");
/*
- * If transtype is polymorphic, basetype must be polymorphic also;
- * else we will have no way to deduce the actual transtype.
+ * If transtype is polymorphic, basetype must be polymorphic also; else we
+ * will have no way to deduce the actual transtype.
*/
if ((aggTransType == ANYARRAYOID || aggTransType == ANYELEMENTOID) &&
!(aggBaseType == ANYARRAYOID || aggBaseType == ANYELEMENTOID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition data type"),
- errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
- "transition type must have one of them as its base type.")));
+ errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
+ "transition type must have one of them as its base type.")));
/* handle transfn */
fnArgs[0] = aggTransType;
@@ -101,13 +101,13 @@ AggregateCreate(const char *aggName,
/*
* Return type of transfn (possibly after refinement by
- * enforce_generic_type_consistency, if transtype isn't polymorphic)
- * must exactly match declared transtype.
+ * enforce_generic_type_consistency, if transtype isn't polymorphic) must
+ * exactly match declared transtype.
*
- * In the non-polymorphic-transtype case, it might be okay to allow a
- * rettype that's binary-coercible to transtype, but I'm not quite
- * convinced that it's either safe or useful. When transtype is
- * polymorphic we *must* demand exact equality.
+ * In the non-polymorphic-transtype case, it might be okay to allow a rettype
+ * that's binary-coercible to transtype, but I'm not quite convinced that
+ * it's either safe or useful. When transtype is polymorphic we *must*
+ * demand exact equality.
*/
if (rettype != aggTransType)
ereport(ERROR,
@@ -124,10 +124,9 @@ AggregateCreate(const char *aggName,
proc = (Form_pg_proc) GETSTRUCT(tup);
/*
- * If the transfn is strict and the initval is NULL, make sure input
- * type and transtype are the same (or at least binary-compatible), so
- * that it's OK to use the first input value as the initial
- * transValue.
+ * If the transfn is strict and the initval is NULL, make sure input type
+ * and transtype are the same (or at least binary-compatible), so that
+ * it's OK to use the first input value as the initial transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
@@ -155,20 +154,20 @@ AggregateCreate(const char *aggName,
Assert(OidIsValid(finaltype));
/*
- * If finaltype (i.e. aggregate return type) is polymorphic, basetype
- * must be polymorphic also, else parser will fail to deduce result
- * type. (Note: given the previous test on transtype and basetype,
- * this cannot happen, unless someone has snuck a finalfn definition
- * into the catalogs that itself violates the rule against polymorphic
- * result with no polymorphic input.)
+ * If finaltype (i.e. aggregate return type) is polymorphic, basetype must
+ * be polymorphic also, else parser will fail to deduce result type.
+ * (Note: given the previous test on transtype and basetype, this cannot
+ * happen, unless someone has snuck a finalfn definition into the catalogs
+ * that itself violates the rule against polymorphic result with no
+ * polymorphic input.)
*/
if ((finaltype == ANYARRAYOID || finaltype == ANYELEMENTOID) &&
!(aggBaseType == ANYARRAYOID || aggBaseType == ANYELEMENTOID))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result data type"),
- errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
- "must have one of them as its base type.")));
+ errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
+ "must have one of them as its base type.")));
/* handle sortop, if supplied */
if (aggsortopName)
@@ -178,8 +177,7 @@ AggregateCreate(const char *aggName,
/*
* Everything looks okay. Try to create the pg_proc entry for the
- * aggregate. (This could fail if there's already a conflicting
- * entry.)
+ * aggregate. (This could fail if there's already a conflicting entry.)
*/
fnArgs[0] = aggBaseType;
@@ -198,7 +196,7 @@ AggregateCreate(const char *aggName,
false, /* isStrict (not needed for agg) */
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
- buildoidvector(fnArgs, 1), /* paramTypes */
+ buildoidvector(fnArgs, 1), /* paramTypes */
PointerGetDatum(NULL), /* allParamTypes */
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL)); /* parameterNames */
@@ -235,10 +233,9 @@ AggregateCreate(const char *aggName,
heap_close(aggdesc, RowExclusiveLock);
/*
- * Create dependencies for the aggregate (above and beyond those
- * already made by ProcedureCreate). Note: we don't need an explicit
- * dependency on aggTransType since we depend on it indirectly through
- * transfn.
+ * Create dependencies for the aggregate (above and beyond those already
+ * made by ProcedureCreate). Note: we don't need an explicit dependency
+ * on aggTransType since we depend on it indirectly through transfn.
*/
myself.classId = ProcedureRelationId;
myself.objectId = procOid;
@@ -288,8 +285,8 @@ lookup_agg_function(List *fnName,
* func_get_detail looks up the function in the catalogs, does
* disambiguation for polymorphic functions, handles inheritance, and
* returns the funcid and type and set or singleton status of the
- * function's return value. it also returns the true argument types
- * to the function.
+ * function's return value. it also returns the true argument types to
+ * the function.
*/
fdresult = func_get_detail(fnName, NIL, nargs, input_types,
&fnOid, rettype, &retset,
@@ -300,21 +297,20 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
if (retset)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s returns a set",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
/*
- * If the given type(s) are all polymorphic, there's nothing we can
- * check. Otherwise, enforce consistency, and possibly refine the
- * result type.
+ * If the given type(s) are all polymorphic, there's nothing we can check.
+ * Otherwise, enforce consistency, and possibly refine the result type.
*/
if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) &&
(nargs == 1 ||
- (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
+ (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
{
/* nothing to check here */
}
@@ -327,8 +323,8 @@ lookup_agg_function(List *fnName,
}
/*
- * func_get_detail will find functions requiring run-time argument
- * type coercion, but nodeAgg.c isn't prepared to deal with that
+ * func_get_detail will find functions requiring run-time argument type
+ * coercion, but nodeAgg.c isn't prepared to deal with that
*/
if (true_oid_array[0] != ANYARRAYOID &&
true_oid_array[0] != ANYELEMENTOID &&
@@ -336,7 +332,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
if (nargs == 2 &&
true_oid_array[1] != ANYARRAYOID &&
@@ -345,7 +341,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
/* Check aggregate creator has permission to call the function */
aclresult = pg_proc_aclcheck(fnOid, GetUserId(), ACL_EXECUTE);
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index b2cc3d5c474..cf18051f52d 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.26 2005/08/01 04:03:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -140,7 +140,7 @@ CreateConstraintEntry(const char *constraintName,
*/
if (conBin)
values[Anum_pg_constraint_conbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(conBin));
+ CStringGetDatum(conBin));
else
nulls[Anum_pg_constraint_conbin - 1] = 'n';
@@ -149,7 +149,7 @@ CreateConstraintEntry(const char *constraintName,
*/
if (conSrc)
values[Anum_pg_constraint_consrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(conSrc));
+ CStringGetDatum(conSrc));
else
nulls[Anum_pg_constraint_consrc - 1] = 'n';
@@ -169,8 +169,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(relId))
{
/*
- * Register auto dependency from constraint to owning relation, or
- * to specific column(s) if any are mentioned.
+ * Register auto dependency from constraint to owning relation, or to
+ * specific column(s) if any are mentioned.
*/
ObjectAddress relobject;
@@ -210,8 +210,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(foreignRelId))
{
/*
- * Register normal dependency from constraint to foreign relation,
- * or to specific column(s) if any are mentioned.
+ * Register normal dependency from constraint to foreign relation, or
+ * to specific column(s) if any are mentioned.
*/
ObjectAddress relobject;
@@ -252,8 +252,8 @@ CreateConstraintEntry(const char *constraintName,
if (conExpr != NULL)
{
/*
- * Register dependencies from constraint to objects mentioned in
- * CHECK expression.
+ * Register dependencies from constraint to objects mentioned in CHECK
+ * expression.
*/
recordDependencyOnSingleRelExpr(&conobject, conExpr, relId,
DEPENDENCY_NORMAL,
@@ -450,15 +450,15 @@ RemoveConstraintById(Oid conId)
Relation rel;
/*
- * If the constraint is for a relation, open and exclusive-lock
- * the relation it's for.
+ * If the constraint is for a relation, open and exclusive-lock the
+ * relation it's for.
*/
rel = heap_open(con->conrelid, AccessExclusiveLock);
/*
- * We need to update the relcheck count if it is a check
- * constraint being dropped. This update will force backends to
- * rebuild relcache entries when we commit.
+ * We need to update the relcheck count if it is a check constraint
+ * being dropped. This update will force backends to rebuild relcache
+ * entries when we commit.
*/
if (con->contype == CONSTRAINT_CHECK)
{
@@ -495,11 +495,10 @@ RemoveConstraintById(Oid conId)
else if (OidIsValid(con->contypid))
{
/*
- * XXX for now, do nothing special when dropping a domain
- * constraint
+ * XXX for now, do nothing special when dropping a domain constraint
*
- * Probably there should be some form of locking on the domain type,
- * but we have no such concept at the moment.
+ * Probably there should be some form of locking on the domain type, but
+ * we have no such concept at the moment.
*/
}
else
@@ -531,9 +530,9 @@ GetConstraintNameForTrigger(Oid triggerId)
HeapTuple tup;
/*
- * We must grovel through pg_depend to find the owning constraint.
- * Perhaps pg_trigger should have a column for the owning constraint ...
- * but right now this is not performance-critical code.
+ * We must grovel through pg_depend to find the owning constraint. Perhaps
+ * pg_trigger should have a column for the owning constraint ... but right
+ * now this is not performance-critical code.
*/
depRel = heap_open(DependRelationId, AccessShareLock);
@@ -567,7 +566,7 @@ GetConstraintNameForTrigger(Oid triggerId)
heap_close(depRel, AccessShareLock);
if (!OidIsValid(constraintId))
- return NULL; /* no owning constraint found */
+ return NULL; /* no owning constraint found */
conRel = heap_open(ConstraintRelationId, AccessShareLock);
@@ -611,10 +610,10 @@ void
AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
Oid newNspId, bool isType)
{
- Relation conRel;
- ScanKeyData key[1];
- SysScanDesc scan;
- HeapTuple tup;
+ Relation conRel;
+ ScanKeyData key[1];
+ SysScanDesc scan;
+ HeapTuple tup;
conRel = heap_open(ConstraintRelationId, RowExclusiveLock);
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 0cdca75f3ad..21adfbcf94a 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.26 2005/09/24 17:53:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,17 +67,17 @@ ConversionCreate(const char *conname, Oid connamespace,
if (def)
{
/*
- * make sure there is no existing default <for encoding><to
- * encoding> pair in this name space
+ * make sure there is no existing default <for encoding><to encoding>
+ * pair in this name space
*/
if (FindDefaultConversion(connamespace,
conforencoding,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("default conversion for %s to %s already exists",
- pg_encoding_to_char(conforencoding),
- pg_encoding_to_char(contoencoding))));
+ errmsg("default conversion for %s to %s already exists",
+ pg_encoding_to_char(conforencoding),
+ pg_encoding_to_char(contoencoding))));
}
/* open pg_conversion */
@@ -150,7 +150,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior)
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
- NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
ReleaseSysCache(tuple);
@@ -330,9 +330,8 @@ pg_convert_using(PG_FUNCTION_ARGS)
ReleaseSysCache(tuple);
/*
- * build text result structure. we cannot use textin() here, since
- * textin assumes that input string encoding is same as database
- * encoding.
+ * build text result structure. we cannot use textin() here, since textin
+ * assumes that input string encoding is same as database encoding.
*/
len = strlen(result) + VARHDRSZ;
retval = palloc(len);
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index bf910d09a5d..c8f9e53212d 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.14 2005/08/01 04:03:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.15 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,8 +62,8 @@ recordMultipleDependencies(const ObjectAddress *depender,
return; /* nothing to do */
/*
- * During bootstrap, do nothing since pg_depend may not exist yet.
- * initdb will fill in appropriate pg_depend entries after bootstrap.
+ * During bootstrap, do nothing since pg_depend may not exist yet. initdb
+ * will fill in appropriate pg_depend entries after bootstrap.
*/
if (IsBootstrapProcessingMode())
return;
@@ -78,9 +78,9 @@ recordMultipleDependencies(const ObjectAddress *depender,
for (i = 0; i < nreferenced; i++, referenced++)
{
/*
- * If the referenced object is pinned by the system, there's no
- * real need to record dependencies on it. This saves lots of
- * space in pg_depend, so it's worth the time taken to check.
+ * If the referenced object is pinned by the system, there's no real
+ * need to record dependencies on it. This saves lots of space in
+ * pg_depend, so it's worth the time taken to check.
*/
if (!isObjectPinned(referenced, dependDesc))
{
@@ -190,11 +190,10 @@ changeDependencyFor(Oid classId, Oid objectId,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * If oldRefObjectId is pinned, there won't be any dependency entries
- * on it --- we can't cope in that case. (This isn't really worth
- * expending code to fix, in current usage; it just means you can't
- * rename stuff out of pg_catalog, which would likely be a bad move
- * anyway.)
+ * If oldRefObjectId is pinned, there won't be any dependency entries on
+ * it --- we can't cope in that case. (This isn't really worth expending
+ * code to fix, in current usage; it just means you can't rename stuff out
+ * of pg_catalog, which would likely be a bad move anyway.)
*/
objAddr.classId = refClassId;
objAddr.objectId = oldRefObjectId;
@@ -203,12 +202,12 @@ changeDependencyFor(Oid classId, Oid objectId,
if (isObjectPinned(&objAddr, depRel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot remove dependency on %s because it is a system object",
- getObjectDescription(&objAddr))));
+ errmsg("cannot remove dependency on %s because it is a system object",
+ getObjectDescription(&objAddr))));
/*
- * We can handle adding a dependency on something pinned, though,
- * since that just means deleting the dependency entry.
+ * We can handle adding a dependency on something pinned, though, since
+ * that just means deleting the dependency entry.
*/
objAddr.objectId = newRefObjectId;
@@ -293,9 +292,9 @@ isObjectPinned(const ObjectAddress *object, Relation rel)
/*
* Since we won't generate additional pg_depend entries for pinned
- * objects, there can be at most one entry referencing a pinned
- * object. Hence, it's sufficient to look at the first returned
- * tuple; we don't need to loop.
+ * objects, there can be at most one entry referencing a pinned object.
+ * Hence, it's sufficient to look at the first returned tuple; we don't
+ * need to loop.
*/
tup = systable_getnext(scan);
if (HeapTupleIsValid(tup))
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 903a46ac0f0..8dea69a234f 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.93 2005/07/07 20:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.94 2005/10/15 02:49:14 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -90,10 +90,10 @@ validOperatorName(const char *name)
/*
* For SQL92 compatibility, '+' and '-' cannot be the last char of a
- * multi-char operator unless the operator contains chars that are not
- * in SQL92 operators. The idea is to lex '=-' as two operators, but
- * not to forbid operator names like '?-' that could not be sequences
- * of SQL92 operators.
+ * multi-char operator unless the operator contains chars that are not in
+ * SQL92 operators. The idea is to lex '=-' as two operators, but not to
+ * forbid operator names like '?-' that could not be sequences of SQL92
+ * operators.
*/
if (len > 1 &&
(name[len - 1] == '+' ||
@@ -228,14 +228,14 @@ OperatorShellMake(const char *operatorName,
}
/*
- * initialize values[] with the operator name and input data types.
- * Note that oprcode is set to InvalidOid, indicating it's a shell.
+ * initialize values[] with the operator name and input data types. Note
+ * that oprcode is set to InvalidOid, indicating it's a shell.
*/
i = 0;
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(false); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
@@ -410,7 +410,7 @@ OperatorCreate(const char *operatorName,
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("at least one of leftarg or rightarg must be specified")));
+ errmsg("at least one of leftarg or rightarg must be specified")));
if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
@@ -418,11 +418,11 @@ OperatorCreate(const char *operatorName,
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have commutators")));
+ errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have join selectivity")));
+ errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
@@ -451,9 +451,9 @@ OperatorCreate(const char *operatorName,
*/
/*
- * Look up registered procedures -- find the return type of
- * procedureName to place in "result" field. Do this before shells are
- * created so we don't have to worry about deleting them later.
+ * Look up registered procedures -- find the return type of procedureName
+ * to place in "result" field. Do this before shells are created so we
+ * don't have to worry about deleting them later.
*/
if (!OidIsValid(leftTypeId))
{
@@ -519,7 +519,7 @@ OperatorCreate(const char *operatorName,
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(canHash); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
@@ -660,14 +660,14 @@ OperatorCreate(const char *operatorName,
/*
* If a commutator and/or negator link is provided, update the other
- * operator(s) to point at this one, if they don't already have a
- * link. This supports an alternate style of operator definition
- * wherein the user first defines one operator without giving negator
- * or commutator, then defines the other operator of the pair with the
- * proper commutator or negator attribute. That style doesn't require
- * creation of a shell, and it's the only style that worked right
- * before Postgres version 6.5. This code also takes care of the
- * situation where the new operator is its own commutator.
+ * operator(s) to point at this one, if they don't already have a link.
+ * This supports an alternate style of operator definition wherein the
+ * user first defines one operator without giving negator or commutator,
+ * then defines the other operator of the pair with the proper commutator
+ * or negator attribute. That style doesn't require creation of a shell,
+ * and it's the only style that worked right before Postgres version 6.5.
+ * This code also takes care of the situation where the new operator is
+ * its own commutator.
*/
if (selfCommutator)
commutatorId = operatorObjectId;
@@ -721,7 +721,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
if (!isCommutator)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("operator cannot be its own negator or sort operator")));
+ errmsg("operator cannot be its own negator or sort operator")));
return InvalidOid;
}
@@ -780,9 +780,9 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
0, 0, 0);
/*
- * if the commutator and negator are the same operator, do one update.
- * XXX this is probably useless code --- I doubt it ever makes sense
- * for commutator and negator to be the same thing...
+ * if the commutator and negator are the same operator, do one update. XXX
+ * this is probably useless code --- I doubt it ever makes sense for
+ * commutator and negator to be the same thing...
*/
if (commId == negId)
{
@@ -931,10 +931,10 @@ makeOperatorDependencies(HeapTuple tuple)
* NOTE: we do not consider the operator to depend on the associated
* operators oprcom, oprnegate, oprlsortop, oprrsortop, oprltcmpop,
* oprgtcmpop. We would not want to delete this operator if those go
- * away, but only reset the link fields; which is not a function that
- * the dependency code can presently handle. (Something could perhaps
- * be done with objectSubId though.) For now, it's okay to let those
- * links dangle if a referenced operator is removed.
+ * away, but only reset the link fields; which is not a function that the
+ * dependency code can presently handle. (Something could perhaps be done
+ * with objectSubId though.) For now, it's okay to let those links dangle
+ * if a referenced operator is removed.
*/
/* Dependency on implementation function */
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 691be63dc75..ab3de4ed4be 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.133 2005/09/24 22:54:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.134 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,9 +115,9 @@ ProcedureCreate(const char *procedureName,
if (allParameterTypes != PointerGetDatum(NULL))
{
/*
- * We expect the array to be a 1-D OID array; verify that. We
- * don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of OID values.
+ * We expect the array to be a 1-D OID array; verify that. We don't
+ * need to use deconstruct_array() since the array data is just going
+ * to look like a C array of OID values.
*/
allParamCount = ARR_DIMS(DatumGetPointer(allParameterTypes))[0];
if (ARR_NDIM(DatumGetPointer(allParameterTypes)) != 1 ||
@@ -136,8 +136,8 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow return type ANYARRAY or ANYELEMENT unless at least one
- * input argument is ANYARRAY or ANYELEMENT. Also, do not allow
- * return type INTERNAL unless at least one input argument is INTERNAL.
+ * input argument is ANYARRAY or ANYELEMENT. Also, do not allow return
+ * type INTERNAL unless at least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)
{
@@ -158,9 +158,9 @@ ProcedureCreate(const char *procedureName,
for (i = 0; i < allParamCount; i++)
{
/*
- * We don't bother to distinguish input and output params here,
- * so if there is, say, just an input INTERNAL param then we will
- * still set internalOutParam. This is OK since we don't really
+ * We don't bother to distinguish input and output params here, so
+ * if there is, say, just an input INTERNAL param then we will
+ * still set internalOutParam. This is OK since we don't really
* care.
*/
switch (allParams[i])
@@ -240,9 +240,9 @@ ProcedureCreate(const char *procedureName,
else
nulls[Anum_pg_proc_proargnames - 1] = 'n';
values[Anum_pg_proc_prosrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(prosrc));
+ CStringGetDatum(prosrc));
values[Anum_pg_proc_probin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(probin));
+ CStringGetDatum(probin));
/* start out with empty permissions */
nulls[Anum_pg_proc_proacl - 1] = 'n';
@@ -264,8 +264,8 @@ ProcedureCreate(const char *procedureName,
if (!replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function \"%s\" already exists with same argument types",
- procedureName)));
+ errmsg("function \"%s\" already exists with same argument types",
+ procedureName)));
if (!pg_proc_ownercheck(HeapTupleGetOid(oldtup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
procedureName);
@@ -295,14 +295,14 @@ ProcedureCreate(const char *procedureName,
parameterModes,
parameterNames);
if (olddesc == NULL && newdesc == NULL)
- /* ok, both are runtime-defined RECORDs */ ;
+ /* ok, both are runtime-defined RECORDs */ ;
else if (olddesc == NULL || newdesc == NULL ||
!equalTupleDescs(olddesc, newdesc))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change return type of existing function"),
- errdetail("Row type defined by OUT parameters is different."),
- errhint("Use DROP FUNCTION first.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("cannot change return type of existing function"),
+ errdetail("Row type defined by OUT parameters is different."),
+ errhint("Use DROP FUNCTION first.")));
}
/* Can't change aggregate status, either */
@@ -422,8 +422,8 @@ fmgr_internal_validator(PG_FUNCTION_ARGS)
char *prosrc;
/*
- * We do not honor check_function_bodies since it's unlikely the
- * function name will be found later if it isn't there now.
+ * We do not honor check_function_bodies since it's unlikely the function
+ * name will be found later if it isn't there now.
*/
tuple = SearchSysCache(PROCOID,
@@ -471,10 +471,9 @@ fmgr_c_validator(PG_FUNCTION_ARGS)
char *probin;
/*
- * It'd be most consistent to skip the check if
- * !check_function_bodies, but the purpose of that switch is to be
- * helpful for pg_dump loading, and for pg_dump loading it's much
- * better if we *do* check.
+ * It'd be most consistent to skip the check if !check_function_bodies,
+ * but the purpose of that switch is to be helpful for pg_dump loading,
+ * and for pg_dump loading it's much better if we *do* check.
*/
tuple = SearchSysCache(PROCOID,
@@ -554,8 +553,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL functions cannot have arguments of type %s",
- format_type_be(proc->proargtypes.values[i]))));
+ errmsg("SQL functions cannot have arguments of type %s",
+ format_type_be(proc->proargtypes.values[i]))));
}
}
@@ -577,13 +576,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
error_context_stack = &sqlerrcontext;
/*
- * We can't do full prechecking of the function definition if
- * there are any polymorphic input types, because actual datatypes
- * of expression results will be unresolvable. The check will be
- * done at runtime instead.
+ * We can't do full prechecking of the function definition if there
+ * are any polymorphic input types, because actual datatypes of
+ * expression results will be unresolvable. The check will be done at
+ * runtime instead.
*
- * We can run the text through the raw parser though; this will at
- * least catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at least
+ * catch silly syntactic errors.
*/
if (!haspolyarg)
{
@@ -652,8 +651,8 @@ function_parse_error_transpose(const char *prosrc)
* Nothing to do unless we are dealing with a syntax error that has a
* cursor position.
*
- * Some PLs may prefer to report the error position as an internal error
- * to begin with, so check that too.
+ * Some PLs may prefer to report the error position as an internal error to
+ * begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
@@ -703,10 +702,10 @@ match_prosrc_to_query(const char *prosrc, const char *queryText,
int cursorpos)
{
/*
- * Rather than fully parsing the CREATE FUNCTION command, we just scan
- * the command looking for $prosrc$ or 'prosrc'. This could be fooled
- * (though not in any very probable scenarios), so fail if we find
- * more than one match.
+ * Rather than fully parsing the CREATE FUNCTION command, we just scan the
+ * command looking for $prosrc$ or 'prosrc'. This could be fooled (though
+ * not in any very probable scenarios), so fail if we find more than one
+ * match.
*/
int prosrclen = strlen(prosrc);
int querylen = strlen(queryText);
@@ -722,8 +721,8 @@ match_prosrc_to_query(const char *prosrc, const char *queryText,
{
/*
* Found a $foo$ match. Since there are no embedded quoting
- * characters in a dollar-quoted literal, we don't have to do
- * any fancy arithmetic; just offset by the starting position.
+ * characters in a dollar-quoted literal, we don't have to do any
+ * fancy arithmetic; just offset by the starting position.
*/
if (matchpos)
return 0; /* multiple matches, fail */
@@ -735,9 +734,8 @@ match_prosrc_to_query(const char *prosrc, const char *queryText,
cursorpos, &newcursorpos))
{
/*
- * Found a 'foo' match. match_prosrc_to_literal() has
- * adjusted for any quotes or backslashes embedded in the
- * literal.
+ * Found a 'foo' match. match_prosrc_to_literal() has adjusted
+ * for any quotes or backslashes embedded in the literal.
*/
if (matchpos)
return 0; /* multiple matches, fail */
@@ -769,8 +767,8 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
* string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
- * We do the comparison a character at a time, not a byte at a time, so
- * that we can do the correct cursorpos math.
+ * We do the comparison a character at a time, not a byte at a time, so that
+ * we can do the correct cursorpos math.
*/
while (*prosrc)
{
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index bd326b876b3..4cce7ba13cf 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.2 2005/08/30 01:07:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.3 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,22 +36,22 @@ typedef enum
} objectType;
static int getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2,
- Oid **diff);
-static Oid classIdGetDbId(Oid classId);
+ Oid **diff);
+static Oid classIdGetDbId(Oid classId);
static void shdepLockAndCheckObject(Oid classId, Oid objectId);
static void shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
- Oid refclassid, Oid refobjid,
- SharedDependencyType deptype);
+ Oid refclassid, Oid refobjid,
+ SharedDependencyType deptype);
static void shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId,
- Oid refclassId, Oid refobjId,
- SharedDependencyType deptype);
+ Oid refclassId, Oid refobjId,
+ SharedDependencyType deptype);
static void shdepDropDependency(Relation sdepRel, Oid classId, Oid objectId,
- Oid refclassId, Oid refobjId,
- SharedDependencyType deptype);
+ Oid refclassId, Oid refobjId,
+ SharedDependencyType deptype);
static void storeObjectDescription(StringInfo descs, objectType type,
- ObjectAddress *object,
- SharedDependencyType deptype,
- int count);
+ ObjectAddress *object,
+ SharedDependencyType deptype,
+ int count);
static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel);
@@ -70,7 +70,7 @@ static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel);
*/
void
recordSharedDependencyOn(ObjectAddress *depender,
- ObjectAddress *referenced,
+ ObjectAddress *referenced,
SharedDependencyType deptype)
{
Relation sdepRel;
@@ -95,7 +95,7 @@ recordSharedDependencyOn(ObjectAddress *depender,
sdepRel))
{
shdepAddDependency(sdepRel, depender->classId, depender->objectId,
- referenced->classId, referenced->objectId,
+ referenced->classId, referenced->objectId,
deptype);
}
@@ -132,11 +132,11 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner)
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
- * object and dependency type! So in practice this can only be used for
+ * object and dependency type! So in practice this can only be used for
* updating SHARED_DEPENDENCY_OWNER entries, which should have that property.
*
* If there is no previous entry, we assume it was referencing a PINned
@@ -154,12 +154,12 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
Oid dbid = classIdGetDbId(classid);
HeapTuple oldtup = NULL;
HeapTuple scantup;
- ScanKeyData key[3];
- SysScanDesc scan;
+ ScanKeyData key[3];
+ SysScanDesc scan;
/*
- * Make sure the new referenced object doesn't go away while we record
- * the dependency.
+ * Make sure the new referenced object doesn't go away while we record the
+ * dependency.
*/
shdepLockAndCheckObject(refclassid, refobjid);
@@ -167,11 +167,11 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
* Look for a previous entry
*/
ScanKeyInit(&key[0],
- Anum_pg_shdepend_dbid,
+ Anum_pg_shdepend_dbid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(dbid));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_classid,
+ Anum_pg_shdepend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classid));
ScanKeyInit(&key[2],
@@ -181,7 +181,7 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
scan = systable_beginscan(sdepRel, SharedDependDependerIndexId, true,
SnapshotNow, 3, key);
-
+
while ((scantup = systable_getnext(scan)) != NULL)
{
/* Ignore if not of the target dependency type */
@@ -220,8 +220,8 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
else
{
/* Need to insert new entry */
- Datum values[Natts_pg_shdepend];
- bool nulls[Natts_pg_shdepend];
+ Datum values[Natts_pg_shdepend];
+ bool nulls[Natts_pg_shdepend];
memset(nulls, 0, sizeof(nulls));
@@ -234,8 +234,8 @@ shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(deptype);
/*
- * we are reusing oldtup just to avoid declaring a new variable,
- * but it's certainly a new tuple
+ * we are reusing oldtup just to avoid declaring a new variable, but
+ * it's certainly a new tuple
*/
oldtup = heap_form_tuple(RelationGetDescr(sdepRel), values, nulls);
simple_heap_insert(sdepRel, oldtup);
@@ -271,7 +271,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
@@ -296,7 +296,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* Helper for updateAclDependencies.
*
* Takes two Oid arrays and returns elements from the first not found in the
- * second. We assume both arrays are sorted and de-duped, and that the
+ * second. We assume both arrays are sorted and de-duped, and that the
* second array does not contain any values not found in the first.
*
* NOTE: Both input arrays are pfreed.
@@ -304,17 +304,17 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
static int
getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff)
{
- Oid *result;
- int i,
- j,
- k = 0;
+ Oid *result;
+ int i,
+ j,
+ k = 0;
AssertArg(nlist1 >= nlist2 && nlist2 >= 0);
result = palloc(sizeof(Oid) * (nlist1 - nlist2));
*diff = result;
- for (i = 0, j = 0; i < nlist1 && j < nlist2; )
+ for (i = 0, j = 0; i < nlist1 && j < nlist2;)
{
if (list1[i] == list2[j])
{
@@ -350,7 +350,7 @@ getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff)
/*
* updateAclDependencies
- * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE.
+ * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE.
*
* classId, objectId: identify the object whose ACL this is
* ownerId: role owning the object
@@ -398,12 +398,12 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
/* Add or drop the respective dependency */
for (i = 0; i < ndiff; i++)
{
- Oid roleid = diff[i];
+ Oid roleid = diff[i];
/*
- * Skip the owner: he has an OWNER shdep entry instead.
- * (This is not just a space optimization; it makes ALTER OWNER
- * easier. See notes in changeDependencyOnOwner.)
+ * Skip the owner: he has an OWNER shdep entry instead. (This is
+ * not just a space optimization; it makes ALTER OWNER easier.
+ * See notes in changeDependencyOnOwner.)
*/
if (roleid == ownerId)
continue;
@@ -416,7 +416,7 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
shdepAddDependency(sdepRel, classId, objectId,
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
- else
+ else
shdepDropDependency(sdepRel, classId, objectId,
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
@@ -433,15 +433,15 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
*/
typedef struct
{
- Oid dbOid;
- int count;
+ Oid dbOid;
+ int count;
} remoteDep;
/*
* checkSharedDependencies
*
* Check whether there are shared dependency entries for a given shared
- * object. Returns a string containing a newline-separated list of object
+ * object. Returns a string containing a newline-separated list of object
* descriptions that depend on the shared object, or NULL if none is found.
*
* We can find three different kinds of dependencies: dependencies on objects
@@ -456,20 +456,20 @@ char *
checkSharedDependencies(Oid classId, Oid objectId)
{
Relation sdepRel;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
int totalDeps = 0;
int numLocalDeps = 0;
int numSharedDeps = 0;
List *remDeps = NIL;
ListCell *cell;
- ObjectAddress object;
+ ObjectAddress object;
StringInfoData descs;
/*
- * We try to limit the number of reported dependencies to something
- * sane, both for the user's sake and to avoid blowing out memory.
+ * We try to limit the number of reported dependencies to something sane,
+ * both for the user's sake and to avoid blowing out memory.
*/
#define MAX_REPORTED_DEPS 100
@@ -478,20 +478,20 @@ checkSharedDependencies(Oid classId, Oid objectId)
sdepRel = heap_open(SharedDependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
- Anum_pg_shdepend_refclassid,
+ Anum_pg_shdepend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_refobjid,
+ Anum_pg_shdepend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
-
+
scan = systable_beginscan(sdepRel, SharedDependReferenceIndexId, true,
SnapshotNow, 2, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup);
+ Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup);
/* This case can be dispatched quickly */
if (sdepForm->deptype == SHARED_DEPENDENCY_PIN)
@@ -502,7 +502,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop %s because it is required by the database system",
- getObjectDescription(&object))));
+ getObjectDescription(&object))));
}
object.classId = sdepForm->classid;
@@ -513,8 +513,8 @@ checkSharedDependencies(Oid classId, Oid objectId)
* If it's a dependency local to this database or it's a shared
* object, describe it.
*
- * If it's a remote dependency, keep track of it so we can report
- * the number of them later.
+ * If it's a remote dependency, keep track of it so we can report the
+ * number of them later.
*/
if (sdepForm->dbid == MyDatabaseId)
{
@@ -537,10 +537,10 @@ checkSharedDependencies(Oid classId, Oid objectId)
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
- * complex. The expected number of databases is not high
- * anyway, I suppose.
+ * complex. The expected number of databases is not high anyway,
+ * I suppose.
*/
foreach(cell, remDeps)
{
@@ -572,8 +572,8 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* Report seems unreasonably long, so reduce it to per-database info
*
- * Note: we don't ever suppress per-database totals, which should
- * be OK as long as there aren't too many databases ...
+ * Note: we don't ever suppress per-database totals, which should be OK
+ * as long as there aren't too many databases ...
*/
descs.len = 0; /* reset to empty */
descs.data[0] = '\0';
@@ -592,7 +592,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
foreach(cell, remDeps)
{
- remoteDep *dep = lfirst(cell);
+ remoteDep *dep = lfirst(cell);
object.classId = DatabaseRelationId;
object.objectId = dep->dbOid;
@@ -624,8 +624,8 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId)
{
Relation sdepRel;
TupleDesc sdepDesc;
- ScanKeyData key[1];
- SysScanDesc scan;
+ ScanKeyData key[1];
+ SysScanDesc scan;
HeapTuple tup;
CatalogIndexState indstate;
Datum values[Natts_pg_shdepend];
@@ -655,11 +655,11 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId)
values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId);
/*
- * Copy the entries of the original database, changing the database Id
- * to that of the new database. Note that because we are not copying
- * rows with dbId == 0 (ie, rows describing dependent shared objects)
- * we won't copy the ownership dependency of the template database
- * itself; this is what we want.
+ * Copy the entries of the original database, changing the database Id to
+ * that of the new database. Note that because we are not copying rows
+ * with dbId == 0 (ie, rows describing dependent shared objects) we won't
+ * copy the ownership dependency of the template database itself; this is
+ * what we want.
*/
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
@@ -690,15 +690,15 @@ void
dropDatabaseDependencies(Oid databaseId)
{
Relation sdepRel;
- ScanKeyData key[1];
- SysScanDesc scan;
+ ScanKeyData key[1];
+ SysScanDesc scan;
HeapTuple tup;
sdepRel = heap_open(SharedDependRelationId, RowExclusiveLock);
/*
- * First, delete all the entries that have the database Oid in the
- * dbid field.
+ * First, delete all the entries that have the database Oid in the dbid
+ * field.
*/
ScanKeyInit(&key[0],
Anum_pg_shdepend_dbid,
@@ -747,7 +747,7 @@ deleteSharedDependencyRecordsFor(Oid classId, Oid objectId)
/*
* shdepAddDependency
- * Internal workhorse for inserting into pg_shdepend
+ * Internal workhorse for inserting into pg_shdepend
*
* sdepRel must be the pg_shdepend relation, already opened and suitably
* locked.
@@ -762,9 +762,9 @@ shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId,
bool nulls[Natts_pg_shdepend];
/*
- * Make sure the object doesn't go away while we record the dependency
- * on it. DROP routines should lock the object exclusively before they
- * check shared dependencies.
+ * Make sure the object doesn't go away while we record the dependency on
+ * it. DROP routines should lock the object exclusively before they check
+ * shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
@@ -794,7 +794,7 @@ shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId,
/*
* shdepDropDependency
- * Internal workhorse for deleting entries from pg_shdepend.
+ * Internal workhorse for deleting entries from pg_shdepend.
*
* We drop entries having the following properties:
* dependent object is the one identified by classId/objectId
@@ -810,17 +810,17 @@ shdepDropDependency(Relation sdepRel, Oid classId, Oid objectId,
Oid refclassId, Oid refobjId,
SharedDependencyType deptype)
{
- ScanKeyData key[3];
- SysScanDesc scan;
+ ScanKeyData key[3];
+ SysScanDesc scan;
HeapTuple tup;
/* Scan for entries matching the dependent object */
ScanKeyInit(&key[0],
- Anum_pg_shdepend_dbid,
+ Anum_pg_shdepend_dbid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classIdGetDbId(classId)));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_classid,
+ Anum_pg_shdepend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[2],
@@ -899,8 +899,8 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
LockSharedObject(classId, objectId, 0, AccessShareLock);
/*
- * We have to recognize sinval updates here, else our local syscache
- * may still contain the object even if it was just dropped.
+ * We have to recognize sinval updates here, else our local syscache may
+ * still contain the object even if it was just dropped.
*/
AcceptInvalidationMessages();
@@ -916,25 +916,26 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
objectId)));
break;
- /*
- * Currently, this routine need not support any other shared object
- * types besides roles. If we wanted to record explicit dependencies
- * on databases or tablespaces, we'd need code along these lines:
- */
+ /*
+ * Currently, this routine need not support any other shared
+ * object types besides roles. If we wanted to record explicit
+ * dependencies on databases or tablespaces, we'd need code along
+ * these lines:
+ */
#ifdef NOT_USED
case TableSpaceRelationId:
- {
- /* For lack of a syscache on pg_tablespace, do this: */
- char *tablespace = get_tablespace_name(objectId);
-
- if (tablespace == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace %u was concurrently dropped",
- objectId)));
- pfree(tablespace);
- break;
- }
+ {
+ /* For lack of a syscache on pg_tablespace, do this: */
+ char *tablespace = get_tablespace_name(objectId);
+
+ if (tablespace == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("tablespace %u was concurrently dropped",
+ objectId)));
+ pfree(tablespace);
+ break;
+ }
#endif
default:
@@ -963,13 +964,13 @@ storeObjectDescription(StringInfo descs, objectType type,
SharedDependencyType deptype,
int count)
{
- char *objdesc = getObjectDescription(object);
+ char *objdesc = getObjectDescription(object);
/* separate entries with a newline */
if (descs->len != 0)
appendStringInfoChar(descs, '\n');
- switch (type)
+ switch (type)
{
case LOCAL_OBJECT:
case SHARED_OBJECT:
@@ -1006,16 +1007,16 @@ static bool
isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
{
bool result = false;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
ScanKeyInit(&key[0],
- Anum_pg_shdepend_refclassid,
+ Anum_pg_shdepend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_refobjid,
+ Anum_pg_shdepend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
@@ -1024,9 +1025,9 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* Since we won't generate additional pg_shdepend entries for pinned
- * objects, there can be at most one entry referencing a pinned
- * object. Hence, it's sufficient to look at the first returned
- * tuple; we don't need to loop.
+ * objects, there can be at most one entry referencing a pinned object.
+ * Hence, it's sufficient to look at the first returned tuple; we don't
+ * need to loop.
*/
tup = systable_getnext(scan);
if (HeapTupleIsValid(tup))
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index d84bc2c1ac8..ab250b02ea9 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.103 2005/08/12 01:35:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.104 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,7 +75,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(0); /* typlen */
values[i++] = BoolGetDatum(false); /* typbyval */
values[i++] = CharGetDatum(0); /* typtype */
@@ -180,8 +180,8 @@ TypeCreate(const char *typeName,
int i;
/*
- * We assume that the caller validated the arguments individually, but
- * did not check for bad combinations.
+ * We assume that the caller validated the arguments individually, but did
+ * not check for bad combinations.
*
* Validate size specifications: either positive (fixed-length) or -1
* (varlena) or -2 (cstring). Pass-by-value types must have a fixed
@@ -198,8 +198,8 @@ TypeCreate(const char *typeName,
(internalSize <= 0 || internalSize > (int16) sizeof(Datum)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("internal size %d is invalid for passed-by-value type",
- internalSize)));
+ errmsg("internal size %d is invalid for passed-by-value type",
+ internalSize)));
/* Only varlena types can be toasted */
if (storage != 'p' && internalSize != -1)
@@ -224,7 +224,7 @@ TypeCreate(const char *typeName,
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(internalSize); /* typlen */
values[i++] = BoolGetDatum(passedByValue); /* typbyval */
values[i++] = CharGetDatum(typeType); /* typtype */
@@ -245,8 +245,8 @@ TypeCreate(const char *typeName,
values[i++] = Int32GetDatum(typNDims); /* typndims */
/*
- * initialize the default binary value for this type. Check for nulls
- * of course.
+ * initialize the default binary value for this type. Check for nulls of
+ * course.
*/
if (defaultTypeBin)
values[i] = DirectFunctionCall1(textin,
@@ -260,7 +260,7 @@ TypeCreate(const char *typeName,
*/
if (defaultTypeValue)
values[i] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultTypeValue));
+ CStringGetDatum(defaultTypeValue));
else
nulls[i] = 'n';
i++; /* typdefault */
@@ -356,8 +356,7 @@ TypeCreate(const char *typeName,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for 'c'atalog
- * types */
+ Oid relationOid, /* only for 'c'atalog types */
char relationKind, /* ditto */
Oid owner,
Oid inputProcedure,
@@ -436,13 +435,12 @@ GenerateTypeDependencies(Oid typeNamespace,
/*
* If the type is a rowtype for a relation, mark it as internally
- * dependent on the relation, *unless* it is a stand-alone composite
- * type relation. For the latter case, we have to reverse the
- * dependency.
+ * dependent on the relation, *unless* it is a stand-alone composite type
+ * relation. For the latter case, we have to reverse the dependency.
*
* In the former case, this allows the type to be auto-dropped when the
- * relation is, and not otherwise. And in the latter, of course we get
- * the opposite effect.
+ * relation is, and not otherwise. And in the latter, of course we get the
+ * opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -457,11 +455,10 @@ GenerateTypeDependencies(Oid typeNamespace,
}
/*
- * If the type is an array type, mark it auto-dependent on the base
- * type. (This is a compromise between the typical case where the
- * array type is automatically generated and the case where it is
- * manually created: we'd prefer INTERNAL for the former case and
- * NORMAL for the latter.)
+ * If the type is an array type, mark it auto-dependent on the base type.
+ * (This is a compromise between the typical case where the array type is
+ * automatically generated and the case where it is manually created: we'd
+ * prefer INTERNAL for the former case and NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index e3efde249d0..160cd8e488a 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.29 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -72,8 +72,8 @@ DefineAggregate(List *names, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
- * sfunc1, stype1, and initcond1 are accepted as obsolete
- * spellings for sfunc, stype, initcond.
+ * sfunc1, stype1, and initcond1 are accepted as obsolete spellings
+ * for sfunc, stype, initcond.
*/
if (pg_strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetQualifiedName(defel);
@@ -119,11 +119,11 @@ DefineAggregate(List *names, List *parameters)
/*
* look up the aggregate's base type (input datatype) and transtype.
*
- * We have historically allowed the command to look like basetype = 'ANY'
- * so we must do a case-insensitive comparison for the name ANY. Ugh.
+ * We have historically allowed the command to look like basetype = 'ANY' so
+ * we must do a case-insensitive comparison for the name ANY. Ugh.
*
- * basetype can be a pseudo-type, but transtype can't, since we need to
- * be able to store values of the transtype. However, we can allow
+ * basetype can be a pseudo-type, but transtype can't, since we need to be
+ * able to store values of the transtype. However, we can allow
* polymorphic transtype in some cases (AggregateCreate will check).
*/
if (pg_strcasecmp(TypeNameToString(baseType), "ANY") == 0)
@@ -169,11 +169,11 @@ RemoveAggregate(RemoveAggrStmt *stmt)
ObjectAddress object;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type.
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type.
*
- * else attempt to find an aggregate with a basetype of ANYOID. This
- * means that the aggregate is to apply to all basetypes (eg, COUNT).
+ * else attempt to find an aggregate with a basetype of ANYOID. This means
+ * that the aggregate is to apply to all basetypes (eg, COUNT).
*/
if (aggType)
basetypeID = typenameTypeId(aggType);
@@ -193,8 +193,8 @@ RemoveAggregate(RemoveAggrStmt *stmt)
/* Permission check: must own agg or its namespace */
if (!pg_proc_ownercheck(procOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(aggName));
@@ -225,10 +225,10 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
AclResult aclresult;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a
- * basetype of ANYOID. This means that the aggregate applies to all
- * basetypes (eg, COUNT).
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type; else attempt to find an aggregate with a basetype of
+ * ANYOID. This means that the aggregate applies to all basetypes (eg,
+ * COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
@@ -258,16 +258,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
}
@@ -305,10 +305,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, Oid newOwnerId)
AclResult aclresult;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a
- * basetype of ANYOID. This means that the aggregate applies to all
- * basetypes (eg, COUNT).
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type; else attempt to find an aggregate with a basetype of
+ * ANYOID. This means that the aggregate applies to all basetypes (eg,
+ * COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
@@ -353,8 +353,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, Oid newOwnerId)
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
procForm->proowner = newOwnerId;
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 996d70e1632..102dafb8a2a 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.14 2005/08/01 04:03:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.15 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,8 +102,8 @@ ExecRenameStmt(RenameStmt *stmt)
{
/*
* RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace,
- * as well as ownership of the table.
+ * CREATE rights on the containing namespace, as
+ * well as ownership of the table.
*/
Oid namespaceId = get_rel_namespace(relid);
AclResult aclresult;
@@ -113,7 +113,7 @@ ExecRenameStmt(RenameStmt *stmt)
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
+ get_namespace_name(namespaceId));
renamerel(relid, stmt->newname);
break;
@@ -122,7 +122,7 @@ ExecRenameStmt(RenameStmt *stmt)
renameatt(relid,
stmt->subname, /* old att name */
stmt->newname, /* new att name */
- interpretInhOption(stmt->relation->inhOpt), /* recursive? */
+ interpretInhOption(stmt->relation->inhOpt), /* recursive? */
false); /* recursing already? */
break;
case OBJECT_TRIGGER:
@@ -156,18 +156,18 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt)
AlterFunctionNamespace(stmt->object, stmt->objarg,
stmt->newschema);
break;
-
+
case OBJECT_SEQUENCE:
case OBJECT_TABLE:
CheckRelationOwnership(stmt->relation, true);
AlterTableNamespace(stmt->relation, stmt->newschema);
break;
-
+
case OBJECT_TYPE:
case OBJECT_DOMAIN:
AlterTypeNamespace(stmt->object, stmt->newschema);
break;
-
+
default:
elog(ERROR, "unrecognized AlterObjectSchemaStmt type: %d",
(int) stmt->objectType);
@@ -181,7 +181,7 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt)
void
ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
{
- Oid newowner = get_roleid_checked(stmt->newowner);
+ Oid newowner = get_roleid_checked(stmt->newowner);
switch (stmt->objectType)
{
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index bd32c8c841e..431e39f3b07 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.88 2005/07/29 19:30:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,9 +119,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
elevel = DEBUG2;
/*
- * Use the current context for storing analysis info. vacuum.c
- * ensures that this context will be cleared when I return, thus
- * releasing the memory allocated here.
+ * Use the current context for storing analysis info. vacuum.c ensures
+ * that this context will be cleared when I return, thus releasing the
+ * memory allocated here.
*/
anl_context = CurrentMemoryContext;
@@ -132,8 +132,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
CHECK_FOR_INTERRUPTS();
/*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to process it.
+ * Race condition -- if the pg_class tuple has gone away since the last
+ * time we saw it, we don't need to process it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
@@ -141,8 +141,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
return;
/*
- * Open the class, getting only a read lock on it, and check
- * permissions. Permissions check should match vacuum's check!
+ * Open the class, getting only a read lock on it, and check permissions.
+ * Permissions check should match vacuum's check!
*/
onerel = relation_open(relid, AccessShareLock);
@@ -159,8 +159,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * Check that it's a plain table; we used to do this in get_rel_oids()
- * but seems safer to check after we've locked the relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids() but
+ * seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
@@ -175,10 +175,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their contents
- * are probably not up-to-date on disk. (We don't throw a warning
- * here; it would just lead to chatter during a database-wide
- * ANALYZE.)
+ * trying to analyze these is rather pointless, since their contents are
+ * probably not up-to-date on disk. (We don't throw a warning here; it
+ * would just lead to chatter during a database-wide ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
{
@@ -239,10 +238,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * Open all indexes of the relation, and see if there are any
- * analyzable columns in the indexes. We do not analyze index columns
- * if there was an explicit column list in the ANALYZE command,
- * however.
+ * Open all indexes of the relation, and see if there are any analyzable
+ * columns in the indexes. We do not analyze index columns if there was
+ * an explicit column list in the ANALYZE command, however.
*/
vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
hasindex = (nindexes > 0);
@@ -280,13 +278,12 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
indexpr_item = lnext(indexpr_item);
/*
- * Can't analyze if the opclass uses a storage
- * type different from the expression result type.
- * We'd get confused because the type shown in
- * pg_attribute for the index column doesn't match
- * what we are getting from the expression.
- * Perhaps this can be fixed someday, but for now,
- * punt.
+ * Can't analyze if the opclass uses a storage type
+ * different from the expression result type. We'd get
+ * confused because the type shown in pg_attribute for
+ * the index column doesn't match what we are getting
+ * from the expression. Perhaps this can be fixed
+ * someday, but for now, punt.
*/
if (exprType(indexkey) !=
Irel[ind]->rd_att->attrs[i]->atttypid)
@@ -313,13 +310,13 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
{
/*
* We report that the table is empty; this is just so that the
- * autovacuum code doesn't go nuts trying to get stats about
- * a zero-column table.
+ * autovacuum code doesn't go nuts trying to get stats about a
+ * zero-column table.
*/
if (!vacstmt->vacuum)
pgstat_report_analyze(RelationGetRelid(onerel),
onerel->rd_rel->relisshared,
- 0, 0);
+ 0, 0);
vac_close_indexes(nindexes, Irel, AccessShareLock);
relation_close(onerel, AccessShareLock);
@@ -327,9 +324,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * Determine how many rows we need to sample, using the worst case
- * from all analyzable columns. We use a lower bound of 100 rows to
- * avoid possible overflow in Vitter's algorithm.
+ * Determine how many rows we need to sample, using the worst case from
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * possible overflow in Vitter's algorithm.
*/
targrows = 100;
for (i = 0; i < attr_cnt; i++)
@@ -356,10 +353,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations
- * for each column are stored in a child context. The calc routines
- * are responsible to make sure that whatever they store into the
- * VacAttrStats structure is allocated in anl_context.
+ * Compute the statistics. Temporary results during the calculations for
+ * each column are stored in a child context. The calc routines are
+ * responsible to make sure that whatever they store into the VacAttrStats
+ * structure is allocated in anl_context.
*/
if (numrows > 0)
{
@@ -397,9 +394,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are
- * stats in pg_statistic for columns we didn't process, we leave
- * them alone.)
+ * previous statistics for the target columns. (If there are stats in
+ * pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(relid, attr_cnt, vacattrstats);
@@ -413,11 +409,11 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
}
/*
- * If we are running a standalone ANALYZE, update pages/tuples stats
- * in pg_class. We know the accurate page count from the smgr, but
- * only an approximate number of tuples; therefore, if we are part of
- * VACUUM ANALYZE do *not* overwrite the accurate count already
- * inserted by VACUUM. The same consideration applies to indexes.
+ * If we are running a standalone ANALYZE, update pages/tuples stats in
+ * pg_class. We know the accurate page count from the smgr, but only an
+ * approximate number of tuples; therefore, if we are part of VACUUM
+ * ANALYZE do *not* overwrite the accurate count already inserted by
+ * VACUUM. The same consideration applies to indexes.
*/
if (!vacstmt->vacuum)
{
@@ -440,7 +436,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/* report results to the stats collector, too */
pgstat_report_analyze(RelationGetRelid(onerel),
onerel->rd_rel->relisshared,
- totalrows, totaldeadrows);
+ totalrows, totaldeadrows);
}
/* Done with indexes */
@@ -448,8 +444,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Close source relation now, but keep lock so that no one deletes it
- * before we commit. (If someone did, they'd fail to clean up the
- * entries we made in pg_statistic.)
+ * before we commit. (If someone did, they'd fail to clean up the entries
+ * we made in pg_statistic.)
*/
relation_close(onerel, NoLock);
}
@@ -499,8 +495,8 @@ compute_index_stats(Relation onerel, double totalrows,
/*
* Need an EState for evaluation of index expressions and
- * partial-index predicates. Create it in the per-index context
- * to be sure it gets cleaned up at the bottom of the loop.
+ * partial-index predicates. Create it in the per-index context to be
+ * sure it gets cleaned up at the bottom of the loop.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -539,8 +535,7 @@ compute_index_stats(Relation onerel, double totalrows,
{
/*
* Evaluate the index row to compute expression values. We
- * could do this by hand, but FormIndexDatum is
- * convenient.
+ * could do this by hand, but FormIndexDatum is convenient.
*/
FormIndexDatum(indexInfo,
slot,
@@ -564,9 +559,8 @@ compute_index_stats(Relation onerel, double totalrows,
}
/*
- * Having counted the number of rows that pass the predicate in
- * the sample, we can estimate the total number of rows in the
- * index.
+ * Having counted the number of rows that pass the predicate in the
+ * sample, we can estimate the total number of rows in the index.
*/
thisdata->tupleFract = (double) numindexrows / (double) numrows;
totalindexrows = ceil(thisdata->tupleFract * totalrows);
@@ -644,8 +638,8 @@ examine_attribute(Relation onerel, int attnum)
stats->tupattnum = attnum;
/*
- * Call the type-specific typanalyze function. If none is specified,
- * use std_typanalyze().
+ * Call the type-specific typanalyze function. If none is specified, use
+ * std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
@@ -683,8 +677,8 @@ BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
bs->N = nblocks; /* measured table size */
/*
- * If we decide to reduce samplesize for tables that have less or not
- * much more than samplesize blocks, here is the place to do it.
+ * If we decide to reduce samplesize for tables that have less or not much
+ * more than samplesize blocks, here is the place to do it.
*/
bs->n = samplesize;
bs->t = 0; /* blocks scanned so far */
@@ -815,12 +809,11 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
vacuum_delay_point();
/*
- * We must maintain a pin on the target page's buffer to ensure
- * that the maxoffset value stays good (else concurrent VACUUM
- * might delete tuples out from under us). Hence, pin the page
- * until we are done looking at it. We don't maintain a lock on
- * the page, so tuples could get added to it, but we ignore such
- * tuples.
+ * We must maintain a pin on the target page's buffer to ensure that
+ * the maxoffset value stays good (else concurrent VACUUM might delete
+ * tuples out from under us). Hence, pin the page until we are done
+ * looking at it. We don't maintain a lock on the page, so tuples
+ * could get added to it, but we ignore such tuples.
*/
targbuffer = ReadBuffer(onerel, targblock);
LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
@@ -842,24 +835,24 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
/*
* The first targrows live rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm
- * is from Jeff Vitter's paper (see full citation below).
- * It works by repeatedly computing the number of tuples
- * to skip before selecting a tuple, which replaces a
- * randomly chosen element of the reservoir (current set
- * of tuples). At all times the reservoir is a true
- * random sample of the tuples we've passed over so far,
- * so when we fall off the end of the relation we're done.
+ * until we reach the end of the relation. This algorithm is
+ * from Jeff Vitter's paper (see full citation below). It
+ * works by repeatedly computing the number of tuples to skip
+ * before selecting a tuple, which replaces a randomly chosen
+ * element of the reservoir (current set of tuples). At all
+ * times the reservoir is a true random sample of the tuples
+ * we've passed over so far, so when we fall off the end of
+ * the relation we're done.
*/
if (numrows < targrows)
rows[numrows++] = heap_copytuple(&targtuple);
else
{
/*
- * t in Vitter's paper is the number of records
- * already processed. If we need to compute a new S
- * value, we must use the not-yet-incremented value of
- * liverows as t.
+ * t in Vitter's paper is the number of records already
+ * processed. If we need to compute a new S value, we
+ * must use the not-yet-incremented value of liverows as
+ * t.
*/
if (rowstoskip < 0)
rowstoskip = get_next_S(liverows, targrows, &rstate);
@@ -867,8 +860,8 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
if (rowstoskip <= 0)
{
/*
- * Found a suitable tuple, so save it, replacing
- * one old tuple at random
+ * Found a suitable tuple, so save it, replacing one
+ * old tuple at random
*/
int k = (int) (targrows * random_fract());
@@ -895,12 +888,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
}
/*
- * If we didn't find as many tuples as we wanted then we're done. No
- * sort is needed, since they're already in order.
+ * If we didn't find as many tuples as we wanted then we're done. No sort
+ * is needed, since they're already in order.
*
- * Otherwise we need to sort the collected tuples by position
- * (itempointer). It's not worth worrying about corner cases where
- * the tuples are already sorted.
+ * Otherwise we need to sort the collected tuples by position (itempointer).
+ * It's not worth worrying about corner cases where the tuples are already
+ * sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
@@ -1455,8 +1448,7 @@ compute_minimal_stats(VacAttrStatsP stats,
StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
/*
- * We track up to 2*n values for an n-element MCV list; but at least
- * 10
+ * We track up to 2*n values for an n-element MCV list; but at least 10
*/
track_max = 2 * num_mcv;
if (track_max < 10)
@@ -1488,9 +1480,9 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If it's a variable-width field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
+ * calculation. Note that if the value is toasted, we use the toasted
+ * width. We don't bother with this calculation if it's a fixed-width
+ * type.
*/
if (is_varlena)
{
@@ -1498,10 +1490,10 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
+ * avoid repeated detoastings and resultant excess memory usage
+ * during the comparisons. Also, check to see if the value is
+ * excessively wide, and if so don't detoast at all --- just
+ * ignore the value.
*/
if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
{
@@ -1594,9 +1586,9 @@ compute_minimal_stats(VacAttrStatsP stats,
nmultiple == track_cnt)
{
/*
- * Our track list includes every value in the sample, and
- * every value appeared more than once. Assume the column has
- * just these values.
+ * Our track list includes every value in the sample, and every
+ * value appeared more than once. Assume the column has just
+ * these values.
*/
stats->stadistinct = track_cnt;
}
@@ -1641,22 +1633,22 @@ compute_minimal_stats(VacAttrStatsP stats,
}
/*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value.
+ * If we estimated the number of distinct values at more than 10% of
+ * the total row count (a very arbitrary limit), then assume that
+ * stadistinct should scale with the row count rather than be a fixed
+ * value.
*/
if (stats->stadistinct > 0.1 * totalrows)
stats->stadistinct = -(stats->stadistinct / totalrows);
/*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample.
+ * Decide how many values are worth storing as most-common values. If
+ * we are able to generate a complete MCV list (all the values in the
+ * sample will fit, and we think these are all the ones in the table),
+ * then do so. Otherwise, store only those values that are
+ * significantly more common than the (estimated) average. We set the
+ * threshold rather arbitrarily at 25% more than average, with at
+ * least 2 instances in the sample.
*/
if (track_cnt < track_max && toowide_cnt == 0 &&
stats->stadistinct > 0 &&
@@ -1725,10 +1717,10 @@ compute_minimal_stats(VacAttrStatsP stats,
stats->stats_valid = true;
stats->stanullfrac = 1.0;
if (is_varwidth)
- stats->stawidth = 0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
else
stats->stawidth = stats->attrtype->typlen;
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
/* We don't need to bother cleaning up any of our temporary palloc's */
@@ -1802,9 +1794,9 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If it's a variable-width field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
+ * calculation. Note that if the value is toasted, we use the toasted
+ * width. We don't bother with this calculation if it's a fixed-width
+ * type.
*/
if (is_varlena)
{
@@ -1812,10 +1804,10 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
+ * avoid repeated detoastings and resultant excess memory usage
+ * during the comparisons. Also, check to see if the value is
+ * excessively wide, and if so don't detoast at all --- just
+ * ignore the value.
*/
if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
{
@@ -1854,24 +1846,23 @@ compute_scalar_stats(VacAttrStatsP stats,
sizeof(ScalarItem), compare_scalars);
/*
- * Now scan the values in order, find the most common ones, and
- * also accumulate ordering-correlation statistics.
+ * Now scan the values in order, find the most common ones, and also
+ * accumulate ordering-correlation statistics.
*
- * To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are
- * adjacent in the sorted list, so a brute-force approach is to
- * compare successive datum values until we find two that are not
- * equal. However, that requires N-1 invocations of the datum
- * comparison routine, which are completely redundant with work
- * that was done during the sort. (The sort algorithm must at
- * some point have compared each pair of items that are adjacent
- * in the sorted order; otherwise it could not know that it's
- * ordered the pair correctly.) We exploit this by having
+ * To determine which are most common, we first have to count the number
+ * of duplicates of each value. The duplicates are adjacent in the
+ * sorted list, so a brute-force approach is to compare successive
+ * datum values until we find two that are not equal. However, that
+ * requires N-1 invocations of the datum comparison routine, which are
+ * completely redundant with work that was done during the sort. (The
+ * sort algorithm must at some point have compared each pair of items
+ * that are adjacent in the sorted order; otherwise it could not know
+ * that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
* ScalarItem has been found equal to. At the end of the sort, a
- * ScalarItem's tupnoLink will still point to itself if and only
- * if it is the last item of its group of duplicates (since the
- * group will be ordered by tupno).
+ * ScalarItem's tupnoLink will still point to itself if and only if it
+ * is the last item of its group of duplicates (since the group will
+ * be ordered by tupno).
*/
corr_xysum = 0;
ndistinct = 0;
@@ -1895,9 +1886,9 @@ compute_scalar_stats(VacAttrStatsP stats,
{
/*
* Found a new item for the mcv list; find its
- * position, bubbling down old items if needed.
- * Loop invariant is that j points at an empty/
- * replaceable slot.
+ * position, bubbling down old items if needed. Loop
+ * invariant is that j points at an empty/ replaceable
+ * slot.
*/
int j;
@@ -1934,8 +1925,8 @@ compute_scalar_stats(VacAttrStatsP stats,
else if (toowide_cnt == 0 && nmultiple == ndistinct)
{
/*
- * Every value in the sample appeared more than once. Assume
- * the column has just these values.
+ * Every value in the sample appeared more than once. Assume the
+ * column has just these values.
*/
stats->stadistinct = ndistinct;
}
@@ -1976,26 +1967,25 @@ compute_scalar_stats(VacAttrStatsP stats,
}
/*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value.
+ * If we estimated the number of distinct values at more than 10% of
+ * the total row count (a very arbitrary limit), then assume that
+ * stadistinct should scale with the row count rather than be a fixed
+ * value.
*/
if (stats->stadistinct > 0.1 * totalrows)
stats->stadistinct = -(stats->stadistinct / totalrows);
/*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample. Also,
- * we won't suppress values that have a frequency of at least 1/K
- * where K is the intended number of histogram bins; such values
- * might otherwise cause us to emit duplicate histogram bin
- * boundaries.
+ * Decide how many values are worth storing as most-common values. If
+ * we are able to generate a complete MCV list (all the values in the
+ * sample will fit, and we think these are all the ones in the table),
+ * then do so. Otherwise, store only those values that are
+ * significantly more common than the (estimated) average. We set the
+ * threshold rather arbitrarily at 25% more than average, with at
+ * least 2 instances in the sample. Also, we won't suppress values
+ * that have a frequency of at least 1/K where K is the intended
+ * number of histogram bins; such values might otherwise cause us to
+ * emit duplicate histogram bin boundaries.
*/
if (track_cnt == ndistinct && toowide_cnt == 0 &&
stats->stadistinct > 0 &&
@@ -2065,9 +2055,9 @@ compute_scalar_stats(VacAttrStatsP stats,
}
/*
- * Generate a histogram slot entry if there are at least two
- * distinct values not accounted for in the MCV list. (This
- * ensures the histogram won't collapse to empty or a singleton.)
+ * Generate a histogram slot entry if there are at least two distinct
+ * values not accounted for in the MCV list. (This ensures the
+ * histogram won't collapse to empty or a singleton.)
*/
num_hist = ndistinct - num_mcv;
if (num_hist > num_bins)
@@ -2085,10 +2075,9 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* Collapse out the MCV items from the values[] array.
*
- * Note we destroy the values[] array here... but we don't need
- * it for anything more. We do, however, still need
- * values_cnt. nvals will be the number of remaining entries
- * in values[].
+ * Note we destroy the values[] array here... but we don't need it
+ * for anything more. We do, however, still need values_cnt.
+ * nvals will be the number of remaining entries in values[].
*/
if (num_mcv > 0)
{
@@ -2193,10 +2182,10 @@ compute_scalar_stats(VacAttrStatsP stats,
stats->stats_valid = true;
stats->stanullfrac = 1.0;
if (is_varwidth)
- stats->stawidth = 0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
else
stats->stawidth = stats->attrtype->typlen;
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
/* We don't need to bother cleaning up any of our temporary palloc's */
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index da133788960..69d97d09237 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.125 2005/10/06 21:30:32 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.126 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,8 +106,7 @@
*/
static List *pendingNotifies = NIL;
-static List *upperPendingNotifies = NIL; /* list of upper-xact
- * lists */
+static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
/*
* State for inbound notifies consists of two flags: one saying whether
@@ -158,8 +157,8 @@ Async_Notify(const char *relname)
if (!AsyncExistsPendingNotify(relname))
{
/*
- * The name list needs to live until end of transaction, so store
- * it in the transaction context.
+ * The name list needs to live until end of transaction, so store it
+ * in the transaction context.
*/
MemoryContext oldcontext;
@@ -208,7 +207,7 @@ Async_Listen(const char *relname)
Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
if (listener->listenerpid == MyProcPid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
+ strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
{
alreadyListener = true;
/* No need to scan the rest of the table */
@@ -298,14 +297,14 @@ Async_Unlisten(const char *relname)
Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
if (listener->listenerpid == MyProcPid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
+ strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
{
/* Found the matching tuple, delete it */
simple_heap_delete(lRel, &tuple->t_self);
/*
- * We assume there can be only one match, so no need to scan
- * the rest of the table
+ * We assume there can be only one match, so no need to scan the
+ * rest of the table
*/
break;
}
@@ -387,10 +386,10 @@ static void
Async_UnlistenOnExit(int code, Datum arg)
{
/*
- * We need to start/commit a transaction for the unlisten, but if
- * there is already an active transaction we had better abort that one
- * first. Otherwise we'd end up committing changes that probably
- * ought to be discarded.
+ * We need to start/commit a transaction for the unlisten, but if there is
+ * already an active transaction we had better abort that one first.
+ * Otherwise we'd end up committing changes that probably ought to be
+ * discarded.
*/
AbortOutOfAnyTransaction();
/* Now we can do the unlisten */
@@ -404,14 +403,14 @@ Async_UnlistenOnExit(int code, Datum arg)
*--------------------------------------------------------------
* AtPrepare_Notify
*
- * This is called at the prepare phase of a two-phase
+ * This is called at the prepare phase of a two-phase
* transaction. Save the state for possible commit later.
*--------------------------------------------------------------
*/
void
AtPrepare_Notify(void)
{
- ListCell *p;
+ ListCell *p;
foreach(p, pendingNotifies)
{
@@ -423,8 +422,8 @@ AtPrepare_Notify(void)
/*
* We can clear the state immediately, rather than needing a separate
- * PostPrepare call, because if the transaction fails we'd just
- * discard the state anyway.
+ * PostPrepare call, because if the transaction fails we'd just discard
+ * the state anyway.
*/
ClearPendingNotifies();
}
@@ -464,12 +463,11 @@ AtCommit_Notify(void)
nulls[Natts_pg_listener];
if (pendingNotifies == NIL)
- return; /* no NOTIFY statements in this
- * transaction */
+ return; /* no NOTIFY statements in this transaction */
/*
- * NOTIFY is disabled if not normal processing mode. This test used to
- * be in xact.c, but it seems cleaner to do it here.
+ * NOTIFY is disabled if not normal processing mode. This test used to be
+ * in xact.c, but it seems cleaner to do it here.
*/
if (!IsNormalProcessingMode())
{
@@ -503,10 +501,10 @@ AtCommit_Notify(void)
if (listenerPID == MyProcPid)
{
/*
- * Self-notify: no need to bother with table update. Indeed,
- * we *must not* clear the notification field in this path, or
- * we could lose an outside notify, which'd be bad for
- * applications that ignore self-notify messages.
+ * Self-notify: no need to bother with table update. Indeed, we
+ * *must not* clear the notification field in this path, or we
+ * could lose an outside notify, which'd be bad for applications
+ * that ignore self-notify messages.
*/
if (Trace_notify)
@@ -521,27 +519,27 @@ AtCommit_Notify(void)
listenerPID);
/*
- * If someone has already notified this listener, we don't
- * bother modifying the table, but we do still send a SIGUSR2
- * signal, just in case that backend missed the earlier signal
- * for some reason. It's OK to send the signal first, because
- * the other guy can't read pg_listener until we unlock it.
+ * If someone has already notified this listener, we don't bother
+ * modifying the table, but we do still send a SIGUSR2 signal,
+ * just in case that backend missed the earlier signal for some
+ * reason. It's OK to send the signal first, because the other
+ * guy can't read pg_listener until we unlock it.
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
/*
- * Get rid of pg_listener entry if it refers to a PID that
- * no longer exists. Presumably, that backend crashed
- * without deleting its pg_listener entries. This code
- * used to only delete the entry if errno==ESRCH, but as
- * far as I can see we should just do it for any failure
- * (certainly at least for EPERM too...)
+ * Get rid of pg_listener entry if it refers to a PID that no
+ * longer exists. Presumably, that backend crashed without
+ * deleting its pg_listener entries. This code used to only
+ * delete the entry if errno==ESRCH, but as far as I can see
+ * we should just do it for any failure (certainly at least
+ * for EPERM too...)
*/
simple_heap_delete(lRel, &lTuple->t_self);
}
else if (listener->notification == 0)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -551,17 +549,16 @@ AtCommit_Notify(void)
/*
* We cannot use simple_heap_update here because the tuple
* could have been modified by an uncommitted transaction;
- * specifically, since UNLISTEN releases exclusive lock on
- * the table before commit, the other guy could already
- * have tried to unlisten. There are no other cases where
- * we should be able to see an uncommitted update or
- * delete. Therefore, our response to a
- * HeapTupleBeingUpdated result is just to ignore it. We
- * do *not* wait for the other guy to commit --- that
- * would risk deadlock, and we don't want to block while
- * holding the table lock anyway for performance reasons.
- * We also ignore HeapTupleUpdated, which could occur if
- * the other guy commits between our heap_getnext and
+ * specifically, since UNLISTEN releases exclusive lock on the
+ * table before commit, the other guy could already have tried
+ * to unlisten. There are no other cases where we should be
+ * able to see an uncommitted update or delete. Therefore, our
+ * response to a HeapTupleBeingUpdated result is just to
+ * ignore it. We do *not* wait for the other guy to commit
+ * --- that would risk deadlock, and we don't want to block
+ * while holding the table lock anyway for performance
+ * reasons. We also ignore HeapTupleUpdated, which could occur
+ * if the other guy commits between our heap_getnext and
* heap_update calls.
*/
result = heap_update(lRel, &lTuple->t_self, rTuple,
@@ -603,10 +600,10 @@ AtCommit_Notify(void)
/*
* We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that notified backends see our tuple updates when they look.
- * Else they might disregard the signal, which would make the
- * application programmer very unhappy.
+ * until end of transaction (which is about to happen, anyway) to ensure
+ * that notified backends see our tuple updates when they look. Else they
+ * might disregard the signal, which would make the application programmer
+ * very unhappy.
*/
heap_close(lRel, NoLock);
@@ -676,8 +673,7 @@ AtSubCommit_Notify(void)
GetCurrentTransactionNestLevel() - 2);
/*
- * We could try to eliminate duplicates here, but it seems not
- * worthwhile.
+ * We could try to eliminate duplicates here, but it seems not worthwhile.
*/
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);
}
@@ -695,10 +691,10 @@ AtSubAbort_Notify(void)
* subxact are no longer interesting, and the space will be freed when
* CurTransactionContext is recycled.
*
- * This routine could be called more than once at a given nesting level
- * if there is trouble during subxact abort. Avoid dumping core by
- * using GetCurrentTransactionNestLevel as the indicator of how far
- * we need to prune the list.
+ * This routine could be called more than once at a given nesting level if
+ * there is trouble during subxact abort. Avoid dumping core by using
+ * GetCurrentTransactionNestLevel as the indicator of how far we need to
+ * prune the list.
*/
while (list_length(upperPendingNotifies) > my_level - 2)
{
@@ -731,9 +727,9 @@ NotifyInterruptHandler(SIGNAL_ARGS)
/*
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
- * here. Some helpful soul had this routine sprinkled with TPRINTFs,
- * which would likely lead to corruption of stdio buffers if they were
- * ever turned on.
+ * here. Some helpful soul had this routine sprinkled with TPRINTFs, which
+ * would likely lead to corruption of stdio buffers if they were ever
+ * turned on.
*/
/* Don't joggle the elbow of proc_exit */
@@ -745,19 +741,18 @@ NotifyInterruptHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it
- * off while messing with the NOTIFY state. (We would have to
- * save and restore it anyway, because PGSemaphore operations
- * inside ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it off
+ * while messing with the NOTIFY state. (We would have to save and
+ * restore it anyway, because PGSemaphore operations inside
+ * ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
/*
* I'm not sure whether some flavors of Unix might allow another
- * SIGUSR2 occurrence to recursively interrupt this routine. To
- * cope with the possibility, we do the same sort of dance that
- * EnableNotifyInterrupt must do --- see that routine for
- * comments.
+ * SIGUSR2 occurrence to recursively interrupt this routine. To cope
+ * with the possibility, we do the same sort of dance that
+ * EnableNotifyInterrupt must do --- see that routine for comments.
*/
notifyInterruptEnabled = 0; /* disable any recursive signal */
notifyInterruptOccurred = 1; /* do at least one iteration */
@@ -781,8 +776,7 @@ NotifyInterruptHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if
- * needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
@@ -791,8 +785,7 @@ NotifyInterruptHandler(SIGNAL_ARGS)
else
{
/*
- * In this path it is NOT SAFE to do much of anything, except
- * this:
+ * In this path it is NOT SAFE to do much of anything, except this:
*/
notifyInterruptOccurred = 1;
}
@@ -820,27 +813,25 @@ EnableNotifyInterrupt(void)
return; /* not really idle */
/*
- * This code is tricky because we are communicating with a signal
- * handler that could interrupt us at any point. If we just checked
- * notifyInterruptOccurred and then set notifyInterruptEnabled, we
- * could fail to respond promptly to a signal that happens in between
- * those two steps. (A very small time window, perhaps, but Murphy's
- * Law says you can hit it...) Instead, we first set the enable flag,
- * then test the occurred flag. If we see an unserviced interrupt has
- * occurred, we re-clear the enable flag before going off to do the
- * service work. (That prevents re-entrant invocation of
- * ProcessIncomingNotify() if another interrupt occurs.) If an
- * interrupt comes in between the setting and clearing of
- * notifyInterruptEnabled, then it will have done the service work and
- * left notifyInterruptOccurred zero, so we have to check again after
- * clearing enable. The whole thing has to be in a loop in case
- * another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no
- * unserviced interrupt.
+ * This code is tricky because we are communicating with a signal handler
+ * that could interrupt us at any point. If we just checked
+ * notifyInterruptOccurred and then set notifyInterruptEnabled, we could
+ * fail to respond promptly to a signal that happens in between those two
+ * steps. (A very small time window, perhaps, but Murphy's Law says you
+ * can hit it...) Instead, we first set the enable flag, then test the
+ * occurred flag. If we see an unserviced interrupt has occurred, we
+ * re-clear the enable flag before going off to do the service work.
+ * (That prevents re-entrant invocation of ProcessIncomingNotify() if
+ * another interrupt occurs.) If an interrupt comes in between the setting
+ * and clearing of notifyInterruptEnabled, then it will have done the
+ * service work and left notifyInterruptOccurred zero, so we have to check
+ * again after clearing enable. The whole thing has to be in a loop in
+ * case another interrupt occurs while we're servicing the first. Once we
+ * get out of the loop, enable is set and we know there is no unserviced
+ * interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this
- * code. Hopefully, they all understand what "volatile" means these
- * days.
+ * NB: an overenthusiastic optimizing compiler could easily break this code.
+ * Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
@@ -960,8 +951,7 @@ ProcessIncomingNotify(void)
* Rewrite the tuple with 0 in notification column.
*
* simple_heap_update is safe here because no one else would have
- * tried to UNLISTEN us, so there can be no uncommitted
- * changes.
+ * tried to UNLISTEN us, so there can be no uncommitted changes.
*/
rTuple = heap_modifytuple(lTuple, tdesc, value, nulls, repl);
simple_heap_update(lRel, &lTuple->t_self, rTuple);
@@ -975,18 +965,17 @@ ProcessIncomingNotify(void)
/*
* We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that other backends see our tuple updates when they look.
- * Otherwise, a transaction started after this one might mistakenly
- * think it doesn't need to send this backend a new NOTIFY.
+ * until end of transaction (which is about to happen, anyway) to ensure
+ * that other backends see our tuple updates when they look. Otherwise, a
+ * transaction started after this one might mistakenly think it doesn't
+ * need to send this backend a new NOTIFY.
*/
heap_close(lRel, NoLock);
CommitTransactionCommand();
/*
- * Must flush the notify messages to ensure frontend gets them
- * promptly.
+ * Must flush the notify messages to ensure frontend gets them promptly.
*/
pq_flush();
@@ -1022,8 +1011,7 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
/*
* NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
- * ProcessIncomingNotify will do it after finding all the
- * notifies.
+ * ProcessIncomingNotify will do it after finding all the notifies.
*/
}
else
@@ -1052,11 +1040,11 @@ static void
ClearPendingNotifies(void)
{
/*
- * We used to have to explicitly deallocate the list members and
- * nodes, because they were malloc'd. Now, since we know they are
- * palloc'd in CurTransactionContext, we need not do that --- they'll
- * go away automatically at transaction exit. We need only reset the
- * list head pointer.
+ * We used to have to explicitly deallocate the list members and nodes,
+ * because they were malloc'd. Now, since we know they are palloc'd in
+ * CurTransactionContext, we need not do that --- they'll go away
+ * automatically at transaction exit. We need only reset the list head
+ * pointer.
*/
pendingNotifies = NIL;
}
@@ -1071,11 +1059,10 @@ notify_twophase_postcommit(TransactionId xid, uint16 info,
void *recdata, uint32 len)
{
/*
- * Set up to issue the NOTIFY at the end of my own
- * current transaction. (XXX this has some issues if my own
- * transaction later rolls back, or if there is any significant
- * delay before I commit. OK for now because we disallow
- * COMMIT PREPARED inside a transaction block.)
+ * Set up to issue the NOTIFY at the end of my own current transaction.
+ * (XXX this has some issues if my own transaction later rolls back, or if
+ * there is any significant delay before I commit. OK for now because we
+ * disallow COMMIT PREPARED inside a transaction block.)
*/
Async_Notify((char *) recdata);
}
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 1d5a916c544..35420a87c0b 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.139 2005/08/26 03:07:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.140 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -144,8 +144,8 @@ cluster(ClusterStmt *stmt)
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
@@ -161,24 +161,24 @@ cluster(ClusterStmt *stmt)
else
{
/*
- * This is the "multi relation" case. We need to cluster all
- * tables that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all tables
+ * that have some index with indisclustered set.
*/
MemoryContext cluster_context;
List *rvs;
ListCell *rv;
/*
- * We cannot run this form of CLUSTER inside a user transaction
- * block; we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction block;
+ * we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away even in case
- * of error.
+ * Since it is a child of PortalContext, it will go away even in case of
+ * error.
*/
cluster_context = AllocSetContextCreate(PortalContext,
"Cluster",
@@ -187,8 +187,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives
- * in cluster_context.
+ * Build the list of relations to cluster. Note that this lives in
+ * cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -239,12 +239,12 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
CHECK_FOR_INTERRUPTS();
/*
- * Since we may open a new transaction for each relation, we have to
- * check that the relation still is what we think it is.
+ * Since we may open a new transaction for each relation, we have to check
+ * that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests. We
- * *must* skip the one on indisclustered since it would reject an
- * attempt to cluster a not-previously-clustered index.
+ * If this is a single-transaction CLUSTER, we can skip these tests. We *must*
+ * skip the one on indisclustered since it would reject an attempt to
+ * cluster a not-previously-clustered index.
*/
if (recheck)
{
@@ -284,10 +284,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
}
/*
- * We grab exclusive access to the target rel and index for the
- * duration of the transaction. (This is redundant for the single-
- * transaction case, since cluster() already did it.) The index lock
- * is taken inside check_index_is_clusterable.
+ * We grab exclusive access to the target rel and index for the duration
+ * of the transaction. (This is redundant for the single- transaction
+ * case, since cluster() already did it.) The index lock is taken inside
+ * check_index_is_clusterable.
*/
OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock);
@@ -328,26 +328,26 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not
- * index every row of the relation). We could relax this by making a
- * separate seqscan pass over the table to copy the missing rows, but
- * that seems expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not index
+ * every row of the relation). We could relax this by making a separate
+ * seqscan pass over the table to copy the missing rows, but that seems
+ * expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on partial index \"%s\"",
RelationGetRelationName(OldIndex))));
-
+
if (!OldIndex->rd_am->amindexnulls)
{
AttrNumber colno;
/*
- * If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
- * at the first column; multicolumn-capable AMs are *required* to
- * index nulls in columns after the first.
+ * If the AM doesn't index nulls, then it's a partial index unless we
+ * can prove all the rows are non-null. Note we only need look at the
+ * first column; multicolumn-capable AMs are *required* to index nulls
+ * in columns after the first.
*/
colno = OldIndex->rd_index->indkey.values[0];
if (colno > 0)
@@ -358,11 +358,11 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on index \"%s\" because access method\n"
"does not handle null values",
- RelationGetRelationName(OldIndex)),
+ RelationGetRelationName(OldIndex)),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL%s",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname),
- recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n"
- "specification from the table." : ".")));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname),
+ recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n"
+ "specification from the table." : ".")));
}
else if (colno < 0)
{
@@ -374,15 +374,15 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on expressional index \"%s\" because its index access\n"
"method does not handle null values",
- RelationGetRelationName(OldIndex))));
+ RelationGetRelationName(OldIndex))));
}
/*
- * Disallow clustering system relations. This will definitely NOT
- * work for shared relations (we have no way to update pg_class rows
- * in other databases), nor for nailed-in-cache relations (the
- * relfilenode values for those are hardwired, see relcache.c). It
- * might work for other system relations, but I ain't gonna risk it.
+ * Disallow clustering system relations. This will definitely NOT work
+ * for shared relations (we have no way to update pg_class rows in other
+ * databases), nor for nailed-in-cache relations (the relfilenode values
+ * for those are hardwired, see relcache.c). It might work for other
+ * system relations, but I ain't gonna risk it.
*/
if (IsSystemRelation(OldHeap))
ereport(ERROR,
@@ -391,13 +391,13 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Don't allow cluster on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow cluster on temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -454,8 +454,8 @@ mark_index_clustered(Relation rel, Oid indexOid)
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * Unset the bit if set. We know it's wrong because we checked
- * this earlier.
+ * Unset the bit if set. We know it's wrong because we checked this
+ * earlier.
*/
if (indexForm->indisclustered)
{
@@ -503,20 +503,18 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
heap_close(OldHeap, NoLock);
/*
- * Create the new heap, using a temporary name in the same namespace
- * as the existing table. NOTE: there is some risk of collision with
- * user relnames. Working around this seems more trouble than it's
- * worth; in particular, we can't create the new heap in a different
- * namespace from the old, or we will have problems with the TEMP
- * status of temp tables.
+ * Create the new heap, using a temporary name in the same namespace as
+ * the existing table. NOTE: there is some risk of collision with user
+ * relnames. Working around this seems more trouble than it's worth; in
+ * particular, we can't create the new heap in a different namespace from
+ * the old, or we will have problems with the TEMP status of temp tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName, tableSpace);
/*
- * We don't need CommandCounterIncrement() because make_new_heap did
- * it.
+ * We don't need CommandCounterIncrement() because make_new_heap did it.
*/
/*
@@ -546,9 +544,9 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table, which
- * is all-new at this point). We do not need
- * CommandCounterIncrement() because reindex_relation does it.
+ * Rebuild each index on the relation (but not the toast table, which is
+ * all-new at this point). We do not need CommandCounterIncrement()
+ * because reindex_relation does it.
*/
reindex_relation(tableOid, false);
}
@@ -587,15 +585,15 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace)
allowSystemTableMods);
/*
- * Advance command counter so that the newly-created relation's
- * catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's catalog
+ * tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the new relation. Note that
- * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
- * that the TOAST table will be visible for insertion.
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
+ * the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(OIDNewHeap, true);
@@ -629,8 +627,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
OldIndex = index_open(OIDOldIndex);
/*
- * Their tuple descriptors should be exactly alike, but here we only
- * need assume that they have the same number of columns.
+ * Their tuple descriptors should be exactly alike, but here we only need
+ * assume that they have the same number of columns.
*/
oldTupDesc = RelationGetDescr(OldHeap);
newTupDesc = RelationGetDescr(NewHeap);
@@ -654,15 +652,14 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
* We cannot simply pass the tuple to heap_insert(), for several
* reasons:
*
- * 1. heap_insert() will overwrite the commit-status fields of the
- * tuple it's handed. This would trash the source relation, which is
- * bad news if we abort later on. (This was a bug in releases thru
- * 7.0)
+ * 1. heap_insert() will overwrite the commit-status fields of the tuple
+ * it's handed. This would trash the source relation, which is bad
+ * news if we abort later on. (This was a bug in releases thru 7.0)
*
- * 2. We'd like to squeeze out the values of any dropped columns,
- * both to save space and to ensure we have no corner-case failures.
- * (It's possible for example that the new table hasn't got a TOAST
- * table and so is unable to store any large values of dropped cols.)
+ * 2. We'd like to squeeze out the values of any dropped columns, both to
+ * save space and to ensure we have no corner-case failures. (It's
+ * possible for example that the new table hasn't got a TOAST table
+ * and so is unable to store any large values of dropped cols.)
*
* 3. The tuple might not even be legal for the new table; this is
* currently only known to happen as an after-effect of ALTER TABLE
@@ -784,19 +781,18 @@ swap_relation_files(Oid r1, Oid r2)
CatalogCloseIndexes(indstate);
/*
- * If we have toast tables associated with the relations being
- * swapped, change their dependency links to re-associate them with
- * their new owning relations. Otherwise the wrong one will get
- * dropped ...
+ * If we have toast tables associated with the relations being swapped,
+ * change their dependency links to re-associate them with their new
+ * owning relations. Otherwise the wrong one will get dropped ...
*
* NOTE: it is possible that only one table has a toast table; this can
- * happen in CLUSTER if there were dropped columns in the old table,
- * and in ALTER TABLE when adding or changing type of columns.
+ * happen in CLUSTER if there were dropped columns in the old table, and
+ * in ALTER TABLE when adding or changing type of columns.
*
- * NOTE: at present, a TOAST table's only dependency is the one on its
- * owning table. If more are ever created, we'd need to use something
- * more selective than deleteDependencyRecordsFor() to get rid of only
- * the link we want.
+ * NOTE: at present, a TOAST table's only dependency is the one on its owning
+ * table. If more are ever created, we'd need to use something more
+ * selective than deleteDependencyRecordsFor() to get rid of only the link
+ * we want.
*/
if (relform1->reltoastrelid || relform2->reltoastrelid)
{
@@ -845,16 +841,16 @@ swap_relation_files(Oid r1, Oid r2)
/*
* Blow away the old relcache entries now. We need this kluge because
- * relcache.c keeps a link to the smgr relation for the physical file,
- * and that will be out of date as soon as we do
- * CommandCounterIncrement. Whichever of the rels is the second to be
- * cleared during cache invalidation will have a dangling reference to
- * an already-deleted smgr relation. Rather than trying to avoid this
- * by ordering operations just so, it's easiest to not have the
- * relcache entries there at all. (Fortunately, since one of the
- * entries is local in our transaction, it's sufficient to clear out
- * our own relcache this way; the problem cannot arise for other
- * backends when they see our update on the non-local relation.)
+ * relcache.c keeps a link to the smgr relation for the physical file, and
+ * that will be out of date as soon as we do CommandCounterIncrement.
+ * Whichever of the rels is the second to be cleared during cache
+ * invalidation will have a dangling reference to an already-deleted smgr
+ * relation. Rather than trying to avoid this by ordering operations just
+ * so, it's easiest to not have the relcache entries there at all.
+ * (Fortunately, since one of the entries is local in our transaction,
+ * it's sufficient to clear out our own relcache this way; the problem
+ * cannot arise for other backends when they see our update on the
+ * non-local relation.)
*/
RelationForgetRelation(r1);
RelationForgetRelation(r2);
@@ -886,9 +882,9 @@ get_tables_to_cluster(MemoryContext cluster_context)
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot
- * ever have indisclustered set, because CLUSTER will refuse to set it
- * when called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot ever
+ * have indisclustered set, because CLUSTER will refuse to set it when
+ * called with one of them as argument.
*/
indRelation = heap_open(IndexRelationId, AccessShareLock);
ScanKeyInit(&entry,
@@ -904,8 +900,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;
/*
- * We have to build the list in a different memory context so it
- * will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it will
+ * survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 8177e39c71c..cf7dc06fa7f 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.83 2005/04/14 20:03:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -310,10 +310,9 @@ CommentRelation(int objtype, List *relname, char *comment)
tgtrel = makeRangeVarFromNameList(relname);
/*
- * Open the relation. We do this mainly to acquire a lock that
- * ensures no one else drops the relation before we commit. (If they
- * did, they'd fail to remove the entry we are about to make in
- * pg_description.)
+ * Open the relation. We do this mainly to acquire a lock that ensures no
+ * one else drops the relation before we commit. (If they did, they'd
+ * fail to remove the entry we are about to make in pg_description.)
*/
relation = relation_openrv(tgtrel, AccessShareLock);
@@ -441,17 +440,16 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(linitial(qualname));
/*
- * We cannot currently support cross-database comments (since other
- * DBs cannot see pg_description of this database). So, we reject
- * attempts to comment on a database other than the current one.
- * Someday this might be improved, but it would take a redesigned
- * infrastructure.
+ * We cannot currently support cross-database comments (since other DBs
+ * cannot see pg_description of this database). So, we reject attempts to
+ * comment on a database other than the current one. Someday this might be
+ * improved, but it would take a redesigned infrastructure.
*
- * When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from
- * completing (which is really pg_restore's fault, but for now we will
- * work around the problem here). Consensus is that the best fix is
- * to treat wrong database name as a WARNING not an ERROR.
+ * When loading a dump, we may see a COMMENT ON DATABASE for the old name of
+ * the database. Erroring out would prevent pg_restore from completing
+ * (which is really pg_restore's fault, but for now we will work around
+ * the problem here). Consensus is that the best fix is to treat wrong
+ * database name as a WARNING not an ERROR.
*/
/* First get the database OID */
@@ -467,8 +465,8 @@ CommentDatabase(List *qualname, char *comment)
/* Only allow comments on the current database */
if (oid != MyDatabaseId)
{
- ereport(WARNING, /* throw just a warning so pg_restore
- * doesn't fail */
+ ereport(WARNING, /* throw just a warning so pg_restore doesn't
+ * fail */
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("database comments may only be applied to the current database")));
return;
@@ -587,8 +585,8 @@ CommentRule(List *qualname, char *comment)
ForwardScanDirection)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple rules named \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errmsg("there are multiple rules named \"%s\"", rulename),
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@@ -616,8 +614,8 @@ CommentRule(List *qualname, char *comment)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" does not exist",
- rulename, RelationGetRelationName(relation))));
+ errmsg("rule \"%s\" for relation \"%s\" does not exist",
+ rulename, RelationGetRelationName(relation))));
Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
ruleoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
@@ -802,8 +800,8 @@ CommentTrigger(List *qualname, char *comment)
RelationGetRelationName(relation));
/*
- * Fetch the trigger tuple from pg_trigger. There can be only one
- * because of the unique index.
+ * Fetch the trigger tuple from pg_trigger. There can be only one because
+ * of the unique index.
*/
pg_trigger = heap_open(TriggerRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
@@ -879,9 +877,9 @@ CommentConstraint(List *qualname, char *comment)
RelationGetRelationName(relation));
/*
- * Fetch the constraint tuple from pg_constraint. There may be more
- * than one match, because constraints are not required to have unique
- * names; if so, error out.
+ * Fetch the constraint tuple from pg_constraint. There may be more than
+ * one match, because constraints are not required to have unique names;
+ * if so, error out.
*/
pg_constraint = heap_open(ConstraintRelationId, AccessShareLock);
@@ -902,8 +900,8 @@ CommentConstraint(List *qualname, char *comment)
if (OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("table \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ errmsg("table \"%s\" has multiple constraints named \"%s\"",
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@@ -914,8 +912,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for table \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for table \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Call CreateComments() to create/drop the comments */
CreateComments(conOid, ConstraintRelationId, 0, comment);
@@ -988,7 +986,7 @@ CommentLanguage(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on procedural language")));
+ errmsg("must be superuser to comment on procedural language")));
/* Call CreateComments() to create/drop the comments */
CreateComments(oid, LanguageRelationId, 0, comment);
@@ -1111,7 +1109,7 @@ CommentLargeObject(List *qualname, char *comment)
* strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(strVal(node))));
+ CStringGetDatum(strVal(node))));
break;
default:
elog(ERROR, "unrecognized node type: %d",
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index 912f35ea20b..53b3f854ce6 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.22 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.23 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,8 +74,8 @@ CreateConversionCommand(CreateConversionStmt *stmt)
to_encoding_name)));
/*
- * Check the existence of the conversion function. Function name could
- * be a qualified name.
+ * Check the existence of the conversion function. Function name could be
+ * a qualified name.
*/
funcoid = LookupFuncName(func_name, sizeof(funcargs) / sizeof(Oid),
funcargs, false);
@@ -87,8 +87,8 @@ CreateConversionCommand(CreateConversionStmt *stmt)
NameListToString(func_name));
/*
- * All seem ok, go ahead (possible failure would be a duplicate
- * conversion name)
+ * All seem ok, go ahead (possible failure would be a duplicate conversion
+ * name)
*/
ConversionCreate(conversion_name, namespaceId, GetUserId(),
from_encoding, to_encoding, funcoid, stmt->def);
@@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!pg_conversion_ownercheck(conversionOid,GetUserId()))
+ if (!pg_conversion_ownercheck(conversionOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
@@ -210,7 +210,7 @@ AlterConversionOwner(List *name, Oid newOwnerId)
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_conversion_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_conversion_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
@@ -227,8 +227,7 @@ AlterConversionOwner(List *name, Oid newOwnerId)
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
convForm->conowner = newOwnerId;
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index cd215cb4154..8ab402e6b74 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.252 2005/10/03 23:43:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.253 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -100,7 +100,7 @@ typedef struct CopyStateData
bool fe_eof; /* true if detected end of copy data */
EolType eol_type; /* EOL type of input */
int client_encoding; /* remote side's character encoding */
- bool need_transcoding; /* client encoding diff from server? */
+ bool need_transcoding; /* client encoding diff from server? */
bool client_only_encoding; /* encoding not valid on server? */
/* parameters from the COPY command */
@@ -111,12 +111,12 @@ typedef struct CopyStateData
bool csv_mode; /* Comma Separated Value format? */
bool header_line; /* CSV header line? */
char *null_print; /* NULL marker string (server encoding!) */
- int null_print_len; /* length of same */
+ int null_print_len; /* length of same */
char *delim; /* column delimiter (must be 1 byte) */
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
- List *force_quote_atts; /* integer list of attnums to FQ */
- List *force_notnull_atts; /* integer list of attnums to FNN */
+ List *force_quote_atts; /* integer list of attnums to FQ */
+ List *force_notnull_atts; /* integer list of attnums to FNN */
/* these are just for error messages, see copy_in_error_callback */
const char *cur_relname; /* table name for error messages */
@@ -127,26 +127,26 @@ typedef struct CopyStateData
/*
* These variables are used to reduce overhead in textual COPY FROM.
*
- * attribute_buf holds the separated, de-escaped text for each field of
- * the current line. The CopyReadAttributes functions return arrays of
+ * attribute_buf holds the separated, de-escaped text for each field of the
+ * current line. The CopyReadAttributes functions return arrays of
* pointers into this buffer. We avoid palloc/pfree overhead by re-using
* the buffer on each cycle.
*/
StringInfoData attribute_buf;
/*
- * Similarly, line_buf holds the whole input line being processed.
- * The input cycle is first to read the whole line into line_buf,
- * convert it to server encoding there, and then extract the individual
- * attribute fields into attribute_buf. line_buf is preserved unmodified
- * so that we can display it in error messages if appropriate.
+ * Similarly, line_buf holds the whole input line being processed. The
+ * input cycle is first to read the whole line into line_buf, convert it
+ * to server encoding there, and then extract the individual attribute
+ * fields into attribute_buf. line_buf is preserved unmodified so that we
+ * can display it in error messages if appropriate.
*/
StringInfoData line_buf;
- bool line_buf_converted; /* converted to server encoding? */
+ bool line_buf_converted; /* converted to server encoding? */
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
@@ -170,17 +170,17 @@ static void CopyFrom(CopyState cstate);
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
static bool CopyReadLineCSV(CopyState cstate);
-static int CopyReadAttributesText(CopyState cstate, int maxfields,
- char **fieldvals);
-static int CopyReadAttributesCSV(CopyState cstate, int maxfields,
- char **fieldvals);
+static int CopyReadAttributesText(CopyState cstate, int maxfields,
+ char **fieldvals);
+static int CopyReadAttributesCSV(CopyState cstate, int maxfields,
+ char **fieldvals);
static Datum CopyReadBinaryAttribute(CopyState cstate,
- int column_no, FmgrInfo *flinfo,
- Oid typioparam, int32 typmod,
- bool *isnull);
+ int column_no, FmgrInfo *flinfo,
+ Oid typioparam, int32 typmod,
+ bool *isnull);
static void CopyAttributeOutText(CopyState cstate, char *server_string);
static void CopyAttributeOutCSV(CopyState cstate, char *server_string,
- bool use_quote);
+ bool use_quote);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
static char *limit_printout_length(const char *str);
@@ -192,8 +192,8 @@ static void CopySendData(CopyState cstate, void *databuf, int datasize);
static void CopySendString(CopyState cstate, const char *str);
static void CopySendChar(CopyState cstate, char c);
static void CopySendEndOfRow(CopyState cstate);
-static int CopyGetData(CopyState cstate, void *databuf,
- int minread, int maxread);
+static int CopyGetData(CopyState cstate, void *databuf,
+ int minread, int maxread);
static void CopySendInt32(CopyState cstate, int32 val);
static bool CopyGetInt32(CopyState cstate, int32 *val);
static void CopySendInt16(CopyState cstate, int16 val);
@@ -230,7 +230,7 @@ SendCopyBegin(CopyState cstate)
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('H');
/* grottiness needed for old COPY OUT protocol */
pq_startcopyout();
@@ -242,7 +242,7 @@ SendCopyBegin(CopyState cstate)
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('B');
/* grottiness needed for old COPY OUT protocol */
pq_startcopyout();
@@ -276,7 +276,7 @@ ReceiveCopyBegin(CopyState cstate)
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('G');
cstate->copy_dest = COPY_OLD_FE;
}
@@ -286,7 +286,7 @@ ReceiveCopyBegin(CopyState cstate)
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('D');
cstate->copy_dest = COPY_OLD_FE;
}
@@ -408,7 +408,7 @@ CopySendEndOfRow(CopyState cstate)
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
@@ -420,7 +420,7 @@ CopySendEndOfRow(CopyState cstate)
static int
CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
{
- int bytesread = 0;
+ int bytesread = 0;
switch (cstate->copy_dest)
{
@@ -432,12 +432,13 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
errmsg("could not read from COPY file: %m")));
break;
case COPY_OLD_FE:
+
/*
* We cannot read more than minread bytes (which in practice is 1)
* because old protocol doesn't have any clear way of separating
- * the COPY stream from following data. This is slow, but not
- * any slower than the code path was originally, and we don't
- * care much anymore about the performance of old protocol.
+ * the COPY stream from following data. This is slow, but not any
+ * slower than the code path was originally, and we don't care
+ * much anymore about the performance of old protocol.
*/
if (pq_getbytes((char *) databuf, minread))
{
@@ -463,11 +464,11 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(cstate->fe_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
case 'd': /* CopyData */
@@ -480,16 +481,16 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(cstate->fe_msgbuf))));
+ pq_getmsgstring(cstate->fe_msgbuf))));
break;
case 'H': /* Flush */
case 'S': /* Sync */
/*
- * Ignore Flush/Sync for the convenience of
- * client libraries (such as libpq) that may
- * send those without noticing that the
- * command they just sent was COPY.
+ * Ignore Flush/Sync for the convenience of client
+ * libraries (such as libpq) that may send those
+ * without noticing that the command they just
+ * sent was COPY.
*/
goto readmessage;
default:
@@ -593,8 +594,8 @@ CopyGetInt16(CopyState cstate, int16 *val)
static bool
CopyLoadRawBuf(CopyState cstate)
{
- int nbytes;
- int inbytes;
+ int nbytes;
+ int inbytes;
if (cstate->raw_buf_index < cstate->raw_buf_len)
{
@@ -791,7 +792,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY delimiter must be a single character")));
- /* Check header */
+ /* Check header */
if (!cstate->csv_mode && cstate->header_line)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -827,23 +828,23 @@ DoCopy(const CopyStmt *stmt)
if (force_quote != NIL && is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force quote only available using COPY TO")));
+ errmsg("COPY force quote only available using COPY TO")));
/* Check force_notnull */
if (!cstate->csv_mode && force_notnull != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null available only in CSV mode")));
+ errmsg("COPY force not null available only in CSV mode")));
if (force_notnull != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null only available using COPY FROM")));
+ errmsg("COPY force not null only available using COPY FROM")));
/* Don't allow the delimiter to appear in the null string. */
if (strchr(cstate->null_print, cstate->delim[0]) != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY delimiter must not appear in the NULL specification")));
+ errmsg("COPY delimiter must not appear in the NULL specification")));
/* Don't allow the CSV quote char to appear in the null string. */
if (cstate->csv_mode &&
@@ -874,7 +875,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/* Don't allow COPY w/ OIDs to or from a table without them */
if (cstate->oids && !cstate->rel->rd_rel->relhasoids)
@@ -902,8 +903,8 @@ DoCopy(const CopyStmt *stmt)
if (!list_member_int(cstate->attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
@@ -924,8 +925,8 @@ DoCopy(const CopyStmt *stmt)
if (!list_member_int(cstate->attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
@@ -960,8 +961,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(cstate->rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(cstate->rel))));
}
if (pipe)
{
@@ -979,8 +980,8 @@ DoCopy(const CopyStmt *stmt)
if (cstate->copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(cstate->copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -1011,8 +1012,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(cstate->rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(cstate->rel))));
}
if (pipe)
{
@@ -1027,13 +1028,13 @@ DoCopy(const CopyStmt *stmt)
struct stat st;
/*
- * Prevent write to relative path ... too easy to shoot
- * oneself in the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot oneself in
+ * the foot by overwriting a database file ...
*/
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
cstate->copy_file = AllocateFile(filename, PG_BINARY_W);
@@ -1042,8 +1043,8 @@ DoCopy(const CopyStmt *stmt)
if (cstate->copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(cstate->copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -1069,10 +1070,9 @@ DoCopy(const CopyStmt *stmt)
}
/*
- * Close the relation. If reading, we can release the AccessShareLock
- * we got; if writing, we should hold the lock until end of
- * transaction to ensure that updates will be committed before lock is
- * released.
+ * Close the relation. If reading, we can release the AccessShareLock we
+ * got; if writing, we should hold the lock until end of transaction to
+ * ensure that updates will be committed before lock is released.
*/
heap_close(cstate->rel, (is_from ? NoLock : AccessShareLock));
@@ -1105,8 +1105,8 @@ DoCopyTo(CopyState cstate)
{
/*
* Make sure we turn off old-style COPY OUT mode upon error. It is
- * okay to do this in all cases, since it does nothing if the mode
- * is not on.
+ * okay to do this in all cases, since it does nothing if the mode is
+ * not on.
*/
pq_endcopyout(true);
PG_RE_THROW();
@@ -1138,7 +1138,7 @@ CopyTo(CopyState cstate)
attr = tupDesc->attrs;
num_phys_attrs = tupDesc->natts;
attr_count = list_length(cstate->attnumlist);
- null_print_client = cstate->null_print; /* default */
+ null_print_client = cstate->null_print; /* default */
/* Get info about the columns we need to process. */
out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
@@ -1167,9 +1167,9 @@ CopyTo(CopyState cstate)
/*
* Create a temporary memory context that we can reset once per row to
- * recover palloc'd memory. This avoids any problems with leaks
- * inside datatype output routines, and should be faster than retail
- * pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
+ * recover palloc'd memory. This avoids any problems with leaks inside
+ * datatype output routines, and should be faster than retail pfree's
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
@@ -1206,12 +1206,12 @@ CopyTo(CopyState cstate)
/* if a header has been requested send the line */
if (cstate->header_line)
{
- bool hdr_delim = false;
-
+ bool hdr_delim = false;
+
foreach(cur, cstate->attnumlist)
{
int attnum = lfirst_int(cur);
- char *colname;
+ char *colname;
if (hdr_delim)
CopySendChar(cstate, cstate->delim[0]);
@@ -1258,7 +1258,7 @@ CopyTo(CopyState cstate)
if (cstate->oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(HeapTupleGetOid(tuple))));
+ ObjectIdGetDatum(HeapTupleGetOid(tuple))));
CopySendString(cstate, string);
need_delim = true;
}
@@ -1356,7 +1356,7 @@ copy_in_error_callback(void *arg)
if (cstate->cur_attname && cstate->cur_attval)
{
/* error is relevant to a particular column */
- char *attval;
+ char *attval;
attval = limit_printout_length(cstate->cur_attval);
errcontext("COPY %s, line %d, column %s: \"%s\"",
@@ -1369,7 +1369,7 @@ copy_in_error_callback(void *arg)
/* error is relevant to a particular line */
if (cstate->line_buf_converted || !cstate->need_transcoding)
{
- char *lineval;
+ char *lineval;
lineval = limit_printout_length(cstate->line_buf.data);
errcontext("COPY %s, line %d: \"%s\"",
@@ -1379,12 +1379,12 @@ copy_in_error_callback(void *arg)
else
{
/*
- * Here, the line buffer is still in a foreign encoding,
- * and indeed it's quite likely that the error is precisely
- * a failure to do encoding conversion (ie, bad data). We
- * dare not try to convert it, and at present there's no way
- * to regurgitate it without conversion. So we have to punt
- * and just report the line number.
+ * Here, the line buffer is still in a foreign encoding, and
+ * indeed it's quite likely that the error is precisely a
+ * failure to do encoding conversion (ie, bad data). We dare
+ * not try to convert it, and at present there's no way to
+ * regurgitate it without conversion. So we have to punt and
+ * just report the line number.
*/
errcontext("COPY %s, line %d",
cstate->cur_relname, cstate->cur_lineno);
@@ -1474,8 +1474,8 @@ CopyFrom(CopyState cstate)
/*
* We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount of
- * code here that basically duplicated execUtils.c ...)
+ * index-entry-making machinery. (There used to be a huge amount of code
+ * here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
@@ -1499,9 +1499,9 @@ CopyFrom(CopyState cstate)
/*
* Pick up the required catalog information for each attribute in the
- * relation, including the input function, the element type (to pass
- * to the input function), and info about defaults and constraints.
- * (Which input function we use depends on text/binary format choice.)
+ * relation, including the input function, the element type (to pass to
+ * the input function), and info about defaults and constraints. (Which
+ * input function we use depends on text/binary format choice.)
*/
in_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
typioparams = (Oid *) palloc(num_phys_attrs * sizeof(Oid));
@@ -1519,7 +1519,7 @@ CopyFrom(CopyState cstate)
/* Fetch the input function and typioparam info */
if (cstate->binary)
getTypeBinaryInputInfo(attr[attnum - 1]->atttypid,
- &in_func_oid, &typioparams[attnum - 1]);
+ &in_func_oid, &typioparams[attnum - 1]);
else
getTypeInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
@@ -1553,12 +1553,12 @@ CopyFrom(CopyState cstate)
Node *node;
/*
- * Easiest way to do this is to use parse_coerce.c to set up
- * an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function
- * call and/or CoerceToDomain nodes.) The bottom of the
- * expression is a Param node so that we can fill in the
- * actual datum during the data input loop.
+ * Easiest way to do this is to use parse_coerce.c to set up an
+ * expression that checks the constraints. (At present, the
+ * expression might contain a length-coercion-function call and/or
+ * CoerceToDomain nodes.) The bottom of the expression is a Param
+ * node so that we can fill in the actual datum during the data
+ * input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@@ -1580,11 +1580,10 @@ CopyFrom(CopyState cstate)
AfterTriggerBeginQuery();
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable whether
- * we should do this for COPY, since it's not really an "INSERT"
- * statement as such. However, executing these triggers maintains
- * consistency with the EACH ROW triggers that we already fire on
- * COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether we
+ * should do this for COPY, since it's not really an "INSERT" statement as
+ * such. However, executing these triggers maintains consistency with the
+ * EACH ROW triggers that we already fire on COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
@@ -1612,20 +1611,20 @@ CopyFrom(CopyState cstate)
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
if (!CopyGetInt32(cstate, &tmp) ||
tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
if (CopyGetData(cstate, readSig, 1, 1) != 1)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
@@ -1700,9 +1699,8 @@ CopyFrom(CopyState cstate)
/*
* EOF at start of line means we're done. If we see EOF after
- * some characters, we act as though it was newline followed
- * by EOF, ie, process the line and then exit loop on next
- * iteration.
+ * some characters, we act as though it was newline followed by
+ * EOF, ie, process the line and then exit loop on next iteration.
*/
if (done && cstate->line_buf.len == 0)
break;
@@ -1732,7 +1730,7 @@ CopyFrom(CopyState cstate)
cstate->cur_attname = "oid";
cstate->cur_attval = string;
loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
+ CStringGetDatum(string)));
if (loaded_oid == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
@@ -1768,8 +1766,8 @@ CopyFrom(CopyState cstate)
cstate->cur_attval = string;
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
- ObjectIdGetDatum(typioparams[m]),
- Int32GetDatum(attr[m]->atttypmod));
+ ObjectIdGetDatum(typioparams[m]),
+ Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
cstate->cur_attname = NULL;
cstate->cur_attval = NULL;
@@ -1834,9 +1832,9 @@ CopyFrom(CopyState cstate)
}
/*
- * Now compute and insert any defaults available for the columns
- * not provided by the input data. Anything not processed here or
- * above will remain NULL.
+ * Now compute and insert any defaults available for the columns not
+ * provided by the input data. Anything not processed here or above
+ * will remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{
@@ -1863,9 +1861,9 @@ CopyFrom(CopyState cstate)
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the
- * expression to replace the value (consider e.g. a
- * timestamp precision restriction).
+ * Execute the constraint expression. Allow the expression to
+ * replace the value (consider e.g. a timestamp precision
+ * restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@@ -1886,7 +1884,7 @@ CopyFrom(CopyState cstate)
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1956,7 +1954,7 @@ CopyFrom(CopyState cstate)
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
@@ -1981,12 +1979,13 @@ CopyReadLine(CopyState cstate)
{
/*
* Reached EOF. In protocol version 3, we should ignore anything
- * after \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * after \. up to the protocol end of copy data. (XXX maybe better
+ * not to treat \. as special?)
*/
if (cstate->copy_dest == COPY_NEW_FE)
{
- do {
+ do
+ {
cstate->raw_buf_index = cstate->raw_buf_len;
} while (CopyLoadRawBuf(cstate));
}
@@ -2070,25 +2069,24 @@ CopyReadLineText(CopyState cstate)
result = false;
/*
- * The objective of this loop is to transfer the entire next input
- * line into line_buf. Hence, we only care for detecting newlines
- * (\r and/or \n) and the end-of-copy marker (\.).
+ * The objective of this loop is to transfer the entire next input line
+ * into line_buf. Hence, we only care for detecting newlines (\r and/or
+ * \n) and the end-of-copy marker (\.).
*
* For backwards compatibility we allow backslashes to escape newline
- * characters. Backslashes other than the end marker get put into the
+ * characters. Backslashes other than the end marker get put into the
* line_buf, since CopyReadAttributesText does its own escape processing.
*
* These four characters, and only these four, are assumed the same in
* frontend and backend encodings.
*
- * For speed, we try to move data to line_buf in chunks rather than
- * one character at a time. raw_buf_ptr points to the next character
- * to examine; any characters from raw_buf_index to raw_buf_ptr have
- * been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * For speed, we try to move data to line_buf in chunks rather than one
+ * character at a time. raw_buf_ptr points to the next character to
+ * examine; any characters from raw_buf_index to raw_buf_ptr have been
+ * determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and
- * raw_buf_len into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
+ * into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2098,31 +2096,33 @@ CopyReadLineText(CopyState cstate)
for (;;)
{
- int prev_raw_ptr;
- char c;
+ int prev_raw_ptr;
+ char c;
/* Load more data if needed */
if (raw_buf_ptr >= copy_buf_len || need_data)
{
/*
- * Transfer any approved data to line_buf; must do this to
- * be sure there is some room in raw_buf.
+ * Transfer any approved data to line_buf; must do this to be sure
+ * there is some room in raw_buf.
*/
if (raw_buf_ptr > cstate->raw_buf_index)
{
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
+ cstate->raw_buf + cstate->raw_buf_index,
raw_buf_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
}
+
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
hit_eof = true;
raw_buf_ptr = 0;
copy_buf_len = cstate->raw_buf_len;
+
/*
* If we are completely out of data, break out of the loop,
* reporting EOF.
@@ -2148,12 +2148,12 @@ CopyReadLineText(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0'
- * because of the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because of
+ * the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2161,8 +2161,8 @@ CopyReadLineText(CopyState cstate)
if (c == '\n')
{
- raw_buf_ptr++; /* eat newline */
- cstate->eol_type = EOL_CRNL; /* in case not set yet */
+ raw_buf_ptr++; /* eat newline */
+ cstate->eol_type = EOL_CRNL; /* in case not set yet */
}
else
{
@@ -2170,11 +2170,12 @@ CopyReadLineText(CopyState cstate)
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
+
/*
- * if we got here, it is the first line and we didn't
- * find \n, so don't consume the peeked character
+ * if we got here, it is the first line and we didn't find
+ * \n, so don't consume the peeked character
*/
cstate->eol_type = EOL_CR;
}
@@ -2183,7 +2184,7 @@ CopyReadLineText(CopyState cstate)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
+ errhint("Use \"\\r\" to represent carriage return.")));
/* If reach here, we have found the line terminator */
break;
}
@@ -2195,7 +2196,7 @@ CopyReadLineText(CopyState cstate)
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal newline found in data"),
errhint("Use \"\\n\" to represent newline.")));
- cstate->eol_type = EOL_NL; /* in case not set yet */
+ cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
@@ -2219,8 +2220,8 @@ CopyReadLineText(CopyState cstate)
}
/*
- * In non-CSV mode, backslash quotes the following character
- * even if it's a newline, so we always advance to next character
+ * In non-CSV mode, backslash quotes the following character even
+ * if it's a newline, so we always advance to next character
*/
c = copy_raw_buf[raw_buf_ptr++];
@@ -2230,7 +2231,7 @@ CopyReadLineText(CopyState cstate)
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2247,7 +2248,7 @@ CopyReadLineText(CopyState cstate)
}
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2265,13 +2266,13 @@ CopyReadLineText(CopyState cstate)
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * Transfer only the data before the \. into line_buf,
- * then discard the data and the \. sequence.
+ * Transfer only the data before the \. into line_buf, then
+ * discard the data and the \. sequence.
*/
if (prev_raw_ptr > cstate->raw_buf_index)
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
- prev_raw_ptr - cstate->raw_buf_index);
+ cstate->raw_buf + cstate->raw_buf_index,
+ prev_raw_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
result = true; /* report EOF */
break;
@@ -2280,10 +2281,10 @@ CopyReadLineText(CopyState cstate)
/*
* Do we need to be careful about trailing bytes of multibyte
- * characters? (See note above about client_only_encoding)
+ * characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first
- * byte of the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte of
+ * the character!
*/
if (cstate->client_only_encoding)
{
@@ -2291,7 +2292,7 @@ CopyReadLineText(CopyState cstate)
s[0] = c;
mblen = pg_encoding_mblen(cstate->client_encoding, s);
- if (raw_buf_ptr + (mblen-1) > copy_buf_len)
+ if (raw_buf_ptr + (mblen - 1) > copy_buf_len)
{
if (hit_eof)
{
@@ -2300,11 +2301,11 @@ CopyReadLineText(CopyState cstate)
result = true;
break;
}
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
- raw_buf_ptr += mblen-1;
+ raw_buf_ptr += mblen - 1;
}
} /* end of outer loop */
@@ -2337,7 +2338,8 @@ CopyReadLineCSV(CopyState cstate)
bool need_data;
bool hit_eof;
char s[2];
- bool in_quote = false, last_was_esc = false;
+ bool in_quote = false,
+ last_was_esc = false;
char quotec = cstate->quote[0];
char escapec = cstate->escape[0];
@@ -2351,25 +2353,24 @@ CopyReadLineCSV(CopyState cstate)
result = false;
/*
- * The objective of this loop is to transfer the entire next input
- * line into line_buf. Hence, we only care for detecting newlines
- * (\r and/or \n) and the end-of-copy marker (\.).
+ * The objective of this loop is to transfer the entire next input line
+ * into line_buf. Hence, we only care for detecting newlines (\r and/or
+ * \n) and the end-of-copy marker (\.).
*
- * In CSV mode, \r and \n inside a quoted field are just part of the
- * data value and are put in line_buf. We keep just enough state
- * to know if we are currently in a quoted field or not.
+ * In CSV mode, \r and \n inside a quoted field are just part of the data
+ * value and are put in line_buf. We keep just enough state to know if we
+ * are currently in a quoted field or not.
*
- * These four characters, and the CSV escape and quote characters,
- * are assumed the same in frontend and backend encodings.
+ * These four characters, and the CSV escape and quote characters, are
+ * assumed the same in frontend and backend encodings.
*
- * For speed, we try to move data to line_buf in chunks rather than
- * one character at a time. raw_buf_ptr points to the next character
- * to examine; any characters from raw_buf_index to raw_buf_ptr have
- * been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * For speed, we try to move data to line_buf in chunks rather than one
+ * character at a time. raw_buf_ptr points to the next character to
+ * examine; any characters from raw_buf_index to raw_buf_ptr have been
+ * determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and
- * raw_buf_len into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
+ * into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2379,31 +2380,33 @@ CopyReadLineCSV(CopyState cstate)
for (;;)
{
- int prev_raw_ptr;
- char c;
+ int prev_raw_ptr;
+ char c;
/* Load more data if needed */
if (raw_buf_ptr >= copy_buf_len || need_data)
{
/*
- * Transfer any approved data to line_buf; must do this to
- * be sure there is some room in raw_buf.
+ * Transfer any approved data to line_buf; must do this to be sure
+ * there is some room in raw_buf.
*/
if (raw_buf_ptr > cstate->raw_buf_index)
{
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
+ cstate->raw_buf + cstate->raw_buf_index,
raw_buf_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
}
+
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
hit_eof = true;
raw_buf_ptr = 0;
copy_buf_len = cstate->raw_buf_len;
+
/*
* If we are completely out of data, break out of the loop,
* reporting EOF.
@@ -2422,44 +2425,44 @@ CopyReadLineCSV(CopyState cstate)
/*
* If character is '\\' or '\r', we may need to look ahead below.
- * Force fetch of the next character if we don't already have it.
- * We need to do this before changing CSV state, in case one of
- * these characters is also the quote or escape character.
+ * Force fetch of the next character if we don't already have it. We
+ * need to do this before changing CSV state, in case one of these
+ * characters is also the quote or escape character.
*
- * Note: old-protocol does not like forced prefetch, but it's OK
- * here since we cannot validly be at EOF.
+ * Note: old-protocol does not like forced prefetch, but it's OK here
+ * since we cannot validly be at EOF.
*/
if (c == '\\' || c == '\r')
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
}
- /*
- * Dealing with quotes and escapes here is mildly tricky. If the
- * quote char is also the escape char, there's no problem - we
- * just use the char as a toggle. If they are different, we need
- * to ensure that we only take account of an escape inside a quoted
- * field and immediately preceding a quote char, and not the
- * second in a escape-escape sequence.
- */
+ /*
+ * Dealing with quotes and escapes here is mildly tricky. If the quote
+ * char is also the escape char, there's no problem - we just use the
+ * char as a toggle. If they are different, we need to ensure that we
+ * only take account of an escape inside a quoted field and
+ * immediately preceding a quote char, and not the second in a
+ * escape-escape sequence.
+ */
if (in_quote && c == escapec)
- last_was_esc = ! last_was_esc;
- if (c == quotec && ! last_was_esc)
- in_quote = ! in_quote;
+ last_was_esc = !last_was_esc;
+ if (c == quotec && !last_was_esc)
+ in_quote = !in_quote;
if (c != escapec)
last_was_esc = false;
/*
- * Updating the line count for embedded CR and/or LF chars is
- * necessarily a little fragile - this test is probably about
- * the best we can do. (XXX it's arguable whether we should
- * do this at all --- is cur_lineno a physical or logical count?)
- */
+ * Updating the line count for embedded CR and/or LF chars is
+ * necessarily a little fragile - this test is probably about the best
+ * we can do. (XXX it's arguable whether we should do this at all ---
+ * is cur_lineno a physical or logical count?)
+ */
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
cstate->cur_lineno++;
@@ -2472,12 +2475,12 @@ CopyReadLineCSV(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0'
- * because of the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because of
+ * the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2485,8 +2488,8 @@ CopyReadLineCSV(CopyState cstate)
if (c == '\n')
{
- raw_buf_ptr++; /* eat newline */
- cstate->eol_type = EOL_CRNL; /* in case not set yet */
+ raw_buf_ptr++; /* eat newline */
+ cstate->eol_type = EOL_CRNL; /* in case not set yet */
}
else
{
@@ -2494,11 +2497,12 @@ CopyReadLineCSV(CopyState cstate)
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unquoted carriage return found in data"),
+ errmsg("unquoted carriage return found in data"),
errhint("Use quoted CSV field to represent carriage return.")));
+
/*
- * if we got here, it is the first line and we didn't
- * find \n, so don't consume the peeked character
+ * if we got here, it is the first line and we didn't find
+ * \n, so don't consume the peeked character
*/
cstate->eol_type = EOL_CR;
}
@@ -2518,8 +2522,8 @@ CopyReadLineCSV(CopyState cstate)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("unquoted newline found in data"),
- errhint("Use quoted CSV field to represent newline.")));
- cstate->eol_type = EOL_NL; /* in case not set yet */
+ errhint("Use quoted CSV field to represent newline.")));
+ cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
@@ -2529,7 +2533,7 @@ CopyReadLineCSV(CopyState cstate)
*/
if (c == '\\' && cstate->line_buf.len == 0)
{
- char c2;
+ char c2;
/*
* If need more data, go back to loop top to load it.
@@ -2548,25 +2552,25 @@ CopyReadLineCSV(CopyState cstate)
}
/*
- * Note: we do not change c here since we aren't treating \
- * as escaping the next character.
+ * Note: we do not change c here since we aren't treating \ as
+ * escaping the next character.
*/
c2 = copy_raw_buf[raw_buf_ptr];
if (c2 == '.')
{
- raw_buf_ptr++; /* consume the '.' */
+ raw_buf_ptr++; /* consume the '.' */
/*
* Note: if we loop back for more data here, it does not
- * matter that the CSV state change checks are re-executed;
- * we will come back here with no important state changed.
+ * matter that the CSV state change checks are re-executed; we
+ * will come back here with no important state changed.
*/
if (cstate->eol_type == EOL_CRNL)
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2583,7 +2587,7 @@ CopyReadLineCSV(CopyState cstate)
}
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
@@ -2601,12 +2605,12 @@ CopyReadLineCSV(CopyState cstate)
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * Transfer only the data before the \. into line_buf,
- * then discard the data and the \. sequence.
+ * Transfer only the data before the \. into line_buf, then
+ * discard the data and the \. sequence.
*/
if (prev_raw_ptr > cstate->raw_buf_index)
appendBinaryStringInfo(&cstate->line_buf, cstate->raw_buf + cstate->raw_buf_index,
- prev_raw_ptr - cstate->raw_buf_index);
+ prev_raw_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
result = true; /* report EOF */
break;
@@ -2615,10 +2619,10 @@ CopyReadLineCSV(CopyState cstate)
/*
* Do we need to be careful about trailing bytes of multibyte
- * characters? (See note above about client_only_encoding)
+ * characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first
- * byte of the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte of
+ * the character!
*/
if (cstate->client_only_encoding)
{
@@ -2626,7 +2630,7 @@ CopyReadLineCSV(CopyState cstate)
s[0] = c;
mblen = pg_encoding_mblen(cstate->client_encoding, s);
- if (raw_buf_ptr + (mblen-1) > copy_buf_len)
+ if (raw_buf_ptr + (mblen - 1) > copy_buf_len)
{
if (hit_eof)
{
@@ -2635,11 +2639,11 @@ CopyReadLineCSV(CopyState cstate)
result = true;
break;
}
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
- raw_buf_ptr += mblen-1;
+ raw_buf_ptr += mblen - 1;
}
} /* end of outer loop */
@@ -2684,7 +2688,7 @@ GetDecimalFromHex(char hex)
* null_print is the null marker string. Note that this is compared to
* the pre-de-escaped input string.
*
- * The return value is the number of fields actually read. (We error out
+ * The return value is the number of fields actually read. (We error out
* if this would exceed maxfields, which is the length of fieldvals[].)
*/
static int
@@ -2716,9 +2720,9 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to
- * do it this way because enlarging attribute_buf mid-stream would
- * invalidate pointers already stored into fieldvals[].
+ * then transfer data without any checks for enough space. We need to do
+ * it this way because enlarging attribute_buf mid-stream would invalidate
+ * pointers already stored into fieldvals[].
*/
if (cstate->attribute_buf.maxlen <= cstate->line_buf.len)
enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len);
@@ -2750,7 +2754,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/* Scan data for field */
for (;;)
{
- char c;
+ char c;
end_ptr = cur_ptr;
if (cur_ptr >= line_end_ptr)
@@ -2776,41 +2780,41 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
case '5':
case '6':
case '7':
- {
- /* handle \013 */
- int val;
-
- val = OCTVALUE(c);
- if (cur_ptr < line_end_ptr)
{
- c = *cur_ptr;
- if (ISOCTAL(c))
+ /* handle \013 */
+ int val;
+
+ val = OCTVALUE(c);
+ if (cur_ptr < line_end_ptr)
{
- cur_ptr++;
- val = (val << 3) + OCTVALUE(c);
- if (cur_ptr < line_end_ptr)
+ c = *cur_ptr;
+ if (ISOCTAL(c))
{
- c = *cur_ptr;
- if (ISOCTAL(c))
+ cur_ptr++;
+ val = (val << 3) + OCTVALUE(c);
+ if (cur_ptr < line_end_ptr)
{
- cur_ptr++;
- val = (val << 3) + OCTVALUE(c);
+ c = *cur_ptr;
+ if (ISOCTAL(c))
+ {
+ cur_ptr++;
+ val = (val << 3) + OCTVALUE(c);
+ }
}
}
}
+ c = val & 0377;
}
- c = val & 0377;
- }
- break;
+ break;
case 'x':
/* Handle \x3F */
if (cur_ptr < line_end_ptr)
{
- char hexchar = *cur_ptr;
+ char hexchar = *cur_ptr;
if (isxdigit((unsigned char) hexchar))
{
- int val = GetDecimalFromHex(hexchar);
+ int val = GetDecimalFromHex(hexchar);
cur_ptr++;
if (cur_ptr < line_end_ptr)
@@ -2916,9 +2920,9 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to
- * do it this way because enlarging attribute_buf mid-stream would
- * invalidate pointers already stored into fieldvals[].
+ * then transfer data without any checks for enough space. We need to do
+ * it this way because enlarging attribute_buf mid-stream would invalidate
+ * pointers already stored into fieldvals[].
*/
if (cstate->attribute_buf.maxlen <= cstate->line_buf.len)
enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len);
@@ -2952,7 +2956,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals)
/* Scan data for field */
for (;;)
{
- char c;
+ char c;
end_ptr = cur_ptr;
if (cur_ptr >= line_end_ptr)
@@ -2980,7 +2984,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals)
*/
if (cur_ptr < line_end_ptr)
{
- char nextc = *cur_ptr;
+ char nextc = *cur_ptr;
if (nextc == escapec || nextc == quotec)
{
@@ -2990,6 +2994,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals)
}
}
}
+
/*
* end of quoted field. Must do this test after testing for escape
* in case quote char and escape char are the same (which is the
@@ -3141,9 +3146,9 @@ CopyAttributeOutText(CopyState cstate, char *server_string)
CopySendChar(cstate, '\\');
/*
- * We can skip pg_encoding_mblen() overhead when encoding
- * is safe, because in valid backend encodings, extra
- * bytes of a multibyte character never look like ASCII.
+ * We can skip pg_encoding_mblen() overhead when encoding is
+ * safe, because in valid backend encodings, extra bytes of a
+ * multibyte character never look like ASCII.
*/
if (cstate->client_only_encoding)
mblen = pg_encoding_mblen(cstate->client_encoding, string);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 548648066b8..accbafc8486 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.172 2005/10/10 20:02:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.173 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -202,11 +202,11 @@ createdb(const CreatedbStmt *stmt)
datdba = GetUserId();
/*
- * To create a database, must have createdb privilege and must be able
- * to become the target role (this does not imply that the target role
- * itself must have createdb privilege). The latter provision guards
- * against "giveaway" attacks. Note that a superuser will always have
- * both of these privileges a fortiori.
+ * To create a database, must have createdb privilege and must be able to
+ * become the target role (this does not imply that the target role itself
+ * must have createdb privilege). The latter provision guards against
+ * "giveaway" attacks. Note that a superuser will always have both of
+ * these privileges a fortiori.
*/
if (!have_createdb_privilege())
ereport(ERROR,
@@ -218,10 +218,10 @@ createdb(const CreatedbStmt *stmt)
/*
* Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole
- * time we are copying the source database doesn't seem like a good
- * idea, so accept possibility of race to create. We will check again
- * after we grab the exclusive lock.
+ * However, holding an exclusive lock on pg_database for the whole time we
+ * are copying the source database doesn't seem like a good idea, so
+ * accept possibility of race to create. We will check again after we
+ * grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL))
@@ -240,7 +240,7 @@ createdb(const CreatedbStmt *stmt)
&src_vacuumxid, &src_frozenxid, &src_deftablespace))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("template database \"%s\" does not exist", dbtemplate)));
+ errmsg("template database \"%s\" does not exist", dbtemplate)));
/*
* Permission check: to copy a DB that's not marked datistemplate, you
@@ -264,8 +264,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@@ -300,7 +300,7 @@ createdb(const CreatedbStmt *stmt)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
@@ -337,7 +337,7 @@ createdb(const CreatedbStmt *stmt)
/*
* Normally we mark the new database with the same datvacuumxid and
- * datfrozenxid as the source. However, if the source is not allowing
+ * datfrozenxid as the source. However, if the source is not allowing
* connections then we assume it is fully frozen, and we can set the
* current transaction ID as the xid limits. This avoids immediately
* starting to generate warnings after cloning template0.
@@ -346,9 +346,9 @@ createdb(const CreatedbStmt *stmt)
src_vacuumxid = src_frozenxid = GetCurrentTransactionId();
/*
- * Preassign OID for pg_database tuple, so that we can compute db
- * path. We have to open pg_database to do this, but we don't want
- * to take ExclusiveLock yet, so just do it and close again.
+ * Preassign OID for pg_database tuple, so that we can compute db path.
+ * We have to open pg_database to do this, but we don't want to take
+ * ExclusiveLock yet, so just do it and close again.
*/
pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock);
dboid = GetNewOid(pg_database_rel);
@@ -357,23 +357,23 @@ createdb(const CreatedbStmt *stmt)
/*
* Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers for
- * the source database, but bufmgr.c provides no API for that.)
+ * up-to-date for the copy. (We really only need to flush buffers for the
+ * source database, but bufmgr.c provides no API for that.)
*/
BufferSync();
/*
- * Once we start copying subdirectories, we need to be able to clean
- * 'em up if we fail. Establish a TRY block to make sure this happens.
- * (This is not a 100% solution, because of the possibility of failure
- * during transaction commit after we leave this routine, but it should
- * handle most scenarios.)
+ * Once we start copying subdirectories, we need to be able to clean 'em
+ * up if we fail. Establish a TRY block to make sure this happens. (This
+ * is not a 100% solution, because of the possibility of failure during
+ * transaction commit after we leave this routine, but it should handle
+ * most scenarios.)
*/
PG_TRY();
{
/*
- * Iterate through all tablespaces of the template database,
- * and copy each one to the new database.
+ * Iterate through all tablespaces of the template database, and copy
+ * each one to the new database.
*/
rel = heap_open(TableSpaceRelationId, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
@@ -478,8 +478,8 @@ createdb(const CreatedbStmt *stmt)
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
- HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID
- * selection */
+ HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID
+ * selection */
simple_heap_insert(pg_database_rel, tuple);
@@ -495,30 +495,31 @@ createdb(const CreatedbStmt *stmt)
/*
* We force a checkpoint before committing. This effectively means
* that committed XLOG_DBASE_CREATE operations will never need to be
- * replayed (at least not in ordinary crash recovery; we still have
- * to make the XLOG entry for the benefit of PITR operations).
- * This avoids two nasty scenarios:
+ * replayed (at least not in ordinary crash recovery; we still have to
+ * make the XLOG entry for the benefit of PITR operations). This
+ * avoids two nasty scenarios:
*
* #1: When PITR is off, we don't XLOG the contents of newly created
* indexes; therefore the drop-and-recreate-whole-directory behavior
* of DBASE_CREATE replay would lose such indexes.
*
* #2: Since we have to recopy the source database during DBASE_CREATE
- * replay, we run the risk of copying changes in it that were committed
- * after the original CREATE DATABASE command but before the system
- * crash that led to the replay. This is at least unexpected and at
- * worst could lead to inconsistencies, eg duplicate table names.
+ * replay, we run the risk of copying changes in it that were
+ * committed after the original CREATE DATABASE command but before the
+ * system crash that led to the replay. This is at least unexpected
+ * and at worst could lead to inconsistencies, eg duplicate table
+ * names.
*
* (Both of these were real bugs in releases 8.0 through 8.0.3.)
*
- * In PITR replay, the first of these isn't an issue, and the second
- * is only a risk if the CREATE DATABASE and subsequent template
- * database change both occur while a base backup is being taken.
- * There doesn't seem to be much we can do about that except document
- * it as a limitation.
+ * In PITR replay, the first of these isn't an issue, and the second is
+ * only a risk if the CREATE DATABASE and subsequent template database
+ * change both occur while a base backup is being taken. There doesn't
+ * seem to be much we can do about that except document it as a
+ * limitation.
*
- * Perhaps if we ever implement CREATE DATABASE in a less cheesy
- * way, we can avoid this.
+ * Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we
+ * can avoid this.
*/
RequestCheckpoint(true, false);
@@ -569,16 +570,16 @@ dropdb(const char *dbname)
errmsg("cannot drop the currently open database")));
/*
- * Obtain exclusive lock on pg_database. We need this to ensure that
- * no new backend starts up in the target database while we are
- * deleting it. (Actually, a new backend might still manage to start
- * up, because it isn't able to lock pg_database while starting. But
- * it will detect its error in ReverifyMyDatabase and shut down before
- * any serious damage is done. See postinit.c.)
+ * Obtain exclusive lock on pg_database. We need this to ensure that no
+ * new backend starts up in the target database while we are deleting it.
+ * (Actually, a new backend might still manage to start up, because it
+ * isn't able to lock pg_database while starting. But it will detect its
+ * error in ReverifyMyDatabase and shut down before any serious damage is
+ * done. See postinit.c.)
*
- * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient
- * since ReverifyMyDatabase takes RowShareLock. This allows ordinary
- * readers of pg_database to proceed in parallel.
+ * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient since
+ * ReverifyMyDatabase takes RowShareLock. This allows ordinary readers of
+ * pg_database to proceed in parallel.
*/
pgdbrel = heap_open(DatabaseRelationId, ExclusiveLock);
@@ -594,8 +595,8 @@ dropdb(const char *dbname)
/*
* Disallow dropping a DB that is marked istemplate. This is just to
- * prevent people from accidentally dropping template0 or template1;
- * they can do so if they're really determined ...
+ * prevent people from accidentally dropping template0 or template1; they
+ * can do so if they're really determined ...
*/
if (db_istemplate)
ereport(ERROR,
@@ -608,8 +609,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
@@ -626,8 +627,8 @@ dropdb(const char *dbname)
if (!HeapTupleIsValid(tup))
{
/*
- * This error should never come up since the existence of the
- * database is checked earlier
+ * This error should never come up since the existence of the database
+ * is checked earlier
*/
elog(ERROR, "database \"%s\" doesn't exist despite earlier reports to the contrary",
dbname);
@@ -641,8 +642,8 @@ dropdb(const char *dbname)
/*
* Delete any comments associated with the database
*
- * NOTE: this is probably dead code since any such comments should have
- * been in that database, not mine.
+ * NOTE: this is probably dead code since any such comments should have been
+ * in that database, not mine.
*/
DeleteComments(db_id, DatabaseRelationId, 0);
@@ -652,9 +653,9 @@ dropdb(const char *dbname)
dropDatabaseDependencies(db_id);
/*
- * Drop pages for this database that are in the shared buffer cache.
- * This is important to ensure that no remaining backend tries to
- * write out a dirty buffer to the dead database later...
+ * Drop pages for this database that are in the shared buffer cache. This
+ * is important to ensure that no remaining backend tries to write out a
+ * dirty buffer to the dead database later...
*/
DropBuffers(db_id);
@@ -701,8 +702,8 @@ RenameDatabase(const char *oldname, const char *newname)
key2;
/*
- * Obtain ExclusiveLock so that no new session gets started
- * while the rename is in progress.
+ * Obtain ExclusiveLock so that no new session gets started while the
+ * rename is in progress.
*/
rel = heap_open(DatabaseRelationId, ExclusiveLock);
@@ -720,10 +721,10 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("database \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the current database
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the current database somewhere,
+ * so renaming it could cause confusion. On the other hand, there may not
+ * be an actual problem besides a little confusion, so think about this
+ * and decide.
*/
if (HeapTupleGetOid(tup) == MyDatabaseId)
ereport(ERROR,
@@ -737,8 +738,8 @@ RenameDatabase(const char *oldname, const char *newname)
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyInit(&key2,
@@ -822,8 +823,7 @@ AlterDatabase(AlterDatabaseStmt *stmt)
connlimit = intVal(dconnlimit->arg);
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
@@ -868,8 +868,8 @@ AlterDatabase(AlterDatabaseStmt *stmt)
heap_close(rel, NoLock);
/*
- * We don't bother updating the flat file since the existing options
- * for ALTER DATABASE don't affect it.
+ * We don't bother updating the flat file since the existing options for
+ * ALTER DATABASE don't affect it.
*/
}
@@ -893,8 +893,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
@@ -958,8 +957,8 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
heap_close(rel, NoLock);
/*
- * We don't bother updating the flat file since ALTER DATABASE SET
- * doesn't affect it.
+ * We don't bother updating the flat file since ALTER DATABASE SET doesn't
+ * affect it.
*/
}
@@ -977,8 +976,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
Form_pg_database datForm;
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
@@ -1011,7 +1009,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
HeapTuple newtuple;
/* Otherwise, must be owner of the existing object */
- if (!pg_database_ownercheck(HeapTupleGetOid(tuple),GetUserId()))
+ if (!pg_database_ownercheck(HeapTupleGetOid(tuple), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
dbname);
@@ -1019,18 +1017,18 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
check_is_member_of_role(GetUserId(), newOwnerId);
/*
- * must have createdb rights
+ * must have createdb rights
*
- * NOTE: This is different from other alter-owner checks in
- * that the current user is checked for createdb privileges
- * instead of the destination owner. This is consistent
- * with the CREATE case for databases. Because superusers
- * will always have this right, we need no special case for them.
+ * NOTE: This is different from other alter-owner checks in that the
+ * current user is checked for createdb privileges instead of the
+ * destination owner. This is consistent with the CREATE case for
+ * databases. Because superusers will always have this right, we need
+ * no special case for them.
*/
if (!have_createdb_privilege())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of database")));
+ errmsg("permission denied to change owner of database")));
memset(repl_null, ' ', sizeof(repl_null));
memset(repl_repl, ' ', sizeof(repl_repl));
@@ -1332,10 +1330,9 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record)
dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id);
/*
- * Our theory for replaying a CREATE is to forcibly drop the
- * target subdirectory if present, then re-copy the source data.
- * This may be more work than needed, but it is simple to
- * implement.
+ * Our theory for replaying a CREATE is to forcibly drop the target
+ * subdirectory if present, then re-copy the source data. This may be
+ * more work than needed, but it is simple to implement.
*/
if (stat(dst_path, &st) == 0 && S_ISDIR(st.st_mode))
{
@@ -1367,8 +1364,7 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record)
dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id);
/*
- * Drop pages for this database that are in the shared buffer
- * cache
+ * Drop pages for this database that are in the shared buffer cache
*/
DropBuffers(xlrec->db_id);
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 68b1360bca0..da5a112bf10 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.92 2004/12/31 21:59:41 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.93 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -157,11 +157,11 @@ defGetInt64(DefElem *def)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid
- * int8 strings.
+ * constants by the lexer. Accept these if they are valid int8
+ * strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
- CStringGetDatum(strVal(def->arg))));
+ CStringGetDatum(strVal(def->arg))));
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 0a330a2137e..d470990e942 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.137 2005/06/04 02:07:09 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.138 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,12 +75,12 @@ ExplainQuery(ExplainStmt *stmt, DestReceiver *dest)
ListCell *l;
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the EXPLAIN is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
- * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't
- * modify its input ... FIXME someday.
+ * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't modify
+ * its input ... FIXME someday.
*/
query = copyObject(query);
@@ -219,7 +219,7 @@ void
ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
TupOutputState *tstate)
{
- instr_time starttime;
+ instr_time starttime;
double totaltime = 0;
ExplainState *es;
StringInfo str;
@@ -264,7 +264,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
pfree(s);
do_text_output_multiline(tstate, f);
pfree(f);
- do_text_output_oneline(tstate, ""); /* separator line */
+ do_text_output_oneline(tstate, ""); /* separator line */
}
}
@@ -289,21 +289,21 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
if (es->printAnalyze)
{
ResultRelInfo *rInfo;
- int numrels = queryDesc->estate->es_num_result_relations;
- int nr;
+ int numrels = queryDesc->estate->es_num_result_relations;
+ int nr;
rInfo = queryDesc->estate->es_result_relations;
for (nr = 0; nr < numrels; rInfo++, nr++)
{
- int nt;
+ int nt;
if (!rInfo->ri_TrigDesc || !rInfo->ri_TrigInstrument)
continue;
for (nt = 0; nt < rInfo->ri_TrigDesc->numtriggers; nt++)
{
- Trigger *trig = rInfo->ri_TrigDesc->triggers + nt;
+ Trigger *trig = rInfo->ri_TrigDesc->triggers + nt;
Instrumentation *instr = rInfo->ri_TrigInstrument + nt;
- char *conname;
+ char *conname;
/* Must clean up instrumentation state */
InstrEndLoop(instr);
@@ -316,7 +316,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
continue;
if (trig->tgisconstraint &&
- (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL)
+ (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL)
{
appendStringInfo(str, "Trigger for constraint %s",
conname);
@@ -327,7 +327,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
if (numrels > 1)
appendStringInfo(str, " on %s",
- RelationGetRelationName(rInfo->ri_RelationDesc));
+ RelationGetRelationName(rInfo->ri_RelationDesc));
appendStringInfo(str, ": time=%.3f calls=%.0f\n",
1000.0 * instr->total,
@@ -337,8 +337,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime (although it should be pretty minimal).
+ * Close down the query and free resources. Include time for this in the
+ * total runtime (although it should be pretty minimal).
*/
INSTR_TIME_SET_CURRENT(starttime);
@@ -366,7 +366,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
static double
elapsed_time(instr_time *starttime)
{
- instr_time endtime;
+ instr_time endtime;
INSTR_TIME_SET_CURRENT(endtime);
@@ -378,7 +378,7 @@ elapsed_time(instr_time *starttime)
endtime.tv_usec += 1000000;
endtime.tv_sec--;
}
-#else /* WIN32 */
+#else /* WIN32 */
endtime.QuadPart -= starttime->QuadPart;
#endif
@@ -583,7 +583,7 @@ explain_outNode(StringInfo str,
if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir))
appendStringInfoString(str, " Backward");
appendStringInfo(str, " using %s",
- quote_identifier(get_rel_name(((IndexScan *) plan)->indexid)));
+ quote_identifier(get_rel_name(((IndexScan *) plan)->indexid)));
/* FALL THRU */
case T_SeqScan:
case T_BitmapHeapScan:
@@ -604,7 +604,7 @@ explain_outNode(StringInfo str,
quote_identifier(relname));
if (strcmp(rte->eref->aliasname, relname) != 0)
appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
+ quote_identifier(rte->eref->aliasname));
}
break;
case T_BitmapIndexScan:
@@ -632,10 +632,10 @@ explain_outNode(StringInfo str,
Assert(rte->rtekind == RTE_FUNCTION);
/*
- * If the expression is still a function call, we can get
- * the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the
- * function call, for example).
+ * If the expression is still a function call, we can get the
+ * real name of the function. Otherwise, punt (this can
+ * happen if the optimizer simplified away the function call,
+ * for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@@ -652,20 +652,20 @@ explain_outNode(StringInfo str,
quote_identifier(proname));
if (strcmp(rte->eref->aliasname, proname) != 0)
appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
+ quote_identifier(rte->eref->aliasname));
}
break;
default:
break;
}
-
+
appendStringInfo(str, " (cost=%.2f..%.2f rows=%.0f width=%d)",
plan->startup_cost, plan->total_cost,
plan->plan_rows, plan->plan_width);
/*
- * We have to forcibly clean up the instrumentation state because
- * we haven't done ExecutorEnd yet. This is pretty grotty ...
+ * We have to forcibly clean up the instrumentation state because we
+ * haven't done ExecutorEnd yet. This is pretty grotty ...
*/
if (planstate->instrument)
InstrEndLoop(planstate->instrument);
@@ -675,8 +675,8 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
@@ -833,9 +833,10 @@ explain_outNode(StringInfo str,
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
appendStringInfo(str, " -> ");
+
/*
- * Ordinarily we don't pass down our own outer_plan value to our
- * child nodes, but in bitmap scan trees we must, since the bottom
+ * Ordinarily we don't pass down our own outer_plan value to our child
+ * nodes, but in bitmap scan trees we must, since the bottom
* BitmapIndexScan nodes may have outer references.
*/
explain_outNode(str, outerPlan(plan),
@@ -882,7 +883,7 @@ explain_outNode(StringInfo str,
if (IsA(plan, BitmapAnd))
{
- BitmapAnd *bitmapandplan = (BitmapAnd *) plan;
+ BitmapAnd *bitmapandplan = (BitmapAnd *) plan;
BitmapAndState *bitmapandstate = (BitmapAndState *) planstate;
ListCell *lst;
int j;
@@ -898,7 +899,7 @@ explain_outNode(StringInfo str,
explain_outNode(str, subnode,
bitmapandstate->bitmapplans[j],
- outer_plan, /* pass down same outer plan */
+ outer_plan, /* pass down same outer plan */
indent + 3, es);
j++;
}
@@ -906,7 +907,7 @@ explain_outNode(StringInfo str,
if (IsA(plan, BitmapOr))
{
- BitmapOr *bitmaporplan = (BitmapOr *) plan;
+ BitmapOr *bitmaporplan = (BitmapOr *) plan;
BitmapOrState *bitmaporstate = (BitmapOrState *) planstate;
ListCell *lst;
int j;
@@ -922,7 +923,7 @@ explain_outNode(StringInfo str,
explain_outNode(str, subnode,
bitmaporstate->bitmapplans[j],
- outer_plan, /* pass down same outer plan */
+ outer_plan, /* pass down same outer plan */
indent + 3, es);
j++;
}
@@ -1008,9 +1009,9 @@ show_scan_qual(List *qual, const char *qlabel,
scancontext = deparse_context_for_rte(rte);
/*
- * If we have an outer plan that is referenced by the qual, add it to
- * the deparse context. If not, don't (so that we don't force
- * prefixes unnecessarily).
+ * If we have an outer plan that is referenced by the qual, add it to the
+ * deparse context. If not, don't (so that we don't force prefixes
+ * unnecessarily).
*/
if (outer_plan)
{
@@ -1018,7 +1019,7 @@ show_scan_qual(List *qual, const char *qlabel,
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
- outer_plan->targetlist,
+ outer_plan->targetlist,
es->rtable);
else
outercontext = NULL;
@@ -1111,11 +1112,10 @@ show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
/*
* In this routine we expect that the plan node's tlist has not been
- * processed by set_plan_references(). Normally, any Vars will
- * contain valid varnos referencing the actual rtable. But we might
- * instead be looking at a dummy tlist generated by prepunion.c; if
- * there are Vars with zero varno, use the tlist itself to determine
- * their names.
+ * processed by set_plan_references(). Normally, any Vars will contain
+ * valid varnos referencing the actual rtable. But we might instead be
+ * looking at a dummy tlist generated by prepunion.c; if there are Vars
+ * with zero varno, use the tlist itself to determine their names.
*/
varnos = pull_varnos((Node *) tlist);
if (bms_is_member(0, varnos))
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index a2a8f56e23c..f4d6164775e 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.68 2005/09/24 22:54:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.69 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -83,8 +83,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -158,7 +158,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
ListCell *x;
int i;
- *requiredResultType = InvalidOid; /* default result */
+ *requiredResultType = InvalidOid; /* default result */
inTypes = (Oid *) palloc(parameterCount * sizeof(Oid));
allTypes = (Datum *) palloc(parameterCount * sizeof(Datum));
@@ -182,8 +182,8 @@ examine_parameter_list(List *parameters, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -307,13 +307,13 @@ duplicate_error:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
static char
interpret_func_volatility(DefElem *defel)
{
- char *str = strVal(defel->arg);
+ char *str = strVal(defel->arg);
if (strcmp(str, "immutable") == 0)
return PROVOLATILE_IMMUTABLE;
@@ -324,7 +324,7 @@ interpret_func_volatility(DefElem *defel)
else
{
elog(ERROR, "invalid volatility \"%s\"", str);
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
}
@@ -445,8 +445,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
@@ -469,8 +469,8 @@ interpret_AS_clause(Oid languageOid, const char *languageName, List *as,
if (languageOid == ClanguageId)
{
/*
- * For "C" language, store the file name in probin and, when
- * given, the link symbol name in prosrc.
+ * For "C" language, store the file name in probin and, when given,
+ * the link symbol name in prosrc.
*/
*probin_str_p = strVal(linitial(as));
if (list_length(as) == 1)
@@ -541,7 +541,7 @@ CreateFunction(CreateFunctionStmt *stmt)
/* override attributes from explicit list */
compute_attributes_sql_style(stmt->options,
- &as_clause, &language, &volatility, &isStrict, &security);
+ &as_clause, &language, &volatility, &isStrict, &security);
/* Convert language name to canonical case */
languageName = case_translate_language_name(language);
@@ -630,10 +630,10 @@ CreateFunction(CreateFunctionStmt *stmt)
/*
* In PostgreSQL versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and
- * "prosrc" wasn't used. So there is code out there that does
- * CREATE FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some
- * modicum of backwards compatibility, accept an empty "prosrc"
- * value as meaning the supplied SQL function name.
+ * "prosrc" wasn't used. So there is code out there that does CREATE
+ * FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
+ * backwards compatibility, accept an empty "prosrc" value as meaning
+ * the supplied SQL function name.
*/
if (strlen(prosrc_str) == 0)
prosrc_str = funcname;
@@ -647,8 +647,8 @@ CreateFunction(CreateFunctionStmt *stmt)
}
/*
- * And now that we have all the parameters, and know we're permitted
- * to do so, go ahead and create the function.
+ * And now that we have all the parameters, and know we're permitted to do
+ * so, go ahead and create the function.
*/
ProcedureCreate(funcname,
namespaceId,
@@ -696,8 +696,8 @@ RemoveFunction(RemoveFuncStmt *stmt)
/* Permission check: must own func or its namespace */
if (!pg_proc_ownercheck(funcOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(functionName));
@@ -706,7 +706,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@@ -812,7 +812,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
@@ -828,7 +828,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
}
@@ -900,7 +900,7 @@ AlterFunctionOwner(List *name, List *argtypes, Oid newOwnerId)
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_proc_ownercheck(procOid,GetUserId()))
+ if (!pg_proc_ownercheck(procOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(name));
@@ -960,14 +960,14 @@ AlterFunctionOwner(List *name, List *argtypes, Oid newOwnerId)
void
AlterFunction(AlterFunctionStmt *stmt)
{
- HeapTuple tup;
- Oid funcOid;
+ HeapTuple tup;
+ Oid funcOid;
Form_pg_proc procForm;
- Relation rel;
- ListCell *l;
- DefElem *volatility_item = NULL;
- DefElem *strict_item = NULL;
- DefElem *security_def_item = NULL;
+ Relation rel;
+ ListCell *l;
+ DefElem *volatility_item = NULL;
+ DefElem *strict_item = NULL;
+ DefElem *security_def_item = NULL;
rel = heap_open(ProcedureRelationId, RowExclusiveLock);
@@ -995,9 +995,9 @@ AlterFunction(AlterFunctionStmt *stmt)
NameListToString(stmt->func->funcname))));
/* Examine requested actions. */
- foreach (l, stmt->actions)
+ foreach(l, stmt->actions)
{
- DefElem *defel = (DefElem *) lfirst(l);
+ DefElem *defel = (DefElem *) lfirst(l);
if (compute_common_attribute(defel,
&volatility_item,
@@ -1182,27 +1182,27 @@ CreateCast(CreateCastStmt *stmt)
if (nargs < 1 || nargs > 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must take one to three arguments")));
+ errmsg("cast function must take one to three arguments")));
if (procstruct->proargtypes.values[0] != sourcetypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("argument of cast function must match source data type")));
+ errmsg("argument of cast function must match source data type")));
if (nargs > 1 && procstruct->proargtypes.values[1] != INT4OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("second argument of cast function must be type integer")));
+ errmsg("second argument of cast function must be type integer")));
if (nargs > 2 && procstruct->proargtypes.values[2] != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("third argument of cast function must be type boolean")));
+ errmsg("third argument of cast function must be type boolean")));
if (procstruct->prorettype != targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
/*
- * Restricting the volatility of a cast function may or may not be
- * a good idea in the abstract, but it definitely breaks many old
+ * Restricting the volatility of a cast function may or may not be a
+ * good idea in the abstract, but it definitely breaks many old
* user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
@@ -1214,7 +1214,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -1242,13 +1242,13 @@ CreateCast(CreateCastStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create a cast WITHOUT FUNCTION")));
+ errmsg("must be superuser to create a cast WITHOUT FUNCTION")));
/*
* Also, insist that the types match as to size, alignment, and
- * pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
- * fail this test should certainly not be equated.
+ * pass-by-value attributes; this provides at least a crude check that
+ * they have similar representations. A pair of types that fail this
+ * test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
get_typlenbyvalalign(targettypeid, &typ2len, &typ2byval, &typ2align);
@@ -1267,7 +1267,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid && nargs < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* convert CoercionContext enum to char value for castcontext */
switch (stmt->context)
@@ -1290,9 +1290,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_open(CastRelationId, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error
- * message, the unique index would catch it anyway (so no need to
- * sweat about race conditions).
+ * Check for duplicate. This is just to give a friendly error message,
+ * the unique index would catch it anyway (so no need to sweat about race
+ * conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
@@ -1442,12 +1442,12 @@ DropCastById(Oid castOid)
void
AlterFunctionNamespace(List *name, List *argtypes, const char *newschema)
{
- Oid procOid;
- Oid oldNspOid;
- Oid nspOid;
- HeapTuple tup;
- Relation procRel;
- Form_pg_proc proc;
+ Oid procOid;
+ Oid oldNspOid;
+ Oid nspOid;
+ HeapTuple tup;
+ Relation procRel;
+ Form_pg_proc proc;
procRel = heap_open(ProcedureRelationId, RowExclusiveLock);
@@ -1482,7 +1482,7 @@ AlterFunctionNamespace(List *name, List *argtypes, const char *newschema)
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 6bfa8a04e24..07654e455ab 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.133 2005/06/22 21:14:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.134 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -145,10 +145,9 @@ DefineIndex(RangeVar *heapRelation,
/*
* Verify we (still) have CREATE rights in the rel's namespace.
- * (Presumably we did when the rel was created, but maybe not
- * anymore.) Skip check if caller doesn't want it. Also skip check
- * if bootstrapping, since permissions machinery may not be working
- * yet.
+ * (Presumably we did when the rel was created, but maybe not anymore.)
+ * Skip check if caller doesn't want it. Also skip check if
+ * bootstrapping, since permissions machinery may not be working yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
@@ -193,8 +192,8 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of
- * a hack but seems simpler than marking them in the BKI commands.
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * hack but seems simpler than marking them in the BKI commands.
*/
if (rel->rd_rel->relisshared)
tablespaceId = GLOBALTABLESPACE_OID;
@@ -221,8 +220,7 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * look up the access method, verify it can handle the requested
- * features
+ * look up the access method, verify it can handle the requested features
*/
tuple = SearchSysCache(AMNAME,
PointerGetDatum(accessMethodName),
@@ -238,13 +236,13 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support unique indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support unique indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support multicolumn indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support multicolumn indexes",
+ accessMethodName)));
ReleaseSysCache(tuple);
@@ -275,23 +273,23 @@ DefineIndex(RangeVar *heapRelation,
ListCell *keys;
/*
- * If ALTER TABLE, check that there isn't already a PRIMARY KEY.
- * In CREATE TABLE, we have faith that the parser rejected
- * multiple pkey clauses; and CREATE INDEX doesn't have a way to
- * say PRIMARY KEY, so it's no problem either.
+ * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In
+ * CREATE TABLE, we have faith that the parser rejected multiple pkey
+ * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so
+ * it's no problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(rel))
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("multiple primary keys for table \"%s\" are not allowed",
- RelationGetRelationName(rel))));
+ errmsg("multiple primary keys for table \"%s\" are not allowed",
+ RelationGetRelationName(rel))));
}
/*
- * Check that all of the attributes in a primary key are marked as
- * not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
cmds = NIL;
foreach(keys, attributeList)
@@ -326,35 +324,35 @@ DefineIndex(RangeVar *heapRelation,
else
{
/*
- * This shouldn't happen during CREATE TABLE, but can
- * happen during ALTER TABLE. Keep message in sync with
+ * This shouldn't happen during CREATE TABLE, but can happen
+ * during ALTER TABLE. Keep message in sync with
* transformIndexConstraints() in parser/analyze.c.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
/*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
- * tables? Currently, since the PRIMARY KEY itself doesn't
- * cascade, we don't cascade the notnull constraint(s) either; but
- * this is pretty debatable.
+ * tables? Currently, since the PRIMARY KEY itself doesn't cascade,
+ * we don't cascade the notnull constraint(s) either; but this is
+ * pretty debatable.
*
- * XXX: possible future improvement: when being called from ALTER
- * TABLE, it would be more efficient to merge this with the outer
- * ALTER TABLE, so as to avoid two scans. But that seems to
- * complicate DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER TABLE,
+ * it would be more efficient to merge this with the outer ALTER
+ * TABLE, so as to avoid two scans. But that seems to complicate
+ * DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
}
/*
- * Prepare arguments for index_create, primarily an IndexInfo
- * structure. Note that ii_Predicate must be in implicit-AND format.
+ * Prepare arguments for index_create, primarily an IndexInfo structure.
+ * Note that ii_Predicate must be in implicit-AND format.
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
@@ -372,15 +370,15 @@ DefineIndex(RangeVar *heapRelation,
heap_close(rel, NoLock);
/*
- * Report index creation if appropriate (delay this till after most of
- * the error checks)
+ * Report index creation if appropriate (delay this till after most of the
+ * error checks)
*/
if (isconstraint && !quiet)
ereport(NOTICE,
- (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
- primary ? "PRIMARY KEY" : "UNIQUE",
- indexRelationName, RelationGetRelationName(rel))));
+ (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ primary ? "PRIMARY KEY" : "UNIQUE",
+ indexRelationName, RelationGetRelationName(rel))));
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
@@ -391,8 +389,8 @@ DefineIndex(RangeVar *heapRelation,
* We update the relation's pg_class tuple even if it already has
* relhasindex = true. This is needed to cause a shared-cache-inval
* message to be sent for the pg_class tuple, which will cause other
- * backends to flush their relcache entries and in particular their
- * cached lists of the indexes for this relation.
+ * backends to flush their relcache entries and in particular their cached
+ * lists of the indexes for this relation.
*/
setRelhasindex(relationId, true, primary, InvalidOid);
}
@@ -414,8 +412,7 @@ CheckPredicate(Expr *predicate)
{
/*
* We don't currently support generation of an actual query plan for a
- * predicate, only simple scalar expressions; hence these
- * restrictions.
+ * predicate, only simple scalar expressions; hence these restrictions.
*/
if (contain_subplans((Node *) predicate))
ereport(ERROR,
@@ -433,7 +430,7 @@ CheckPredicate(Expr *predicate)
if (contain_mutable_functions((Node *) predicate))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
@@ -470,8 +467,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- attribute->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
@@ -501,24 +498,23 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query
- * plan for an index expression, only simple scalar
- * expressions; hence these restrictions.
+ * We don't currently support generation of an actual query plan
+ * for an index expression, only simple scalar expressions; hence
+ * these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in index expression")));
+ errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in index expression")));
+ errmsg("cannot use aggregate function in index expression")));
/*
- * A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the
- * same data every time, it's not clear what the index entries
- * mean at all.
+ * A expression using mutable functions is probably wrong, since
+ * if you aren't going to get the same result for the same data
+ * every time, it's not clear what the index entries mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@@ -548,16 +544,16 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
- * we ignore those opclass names so the default *_ops is used. This
- * can be removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so we
+ * ignore those opclass names so the default *_ops is used. This can be
+ * removed in some later release. bjm 2000/02/07
*
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for awhile. I'm starting to think we need a better approach.
- * tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too
+ * for awhile. I'm starting to think we need a better approach. tgl
+ * 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
* anyway). tgl 2003/11/11
@@ -628,8 +624,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note
- * we will accept binary compatibility.
+ * Verify that the index operator class accepts this datatype. Note we
+ * will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype;
@@ -637,8 +633,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
@@ -663,8 +659,8 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId)
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
+ * require the user to specify which one he wants. If we find more than
+ * one exact match, then someone put bogus entries in pg_opclass.
*
* The initial search is done by namespace.c so that we only consider
* opclasses visible in the current namespace search path. (See also
@@ -694,8 +690,8 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId)
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple default operator classes for data type %s",
- format_type_be(attrType))));
+ errmsg("there are multiple default operator classes for data type %s",
+ format_type_be(attrType))));
if (ncompatible == 1)
return compatibleOid;
@@ -749,8 +745,8 @@ makeObjectName(const char *name1, const char *name2, const char *label)
/*
* If we must truncate, preferentially truncate the longer name. This
- * logic could be expressed without a loop, but it's simple and
- * obvious as a loop.
+ * logic could be expressed without a loop, but it's simple and obvious as
+ * a loop.
*/
while (name1chars + name2chars > availchars)
{
@@ -842,9 +838,9 @@ relationHasPrimaryKey(Relation rel)
ListCell *indexoidscan;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache until we find one marked
- * primary key (hopefully there isn't more than one such).
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache until we find one marked primary key
+ * (hopefully there isn't more than one such).
*/
indexoidlist = RelationGetIndexList(rel);
@@ -1004,16 +1000,16 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
/*
* We cannot run inside a user transaction block; if we were inside a
- * transaction, then our commit- and start-transaction-command calls
- * would not have the intended effect!
+ * transaction, then our commit- and start-transaction-command calls would
+ * not have the intended effect!
*/
PreventTransactionChain((void *) databaseName, "REINDEX DATABASE");
/*
- * Create a memory context that will survive forced transaction
- * commits we do below. Since it is a child of PortalContext, it will
- * go away eventually even if we suffer an error; there's no need for
- * special abort cleanup logic.
+ * Create a memory context that will survive forced transaction commits we
+ * do below. Since it is a child of PortalContext, it will go away
+ * eventually even if we suffer an error; there's no need for special
+ * abort cleanup logic.
*/
private_context = AllocSetContextCreate(PortalContext,
"ReindexDatabase",
@@ -1022,10 +1018,10 @@ ReindexDatabase(const char *databaseName, bool do_system, bool do_user)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * We always want to reindex pg_class first. This ensures that if
- * there is any corruption in pg_class' indexes, they will be fixed
- * before we process any other tables. This is critical because
- * reindexing itself will try to update pg_class.
+ * We always want to reindex pg_class first. This ensures that if there
+ * is any corruption in pg_class' indexes, they will be fixed before we
+ * process any other tables. This is critical because reindexing itself
+ * will try to update pg_class.
*/
if (do_system)
{
diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c
index b4a87a4d39b..72a61ad9c27 100644
--- a/src/backend/commands/lockcmds.c
+++ b/src/backend/commands/lockcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.12 2004/12/31 21:59:41 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.13 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,8 +31,8 @@ LockTableCommand(LockStmt *lockstmt)
ListCell *p;
/*
- * Iterate over the list and open, lock, and close the relations one
- * at a time
+ * Iterate over the list and open, lock, and close the relations one at a
+ * time
*/
foreach(p, lockstmt->relations)
@@ -43,8 +43,8 @@ LockTableCommand(LockStmt *lockstmt)
Relation rel;
/*
- * We don't want to open the relation until we've checked
- * privilege. So, manually get the relation OID.
+ * We don't want to open the relation until we've checked privilege.
+ * So, manually get the relation OID.
*/
reloid = RangeVarGetRelid(relation, false);
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 186fac96edb..ea8afcfccbf 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.37 2005/08/23 01:41:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.38 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,16 +117,16 @@ DefineOpClass(CreateOpClassStmt *stmt)
ReleaseSysCache(tup);
/*
- * Currently, we require superuser privileges to create an opclass.
- * This seems necessary because we have no way to validate that the
- * offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday, if
- * it can be done without solving the halting problem :-(
+ * Currently, we require superuser privileges to create an opclass. This
+ * seems necessary because we have no way to validate that the offered set
+ * of operators and functions are consistent with the AM's expectations.
+ * It would be nice to provide such a check someday, if it can be done
+ * without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@@ -223,7 +223,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@@ -244,8 +244,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
{
/*
* Currently, only GiST allows storagetype different from
- * datatype. This hardcoded test should be eliminated in
- * favor of adding another boolean column to pg_am ...
+ * datatype. This hardcoded test should be eliminated in favor of
+ * adding another boolean column to pg_am ...
*/
if (amoid != GIST_AM_OID)
ereport(ERROR,
@@ -258,8 +258,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
/*
- * Make sure there is no existing opclass of this name (this is just
- * to give a more friendly error message than "duplicate key").
+ * Make sure there is no existing opclass of this name (this is just to
+ * give a more friendly error message than "duplicate key").
*/
if (SearchSysCacheExists(CLAAMNAMENSP,
ObjectIdGetDatum(amoid),
@@ -272,10 +272,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
opcname, stmt->amname)));
/*
- * If we are creating a default opclass, check there isn't one
- * already. (Note we do not restrict this test to visible opclasses;
- * this ensures that typcache.c can find unique solutions to its
- * questions.)
+ * If we are creating a default opclass, check there isn't one already.
+ * (Note we do not restrict this test to visible opclasses; this ensures
+ * that typcache.c can find unique solutions to its questions.)
*/
if (stmt->isDefault)
{
@@ -300,8 +299,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
errmsg("could not make operator class \"%s\" be default for type %s",
opcname,
TypeNameToString(stmt->datatype)),
- errdetail("Operator class \"%s\" already is the default.",
- NameStr(opclass->opcname))));
+ errdetail("Operator class \"%s\" already is the default.",
+ NameStr(opclass->opcname))));
}
systable_endscan(scan);
@@ -321,7 +320,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
namestrcpy(&opcName, opcname);
values[i++] = NameGetDatum(&opcName); /* opcname */
values[i++] = ObjectIdGetDatum(namespaceoid); /* opcnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */
values[i++] = ObjectIdGetDatum(typeoid); /* opcintype */
values[i++] = BoolGetDatum(stmt->isDefault); /* opcdefault */
values[i++] = ObjectIdGetDatum(storageoid); /* opckeytype */
@@ -342,8 +341,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
storeProcedures(opclassoid, procedures);
/*
- * Create dependencies. Note: we do not create a dependency link to
- * the AM, because we don't currently support DROP ACCESS METHOD.
+ * Create dependencies. Note: we do not create a dependency link to the
+ * AM, because we don't currently support DROP ACCESS METHOD.
*/
myself.classId = OperatorClassRelationId;
myself.objectId = opclassoid;
@@ -424,8 +423,8 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
opform = (Form_pg_operator) GETSTRUCT(optup);
/*
- * btree operators must be binary ops returning boolean, and the
- * left-side input type must match the operator class' input type.
+ * btree operators must be binary ops returning boolean, and the left-side
+ * input type must match the operator class' input type.
*/
if (opform->oprkind != 'b')
ereport(ERROR,
@@ -438,11 +437,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid)
if (opform->oprleft != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operators must have index type as left input")));
+ errmsg("btree operators must have index type as left input")));
/*
- * The subtype is "default" (0) if oprright matches the operator
- * class, otherwise it is oprright.
+ * The subtype is "default" (0) if oprright matches the operator class,
+ * otherwise it is oprright.
*/
if (opform->oprright == typeoid)
subtype = InvalidOid;
@@ -478,8 +477,8 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
procform = (Form_pg_proc) GETSTRUCT(proctup);
/*
- * btree support procs must be 2-arg procs returning int4, and the
- * first input type must match the operator class' input type.
+ * btree support procs must be 2-arg procs returning int4, and the first
+ * input type must match the operator class' input type.
*/
if (procform->pronargs != 2)
ereport(ERROR,
@@ -492,11 +491,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid)
if (procform->proargtypes.values[0] != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree procedures must have index type as first input")));
+ errmsg("btree procedures must have index type as first input")));
/*
- * The subtype is "default" (0) if second input type matches the
- * operator class, otherwise it is the second input type.
+ * The subtype is "default" (0) if second input type matches the operator
+ * class, otherwise it is the second input type.
*/
if (procform->proargtypes.values[1] == typeoid)
subtype = InvalidOid;
@@ -525,13 +524,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc)
if (isProc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("procedure number %d appears more than once",
- member->number)));
+ errmsg("procedure number %d appears more than once",
+ member->number)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- member->number)));
+ errmsg("operator number %d appears more than once",
+ member->number)));
}
}
*list = lappend(*list, member);
@@ -688,7 +687,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
@@ -956,7 +955,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_opclass_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_opclass_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPCLASS,
NameListToString(name));
@@ -972,8 +971,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
opcForm->opcowner = newOwnerId;
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index f9db742e844..07877962e3f 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.25 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.26 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -69,11 +69,9 @@ DefineOperator(List *names, List *parameters)
TypeName *typeName2 = NULL; /* second type name */
Oid typeId1 = InvalidOid; /* types converted to OID */
Oid typeId2 = InvalidOid;
- List *commutatorName = NIL; /* optional commutator operator
- * name */
+ List *commutatorName = NIL; /* optional commutator operator name */
List *negatorName = NIL; /* optional negator operator name */
- List *restrictionName = NIL; /* optional restrict. sel.
- * procedure */
+ List *restrictionName = NIL; /* optional restrict. sel. procedure */
List *joinName = NIL; /* optional join sel. procedure */
List *leftSortName = NIL; /* optional left sort operator */
List *rightSortName = NIL; /* optional right sort operator */
@@ -103,7 +101,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (pg_strcasecmp(defel->defname, "rightarg") == 0)
{
@@ -111,7 +109,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (pg_strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
@@ -157,8 +155,8 @@ DefineOperator(List *names, List *parameters)
typeId2 = typenameTypeId(typeName2);
/*
- * If any of the mergejoin support operators were given, then canMerge
- * is implicit. If canMerge is specified or implicit, fill in default
+ * If any of the mergejoin support operators were given, then canMerge is
+ * implicit. If canMerge is specified or implicit, fill in default
* operator names for any missing mergejoin support operators.
*/
if (leftSortName || rightSortName || ltCompareName || gtCompareName)
@@ -184,11 +182,9 @@ DefineOperator(List *names, List *parameters)
typeId1, /* left type id */
typeId2, /* right type id */
functionName, /* function for operator */
- commutatorName, /* optional commutator operator
- * name */
+ commutatorName, /* optional commutator operator name */
negatorName, /* optional negator operator name */
- restrictionName, /* optional restrict. sel.
- * procedure */
+ restrictionName, /* optional restrict. sel. procedure */
joinName, /* optional join sel. procedure name */
canHash, /* operator hashes */
leftSortName, /* optional left sort operator */
@@ -300,7 +296,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_oper_ownercheck(operOid,GetUserId()))
+ if (!pg_oper_ownercheck(operOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER,
NameListToString(name));
@@ -317,8 +313,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2,
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
oprForm->oprowner = newOwnerId;
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 0ff53666136..e68d221f01d 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.42 2005/06/03 23:05:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.43 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,27 +54,26 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
errmsg("invalid cursor name: must not be empty")));
/*
- * If this is a non-holdable cursor, we require that this statement
- * has been executed inside a transaction block (or else, it would
- * have no user-visible effect).
+ * If this is a non-holdable cursor, we require that this statement has
+ * been executed inside a transaction block (or else, it would have no
+ * user-visible effect).
*/
if (!(stmt->options & CURSOR_OPT_HOLD))
RequireTransactionChain((void *) stmt, "DECLARE CURSOR");
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the DECLARE CURSOR is in a portal and is
- * executed repeatedly. XXX the planner really shouldn't modify its
- * input ... FIXME someday.
+ * executed repeatedly. XXX the planner really shouldn't modify its input
+ * ... FIXME someday.
*/
query = copyObject(stmt->query);
/*
* The query has been through parse analysis, but not rewriting or
- * planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything
- * strange.
+ * planning as yet. Note that the grammar ensured we have a SELECT query,
+ * so we are not expecting rule rewriting to do anything strange.
*/
AcquireRewriteLocks(query);
rewritten = QueryRewrite(query);
@@ -91,14 +90,13 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
if (query->rowMarks != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"),
+ errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"),
errdetail("Cursors must be READ ONLY.")));
plan = planner(query, true, stmt->options, NULL);
/*
- * Create a portal and copy the query and plan into its memory
- * context.
+ * Create a portal and copy the query and plan into its memory context.
*/
portal = CreatePortal(stmt->portalname, false, false);
@@ -116,11 +114,10 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
/*
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case
- * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo =
- * $1 This will have been parsed using the outer parameter set and the
- * parameter value needs to be preserved for use when the cursor is
- * executed.
+ * memory context. We want to pass down the parameter values in case we
+ * had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 This
+ * will have been parsed using the outer parameter set and the parameter
+ * value needs to be preserved for use when the cursor is executed.
*/
params = copyParamList(params);
@@ -130,8 +127,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
* Set up options for portal.
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
- * based on whether it would require any additional runtime overhead
- * to do so.
+ * based on whether it would require any additional runtime overhead to do
+ * so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -150,8 +147,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until
- * PerformPortalFetch is called.
+ * We're done; the query won't actually be run until PerformPortalFetch is
+ * called.
*/
}
@@ -189,7 +186,7 @@ PerformPortalFetch(FetchStmt *stmt,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("cursor \"%s\" does not exist", stmt->portalname)));
+ errmsg("cursor \"%s\" does not exist", stmt->portalname)));
return; /* keep compiler happy */
}
@@ -264,10 +261,9 @@ PortalCleanup(Portal portal)
AssertArg(portal->cleanup == PortalCleanup);
/*
- * Shut down executor, if still running. We skip this during error
- * abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't
- * fail.
+ * Shut down executor, if still running. We skip this during error abort,
+ * since other mechanisms will take care of releasing executor resources,
+ * and we can't be sure that ExecutorEnd itself wouldn't fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@@ -367,9 +363,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
@@ -391,10 +386,10 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
- * implemented by just skipping straight to the tuple # that we
- * need to be at, but the tuplestore API doesn't support that. So
- * we start at the beginning of the tuplestore and iterate through
- * it until we reach where we need to be. FIXME someday?
+ * implemented by just skipping straight to the tuple # that we need
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
@@ -404,8 +399,8 @@ PersistHoldablePortal(Portal portal)
if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not reposition held cursor")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not reposition held cursor")));
tuplestore_rescan(portal->holdStore);
@@ -453,10 +448,10 @@ PersistHoldablePortal(Portal portal)
QueryContext = saveQueryContext;
/*
- * We can now release any subsidiary memory of the portal's heap
- * context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto the
- * portal's heap via PortalContext.
+ * We can now release any subsidiary memory of the portal's heap context;
+ * we'll never use it again. The executor already dropped its context,
+ * but this will clean up anything that glommed onto the portal's heap via
+ * PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index dec3d249dfa..5420da4a626 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.40 2005/06/22 17:45:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.41 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -84,17 +84,17 @@ PrepareQuery(PrepareStmt *stmt)
}
/*
- * Parse analysis is already done, but we must still rewrite and plan
- * the query.
+ * Parse analysis is already done, but we must still rewrite and plan the
+ * query.
*/
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the PREPARE is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
- * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't
- * modify its input ... FIXME someday.
+ * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't modify
+ * its input ... FIXME someday.
*/
query = copyObject(stmt->query);
@@ -106,8 +106,8 @@ PrepareQuery(PrepareStmt *stmt)
plan_list = pg_plan_queries(query_list, NULL, false);
/*
- * Save the results. We don't have the query string for this PREPARE,
- * but we do have the string we got from the client, so use that.
+ * Save the results. We don't have the query string for this PREPARE, but
+ * we do have the string we got from the client, so use that.
*/
StorePreparedStatement(stmt->name,
debug_query_string,
@@ -146,8 +146,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest, char *completionTag)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it till
- * end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till end
+ * of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@@ -159,10 +159,10 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest, char *completionTag)
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
- * that we can modify its destination (yech, but this has always been
- * ugly). For regular EXECUTE we can just use the stored query where
- * it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that
+ * we can modify its destination (yech, but this has always been ugly).
+ * For regular EXECUTE we can just use the stored query where it sits,
+ * since the executor is read-only.
*/
if (stmt->into)
{
@@ -245,7 +245,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@@ -333,8 +333,8 @@ StorePreparedStatement(const char *stmt_name,
/*
* We need to copy the data so that it is stored in the correct memory
* context. Do this before making hashtable entry, so that an
- * out-of-memory failure only wastes memory and doesn't leave us with
- * an incomplete (ie corrupt) hashtable entry.
+ * out-of-memory failure only wastes memory and doesn't leave us with an
+ * incomplete (ie corrupt) hashtable entry.
*/
qstring = query_string ? pstrdup(query_string) : NULL;
query_list = (List *) copyObject(query_list);
@@ -380,9 +380,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user:
- * the hash package is picky enough that it needs to be
- * NULL-padded out to the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user: the
+ * hash package is picky enough that it needs to be NULL-padded out to
+ * the appropriate length to work correctly.
*/
StrNCpy(key, stmt_name, sizeof(key));
@@ -447,7 +447,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: do not modify the result.
@@ -464,31 +464,31 @@ FetchPreparedStatementTargetList(PreparedStatement *stmt)
return ((Query *) linitial(stmt->query_list))->targetList;
if (strategy == PORTAL_UTIL_SELECT)
{
- Node *utilityStmt;
+ Node *utilityStmt;
utilityStmt = ((Query *) linitial(stmt->query_list))->utilityStmt;
switch (nodeTag(utilityStmt))
{
case T_FetchStmt:
- {
- FetchStmt *substmt = (FetchStmt *) utilityStmt;
- Portal subportal;
+ {
+ FetchStmt *substmt = (FetchStmt *) utilityStmt;
+ Portal subportal;
- Assert(!substmt->ismove);
- subportal = GetPortalByName(substmt->portalname);
- Assert(PortalIsValid(subportal));
- return FetchPortalTargetList(subportal);
- }
+ Assert(!substmt->ismove);
+ subportal = GetPortalByName(substmt->portalname);
+ Assert(PortalIsValid(subportal));
+ return FetchPortalTargetList(subportal);
+ }
case T_ExecuteStmt:
- {
- ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
- PreparedStatement *entry;
+ {
+ ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
+ PreparedStatement *entry;
- Assert(!substmt->into);
- entry = FetchPreparedStatement(substmt->name, true);
- return FetchPreparedStatementTargetList(entry);
- }
+ Assert(!substmt->into);
+ entry = FetchPreparedStatement(substmt->name, true);
+ return FetchPreparedStatementTargetList(entry);
+ }
default:
break;
@@ -564,8 +564,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it till
- * end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till end
+ * of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@@ -597,7 +597,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 4155dc179ad..b13f7234dba 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.62 2005/09/08 20:07:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.63 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ typedef struct
} PLTemplate;
static void create_proc_lang(const char *languageName,
- Oid handlerOid, Oid valOid, bool trusted);
+ Oid handlerOid, Oid valOid, bool trusted);
static PLTemplate *find_language_template(const char *languageName);
@@ -68,7 +68,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
@@ -89,7 +89,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
*/
if ((pltemplate = find_language_template(languageName)) != NULL)
{
- List *funcname;
+ List *funcname;
/*
* Give a notice if we are ignoring supplied parameters.
@@ -99,9 +99,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
(errmsg("using pg_pltemplate information instead of CREATE LANGUAGE parameters")));
/*
- * Find or create the handler function, which we force to be in
- * the pg_catalog schema. If already present, it must have the
- * correct return type.
+ * Find or create the handler function, which we force to be in the
+ * pg_catalog schema. If already present, it must have the correct
+ * return type.
*/
funcname = SystemFuncName(pltemplate->tmplhandler);
handlerOid = LookupFuncName(funcname, 0, funcargtypes, true);
@@ -111,23 +111,23 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(funcname))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(funcname))));
}
else
{
handlerOid = ProcedureCreate(pltemplate->tmplhandler,
PG_CATALOG_NAMESPACE,
- false, /* replace */
- false, /* returnsSet */
+ false, /* replace */
+ false, /* returnsSet */
LANGUAGE_HANDLEROID,
ClanguageId,
F_FMGR_C_VALIDATOR,
pltemplate->tmplhandler,
pltemplate->tmpllibrary,
- false, /* isAgg */
- false, /* security_definer */
- false, /* isStrict */
+ false, /* isAgg */
+ false, /* security_definer */
+ false, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 0),
PointerGetDatum(NULL),
@@ -148,16 +148,16 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
{
valOid = ProcedureCreate(pltemplate->tmplvalidator,
PG_CATALOG_NAMESPACE,
- false, /* replace */
- false, /* returnsSet */
+ false, /* replace */
+ false, /* returnsSet */
VOIDOID,
ClanguageId,
F_FMGR_C_VALIDATOR,
pltemplate->tmplvalidator,
pltemplate->tmpllibrary,
- false, /* isAgg */
- false, /* security_definer */
- false, /* isStrict */
+ false, /* isAgg */
+ false, /* security_definer */
+ false, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 1),
PointerGetDatum(NULL),
@@ -175,9 +175,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else
{
/*
- * No template, so use the provided information. If there's
- * no handler clause, the user is trying to rely on a template
- * that we don't have, so complain accordingly.
+ * No template, so use the provided information. If there's no
+ * handler clause, the user is trying to rely on a template that we
+ * don't have, so complain accordingly.
*/
if (!stmt->plhandler)
ereport(ERROR,
@@ -210,8 +210,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(stmt->plhandler))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(stmt->plhandler))));
}
/* validate the validator function */
@@ -385,7 +385,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that the language exists
@@ -471,7 +471,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index f0ae06f15c6..56a3359a532 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.34 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.35 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,8 +42,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
Oid namespaceId;
List *parsetree_list;
ListCell *parsetree_item;
- Oid owner_uid;
- Oid saved_uid;
+ Oid owner_uid;
+ Oid saved_uid;
AclResult aclresult;
saved_uid = GetUserId();
@@ -60,8 +60,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that
- * a superuser will always have both of these privileges a fortiori.
+ * The latter provision guards against "giveaway" attacks. Note that a
+ * superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
if (aclresult != ACLCHECK_OK)
@@ -75,15 +75,15 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/*
* If the requested authorization is different from the current user,
- * temporarily set the current user so that the object(s) will be
- * created with the correct ownership.
+ * temporarily set the current user so that the object(s) will be created
+ * with the correct ownership.
*
- * (The setting will revert to session user on error or at the end of
- * this routine.)
+ * (The setting will revert to session user on error or at the end of this
+ * routine.)
*/
if (saved_uid != owner_uid)
SetUserId(owner_uid);
@@ -95,19 +95,18 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
CommandCounterIncrement();
/*
- * Temporarily make the new namespace be the front of the search path,
- * as well as the default creation target namespace. This will be
- * undone at the end of this routine, or upon error.
+ * Temporarily make the new namespace be the front of the search path, as
+ * well as the default creation target namespace. This will be undone at
+ * the end of this routine, or upon error.
*/
PushSpecialNamespace(namespaceId);
/*
- * Examine the list of commands embedded in the CREATE SCHEMA command,
- * and reorganize them into a sequentially executable order with no
- * forward references. Note that the result is still a list of raw
- * parsetrees in need of parse analysis --- we cannot, in general, run
- * analyze.c on one statement until we have actually executed the
- * prior ones.
+ * Examine the list of commands embedded in the CREATE SCHEMA command, and
+ * reorganize them into a sequentially executable order with no forward
+ * references. Note that the result is still a list of raw parsetrees in
+ * need of parse analysis --- we cannot, in general, run analyze.c on one
+ * statement until we have actually executed the prior ones.
*/
parsetree_list = analyzeCreateSchemaStmt(stmt);
@@ -174,8 +173,8 @@ RemoveSchema(List *names, DropBehavior behavior)
namespaceName);
/*
- * Do the deletion. Objects contained in the schema are removed by
- * means of their dependency links to the schema.
+ * Do the deletion. Objects contained in the schema are removed by means
+ * of their dependency links to the schema.
*/
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
@@ -254,7 +253,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
@@ -302,21 +301,21 @@ AlterSchemaOwner(const char *name, Oid newOwnerId)
AclResult aclresult;
/* Otherwise, must be owner of the existing object */
- if (!pg_namespace_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE,
name);
/* Must be able to become new owner */
- check_is_member_of_role(GetUserId(),newOwnerId);
+ check_is_member_of_role(GetUserId(), newOwnerId);
/*
* must have create-schema rights
*
- * NOTE: This is different from other alter-owner checks in
- * that the current user is checked for create privileges
- * instead of the destination owner. This is consistent
- * with the CREATE case for schemas. Because superusers
- * will always have this right, we need no special case for them.
+ * NOTE: This is different from other alter-owner checks in that the
+ * current user is checked for create privileges instead of the
+ * destination owner. This is consistent with the CREATE case for
+ * schemas. Because superusers will always have this right, we need
+ * no special case for them.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(),
ACL_CREATE);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 9bf801f2308..201fcbf0c6b 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.124 2005/10/02 23:50:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -219,17 +219,17 @@ DefineSequence(CreateSeqStmt *seq)
/*
* Two special hacks here:
*
- * 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
+ * 1. Since VACUUM does not process sequences, we have to force the tuple to
+ * have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
- * 2. Even though heap_insert emitted a WAL log record, we have to emit
- * an XLOG_SEQ_LOG record too, since (a) the heap_insert record will
- * not have the right xmin, and (b) REDO of the heap_insert record
- * would re-init page and sequence magic number would be lost. This
- * means two log records instead of one :-(
+ * 2. Even though heap_insert emitted a WAL log record, we have to emit an
+ * XLOG_SEQ_LOG record too, since (a) the heap_insert record will not have
+ * the right xmin, and (b) REDO of the heap_insert record would re-init
+ * page and sequence magic number would be lost. This means two log
+ * records instead of one :-(
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -237,12 +237,11 @@ DefineSequence(CreateSeqStmt *seq)
{
/*
- * Note that the "tuple" structure is still just a local tuple
- * record created by heap_formtuple; its t_data pointer doesn't
- * point at the disk buffer. To scribble on the disk buffer we
- * need to fetch the item pointer. But do the same to the local
- * tuple, since that will be the source for the WAL log record,
- * below.
+ * Note that the "tuple" structure is still just a local tuple record
+ * created by heap_formtuple; its t_data pointer doesn't point at the
+ * disk buffer. To scribble on the disk buffer we need to fetch the
+ * item pointer. But do the same to the local tuple, since that will
+ * be the source for the WAL log record, below.
*/
ItemId itemId;
Item item;
@@ -334,8 +333,8 @@ AlterSequence(AlterSeqStmt *stmt)
/* Clear local cache so that we don't think we have cached numbers */
elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget
- * cached values) */
+ elm->cached = new.last_value; /* last cached number (forget cached
+ * values) */
START_CRIT_SECTION();
@@ -456,14 +455,14 @@ nextval_internal(Oid relid)
}
/*
- * Decide whether we should emit a WAL log record. If so, force up
- * the fetch count to grab SEQ_LOG_VALS more values than we actually
- * need to cache. (These will then be usable without logging.)
+ * Decide whether we should emit a WAL log record. If so, force up the
+ * fetch count to grab SEQ_LOG_VALS more values than we actually need to
+ * cache. (These will then be usable without logging.)
*
- * If this is the first nextval after a checkpoint, we must force a new
- * WAL record to be written anyway, else replay starting from the
- * checkpoint would fail to advance the sequence past the logged
- * values. In this case we may as well fetch extra values.
+ * If this is the first nextval after a checkpoint, we must force a new WAL
+ * record to be written anyway, else replay starting from the checkpoint
+ * would fail to advance the sequence past the logged values. In this
+ * case we may as well fetch extra values.
*/
if (log < fetch)
{
@@ -486,8 +485,8 @@ nextval_internal(Oid relid)
while (fetch) /* try to fetch cache [+ log ] numbers */
{
/*
- * Check MAXVALUE for ascending sequences and MINVALUE for
- * descending sequences
+ * Check MAXVALUE for ascending sequences and MINVALUE for descending
+ * sequences
*/
if (incby > 0)
{
@@ -503,9 +502,9 @@ nextval_internal(Oid relid)
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = minv;
}
@@ -526,9 +525,9 @@ nextval_internal(Oid relid)
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = maxv;
}
@@ -721,8 +720,7 @@ do_setval(Oid relid, int64 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached
- * values) */
+ elm->cached = next; /* last cached number (forget cached values) */
START_CRIT_SECTION();
@@ -805,7 +803,7 @@ setval3_oid(PG_FUNCTION_ARGS)
/*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
@@ -869,15 +867,15 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
/*
* Allocate new seqtable entry if we didn't find one.
*
- * NOTE: seqtable entries remain in the list for the life of a backend.
- * If the sequence itself is deleted then the entry becomes wasted
- * memory, but it's small enough that this should not matter.
+ * NOTE: seqtable entries remain in the list for the life of a backend. If
+ * the sequence itself is deleted then the entry becomes wasted memory,
+ * but it's small enough that this should not matter.
*/
if (elm == NULL)
{
/*
- * Time to make a new seqtable entry. These entries live as long
- * as the backend does, so we use plain malloc for them.
+ * Time to make a new seqtable entry. These entries live as long as
+ * the backend does, so we use plain malloc for them.
*/
elm = (SeqTable) malloc(sizeof(SeqTableData));
if (elm == NULL)
@@ -1094,8 +1092,8 @@ init_params(List *options, Form_pg_sequence new, bool isInit)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
@@ -1106,8 +1104,8 @@ init_params(List *options, Form_pg_sequence new, bool isInit)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
@@ -1152,7 +1150,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 7df645af9d3..abec1a835d1 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.173 2005/10/03 02:45:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.174 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,10 +164,10 @@ static int findAttrByName(const char *attributeName, List *schema);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid);
+ Oid oldNspOid, Oid newNspOid);
static void AlterSeqNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid,
- const char *newNspName);
+ Oid oldNspOid, Oid newNspOid,
+ const char *newNspName);
static int transformColumnNameList(Oid relId, List *colList,
int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
@@ -238,14 +238,14 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab);
static void ATPostAlterTypeParse(char *cmd, List **wqueue);
static void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing);
static void change_owner_recurse_to_sequences(Oid relationOid,
- Oid newOwnerId);
+ Oid newOwnerId);
static void ATExecClusterOn(Relation rel, const char *indexName);
static void ATExecDropCluster(Relation rel);
static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
char *tablespacename);
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
- bool enable, bool skip_system);
+ bool enable, bool skip_system);
static void copy_relation_data(Relation rel, SMgrRelation dst);
static void update_ri_trigger_args(Oid relid,
const char *oldname,
@@ -281,8 +281,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
AttrNumber attnum;
/*
- * Truncate relname to appropriate length (probably a waste of time,
- * as parser should have done this already).
+ * Truncate relname to appropriate length (probably a waste of time, as
+ * parser should have done this already).
*/
StrNCpy(relname, stmt->relation->relname, NAMEDATALEN);
@@ -292,12 +292,12 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("ON COMMIT can only be used on temporary tables")));
+ errmsg("ON COMMIT can only be used on temporary tables")));
/*
- * Look up the namespace in which we are supposed to create the
- * relation. Check we have permission to create there. Skip check if
- * bootstrapping, since permissions machinery may not be working yet.
+ * Look up the namespace in which we are supposed to create the relation.
+ * Check we have permission to create there. Skip check if bootstrapping,
+ * since permissions machinery may not be working yet.
*/
namespaceId = RangeVarGetCreationNamespace(stmt->relation);
@@ -344,19 +344,19 @@ DefineRelation(CreateStmt *stmt, char relkind)
}
/*
- * Look up inheritance ancestors and generate relation schema,
- * including inherited attributes.
+ * Look up inheritance ancestors and generate relation schema, including
+ * inherited attributes.
*/
schema = MergeAttributes(schema, stmt->inhRelations,
stmt->relation->istemp,
- &inheritOids, &old_constraints, &parentOidCount);
+ &inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a relation descriptor from the relation schema and create
- * the relation. Note that in this stage only inherited (pre-cooked)
- * defaults and constraints will be included into the new relation.
- * (BuildDescForRelation takes care of the inherited defaults, but we
- * have to copy inherited constraints here.)
+ * Create a relation descriptor from the relation schema and create the
+ * relation. Note that in this stage only inherited (pre-cooked) defaults
+ * and constraints will be included into the new relation.
+ * (BuildDescForRelation takes care of the inherited defaults, but we have
+ * to copy inherited constraints here.)
*/
descriptor = BuildDescForRelation(schema);
@@ -380,11 +380,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL);
/*
- * In multiple-inheritance situations, it's possible to
- * inherit the same grandparent constraint through multiple
- * parents. Hence, discard inherited constraints that match as
- * to both name and expression. Otherwise, gripe if the names
- * conflict.
+ * In multiple-inheritance situations, it's possible to inherit
+ * the same grandparent constraint through multiple parents.
+ * Hence, discard inherited constraints that match as to both name
+ * and expression. Otherwise, gripe if the names conflict.
*/
for (i = 0; i < ncheck; i++)
{
@@ -444,25 +443,24 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Open the new relation and acquire exclusive lock on it. This isn't
- * really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock
- * manager from complaining about deadlock risks.
+ * really necessary for locking out other backends (since they can't see
+ * the new rel anyway until we commit), but it keeps the lock manager from
+ * complaining about deadlock risks.
*/
rel = relation_open(relationId, AccessExclusiveLock);
/*
- * Now add any newly specified column default values and CHECK
- * constraints to the new relation. These are passed to us in the
- * form of raw parsetrees; we need to transform them to executable
- * expression trees before they can be added. The most convenient way
- * to do that is to apply the parser's transformExpr routine, but
- * transformExpr doesn't work unless we have a pre-existing relation.
- * So, the transformation has to be postponed to this final step of
- * CREATE TABLE.
+ * Now add any newly specified column default values and CHECK constraints
+ * to the new relation. These are passed to us in the form of raw
+ * parsetrees; we need to transform them to executable expression trees
+ * before they can be added. The most convenient way to do that is to
+ * apply the parser's transformExpr routine, but transformExpr doesn't
+ * work unless we have a pre-existing relation. So, the transformation has
+ * to be postponed to this final step of CREATE TABLE.
*
- * Another task that's conveniently done at this step is to add
- * dependency links between columns and supporting relations (such as
- * SERIAL sequences).
+ * Another task that's conveniently done at this step is to add dependency
+ * links between columns and supporting relations (such as SERIAL
+ * sequences).
*
* First, scan schema to find new column defaults.
*/
@@ -528,7 +526,7 @@ RemoveRelation(const RangeVar *relation, DropBehavior behavior)
/*
* ExecuteTruncate
- * Executes a TRUNCATE command.
+ * Executes a TRUNCATE command.
*
* This is a multi-relation truncate. It first opens and grabs exclusive
* locks on all relations involved, checking permissions and otherwise
@@ -540,8 +538,8 @@ RemoveRelation(const RangeVar *relation, DropBehavior behavior)
void
ExecuteTruncate(List *relations)
{
- List *rels = NIL;
- ListCell *cell;
+ List *rels = NIL;
+ ListCell *cell;
foreach(cell, relations)
{
@@ -556,18 +554,18 @@ ExecuteTruncate(List *relations)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/* Permissions checks */
if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
- RelationGetRelationName(rel));
+ RelationGetRelationName(rel));
if (!allowSystemTableMods && IsSystemRelation(rel))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied: \"%s\" is a system catalog",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/*
* We can never allow truncation of shared or nailed-in-cache
@@ -578,7 +576,7 @@ ExecuteTruncate(List *relations)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot truncate system relation \"%s\"",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/*
* Don't allow truncate on temp tables of other backends ... their
@@ -587,7 +585,7 @@ ExecuteTruncate(List *relations)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
/* Save it into the list of rels to truncate */
rels = lappend(rels, rel);
@@ -704,20 +702,19 @@ MergeAttributes(List *schema, List *supers, bool istemp,
List *constraints = NIL;
int parentsWithOids = 0;
bool have_bogus_defaults = false;
- char *bogus_marker = "Bogus!"; /* marks conflicting
- * defaults */
+ char *bogus_marker = "Bogus!"; /* marks conflicting defaults */
int child_attno;
/*
- * Check for and reject tables with too many columns. We perform
- * this check relatively early for two reasons: (a) we don't run
- * the risk of overflowing an AttrNumber in subsequent code (b) an
- * O(n^2) algorithm is okay if we're processing <= 1600 columns,
- * but could take minutes to execute if the user attempts to
- * create a table with hundreds of thousands of columns.
+ * Check for and reject tables with too many columns. We perform this
+ * check relatively early for two reasons: (a) we don't run the risk of
+ * overflowing an AttrNumber in subsequent code (b) an O(n^2) algorithm is
+ * okay if we're processing <= 1600 columns, but could take minutes to
+ * execute if the user attempts to create a table with hundreds of
+ * thousands of columns.
*
- * Note that we also need to check that any we do not exceed this
- * figure after including columns from inherited relations.
+ * Note that we also need to check that any we do not exceed this figure
+ * after including columns from inherited relations.
*/
if (list_length(schema) > MaxHeapAttributeNumber)
ereport(ERROR,
@@ -728,9 +725,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/*
* Check for duplicate names in the explicit list of attributes.
*
- * Although we might consider merging such entries in the same way that
- * we handle name conflicts for inherited attributes, it seems to make
- * more sense to assume such conflicts are errors.
+ * Although we might consider merging such entries in the same way that we
+ * handle name conflicts for inherited attributes, it seems to make more
+ * sense to assume such conflicts are errors.
*/
foreach(entry, schema)
{
@@ -750,9 +747,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
}
/*
- * Scan the parents left-to-right, and merge their attributes to form
- * a list of inherited attributes (inhSchema). Also check to see if
- * we need to inherit an OID column.
+ * Scan the parents left-to-right, and merge their attributes to form a
+ * list of inherited attributes (inhSchema). Also check to see if we need
+ * to inherit an OID column.
*/
child_attno = 0;
foreach(entry, supers)
@@ -775,8 +772,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
@@ -804,10 +801,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
constr = tupleDesc->constr;
/*
- * newattno[] will contain the child-table attribute numbers for
- * the attributes of this parent table. (They are not the same
- * for parents after the first one, nor if we have dropped
- * columns.)
+ * newattno[] will contain the child-table attribute numbers for the
+ * attributes of this parent table. (They are not the same for
+ * parents after the first one, nor if we have dropped columns.)
*/
newattno = (AttrNumber *)
palloc(tupleDesc->natts * sizeof(AttrNumber));
@@ -828,8 +824,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
{
/*
* change_varattnos_of_a_node asserts that this is greater
- * than zero, so if anything tries to use it, we should
- * find out.
+ * than zero, so if anything tries to use it, we should find
+ * out.
*/
newattno[parent_attno - 1] = 0;
continue;
@@ -853,11 +849,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != attribute->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("inherited column \"%s\" has a type conflict",
- attributeName),
+ errmsg("inherited column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
@@ -909,15 +905,14 @@ MergeAttributes(List *schema, List *supers, bool istemp,
Assert(this_default != NULL);
/*
- * If default expr could contain any vars, we'd need to
- * fix 'em, but it can't; so default is ready to apply to
- * child.
+ * If default expr could contain any vars, we'd need to fix
+ * 'em, but it can't; so default is ready to apply to child.
*
- * If we already had a default from some prior parent, check
- * to see if they are the same. If so, no problem; if
- * not, mark the column as having a bogus default. Below,
- * we will complain if the bogus default isn't overridden
- * by the child schema.
+ * If we already had a default from some prior parent, check to
+ * see if they are the same. If so, no problem; if not, mark
+ * the column as having a bogus default. Below, we will
+ * complain if the bogus default isn't overridden by the child
+ * schema.
*/
Assert(def->raw_default == NULL);
if (def->cooked_default == NULL)
@@ -931,8 +926,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
}
/*
- * Now copy the constraints of this parent, adjusting attnos using
- * the completed newattno[] map
+ * Now copy the constraints of this parent, adjusting attnos using the
+ * completed newattno[] map
*/
if (constr && constr->num_check > 0)
{
@@ -958,17 +953,17 @@ MergeAttributes(List *schema, List *supers, bool istemp,
pfree(newattno);
/*
- * Close the parent rel, but keep our AccessShareLock on it until
- * xact commit. That will prevent someone else from deleting or
- * ALTERing the parent before the child is committed.
+ * Close the parent rel, but keep our AccessShareLock on it until xact
+ * commit. That will prevent someone else from deleting or ALTERing
+ * the parent before the child is committed.
*/
heap_close(relation, NoLock);
}
/*
* If we had no inherited attributes, the result schema is just the
- * explicitly declared columns. Otherwise, we need to merge the
- * declared columns into the inherited schema list.
+ * explicitly declared columns. Otherwise, we need to merge the declared
+ * columns into the inherited schema list.
*/
if (inhSchema != NIL)
{
@@ -991,8 +986,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
* have the same type and typmod.
*/
ereport(NOTICE,
- (errmsg("merging column \"%s\" with inherited definition",
- attributeName)));
+ (errmsg("merging column \"%s\" with inherited definition",
+ attributeName)));
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) ||
def->typename->typmod != newdef->typename->typmod)
@@ -1002,7 +997,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
@@ -1026,8 +1021,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
schema = inhSchema;
/*
- * Check that we haven't exceeded the legal # of columns after
- * merging in inherited columns.
+ * Check that we haven't exceeded the legal # of columns after merging
+ * in inherited columns.
*/
if (list_length(schema) > MaxHeapAttributeNumber)
ereport(ERROR,
@@ -1037,8 +1032,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
}
/*
- * If we found any conflicting parent default values, check to make
- * sure they were overridden by the child.
+ * If we found any conflicting parent default values, check to make sure
+ * they were overridden by the child.
*/
if (have_bogus_defaults)
{
@@ -1049,8 +1044,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (def->cooked_default == bogus_marker)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
- errmsg("column \"%s\" inherits conflicting default values",
- def->colname),
+ errmsg("column \"%s\" inherits conflicting default values",
+ def->colname),
errhint("To resolve the conflict, specify a default explicitly.")));
}
}
@@ -1083,9 +1078,9 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
var->varattno > 0)
{
/*
- * ??? the following may be a problem when the node is
- * multiply referenced though stringToNode() doesn't create
- * such a node currently.
+ * ??? the following may be a problem when the node is multiply
+ * referenced though stringToNode() doesn't create such a node
+ * currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@@ -1126,13 +1121,13 @@ StoreCatalogInheritance(Oid relationId, List *supers)
return;
/*
- * Store INHERITS information in pg_inherits using direct ancestors
- * only. Also enter dependencies on the direct ancestors, and make
- * sure they are marked with relhassubclass = true.
+ * Store INHERITS information in pg_inherits using direct ancestors only.
+ * Also enter dependencies on the direct ancestors, and make sure they are
+ * marked with relhassubclass = true.
*
- * (Once upon a time, both direct and indirect ancestors were found here
- * and then entered into pg_ipl. Since that catalog doesn't exist
- * anymore, there's no need to look for indirect ancestors.)
+ * (Once upon a time, both direct and indirect ancestors were found here and
+ * then entered into pg_ipl. Since that catalog doesn't exist anymore,
+ * there's no need to look for indirect ancestors.)
*/
relation = heap_open(InheritsRelationId, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1222,8 +1217,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we don't
- * need to update it, but we still need to issue an SI inval message.
+ * If the tuple already has the right relhassubclass setting, we don't need
+ * to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1282,14 +1277,14 @@ renameatt(Oid myrelid,
ListCell *indexoidscan;
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
/*
- * permissions checking. this would normally be done in utility.c,
- * but this particular routine is recursive.
+ * permissions checking. this would normally be done in utility.c, but
+ * this particular routine is recursive.
*
* normally, only the owner of a class can change its schema.
*/
@@ -1307,9 +1302,8 @@ renameatt(Oid myrelid,
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
- * any permissions or problems with duplicate attributes will cause the
- * whole transaction to abort, which is what we want -- all or
- * nothing.
+ * any permissions or problems with duplicate attributes will cause the whole
+ * transaction to abort, which is what we want -- all or nothing.
*/
if (recurse)
{
@@ -1320,9 +1314,9 @@ renameatt(Oid myrelid,
children = find_all_inheritors(myrelid);
/*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
+ * find_all_inheritors does the recursive search of the inheritance
+ * hierarchy, so all we have to do is process all of the relids in the
+ * list that it returns.
*/
foreach(child, children)
{
@@ -1337,8 +1331,8 @@ renameatt(Oid myrelid,
else
{
/*
- * If we are told not to recurse, there had better not be any
- * child tables; else the rename would put them out of step.
+ * If we are told not to recurse, there had better not be any child
+ * tables; else the rename would put them out of step.
*/
if (!recursing &&
find_inheritance_children(myrelid) != NIL)
@@ -1384,7 +1378,7 @@ renameatt(Oid myrelid,
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1396,8 +1390,7 @@ renameatt(Oid myrelid,
heap_freetuple(atttup);
/*
- * Update column names of indexes that refer to the column being
- * renamed.
+ * Update column names of indexes that refer to the column being renamed.
*/
indexoidlist = RelationGetIndexList(targetrelation);
@@ -1494,8 +1487,8 @@ renamerel(Oid myrelid, const char *newrelname)
bool relhastriggers;
/*
- * Grab an exclusive lock on the target table or index, which we will
- * NOT release until end of transaction.
+ * Grab an exclusive lock on the target table or index, which we will NOT
+ * release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
@@ -1512,8 +1505,7 @@ renamerel(Oid myrelid, const char *newrelname)
relhastriggers = (targetrelation->rd_rel->reltriggers > 0);
/*
- * Find relation's pg_class tuple, and make sure newrelname isn't in
- * use.
+ * Find relation's pg_class tuple, and make sure newrelname isn't in use.
*/
relrelation = heap_open(RelationRelationId, RowExclusiveLock);
@@ -1530,8 +1522,8 @@ renamerel(Oid myrelid, const char *newrelname)
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is
- * OK because it's a copy...)
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * because it's a copy...)
*/
namestrcpy(&(((Form_pg_class) GETSTRUCT(reltup))->relname), newrelname);
@@ -1641,8 +1633,8 @@ update_ri_trigger_args(Oid relid,
/*
* It is an RI trigger, so parse the tgargs bytea.
*
- * NB: we assume the field will never be compressed or moved out of
- * line; so does trigger.c ...
+ * NB: we assume the field will never be compressed or moved out of line;
+ * so does trigger.c ...
*/
tgnargs = pg_trigger->tgnargs;
val = (bytea *)
@@ -1663,11 +1655,11 @@ update_ri_trigger_args(Oid relid,
}
/*
- * Figure out which item(s) to look at. If the trigger is
- * primary-key type and attached to my rel, I should look at the
- * PK fields; if it is foreign-key type and attached to my rel, I
- * should look at the FK fields. But the opposite rule holds when
- * examining triggers found by tgconstrrel search.
+ * Figure out which item(s) to look at. If the trigger is primary-key
+ * type and attached to my rel, I should look at the PK fields; if it
+ * is foreign-key type and attached to my rel, I should look at the FK
+ * fields. But the opposite rule holds when examining triggers found
+ * by tgconstrrel search.
*/
examine_pk = (tg_type == RI_TRIGGER_PK) == (!fk_scan);
@@ -1763,9 +1755,9 @@ update_ri_trigger_args(Oid relid,
heap_close(tgrel, RowExclusiveLock);
/*
- * Increment cmd counter to make updates visible; this is needed in
- * case the same tuple has to be updated again by next pass (can
- * happen in case of a self-referential FK relationship).
+ * Increment cmd counter to make updates visible; this is needed in case
+ * the same tuple has to be updated again by next pass (can happen in case
+ * of a self-referential FK relationship).
*/
CommandCounterIncrement();
}
@@ -1870,14 +1862,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* Copy the original subcommand for each table. This avoids conflicts
* when different child tables need to make different parse
- * transformations (for example, the same column may have different
- * column numbers in different children).
+ * transformations (for example, the same column may have different column
+ * numbers in different children).
*/
cmd = copyObject(cmd);
/*
- * Do permissions checking, recursion to child tables if needed, and
- * any additional phase-1 processing needed.
+ * Do permissions checking, recursion to child tables if needed, and any
+ * additional phase-1 processing needed.
*/
switch (cmd->subtype)
{
@@ -1890,8 +1882,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */
/*
- * We allow defaults on views so that INSERT into a view can
- * have default-ish behavior. This works because the rewriter
+ * We allow defaults on views so that INSERT into a view can have
+ * default-ish behavior. This works because the rewriter
* substitutes default values into INSERTs before it expands
* rules.
*/
@@ -1943,8 +1935,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* Currently we recurse only for CHECK constraints, never for
- * foreign-key constraints. UNIQUE/PKEY constraints won't be
- * seen here.
+ * foreign-key constraints. UNIQUE/PKEY constraints won't be seen
+ * here.
*/
if (IsA(cmd->def, Constraint))
ATSimpleRecursion(wqueue, rel, cmd, recurse);
@@ -2042,11 +2034,11 @@ ATRewriteCatalogs(List **wqueue)
ListCell *ltab;
/*
- * We process all the tables "in parallel", one pass at a time. This
- * is needed because we may have to propagate work from one table to
- * another (specifically, ALTER TYPE on a foreign key's PK has to
- * dispatch the re-adding of the foreign key constraint to the other
- * table). Work can only be propagated into later passes, however.
+ * We process all the tables "in parallel", one pass at a time. This is
+ * needed because we may have to propagate work from one table to another
+ * (specifically, ALTER TYPE on a foreign key's PK has to dispatch the
+ * re-adding of the foreign key constraint to the other table). Work can
+ * only be propagated into later passes, however.
*/
for (pass = 0; pass < AT_NUM_PASSES; pass++)
{
@@ -2062,8 +2054,7 @@ ATRewriteCatalogs(List **wqueue)
continue;
/*
- * Exclusive lock was obtained by phase 1, needn't get it
- * again
+ * Exclusive lock was obtained by phase 1, needn't get it again
*/
rel = relation_open(tab->relid, NoLock);
@@ -2071,9 +2062,9 @@ ATRewriteCatalogs(List **wqueue)
ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd));
/*
- * After the ALTER TYPE pass, do cleanup work (this is not
- * done in ATExecAlterColumnType since it should be done only
- * once if multiple columns of a table are altered).
+ * After the ALTER TYPE pass, do cleanup work (this is not done in
+ * ATExecAlterColumnType since it should be done only once if
+ * multiple columns of a table are altered).
*/
if (pass == AT_PASS_ALTER_TYPE)
ATPostAlterTypeCleanup(wqueue, tab);
@@ -2083,8 +2074,8 @@ ATRewriteCatalogs(List **wqueue)
}
/*
- * Do an implicit CREATE TOAST TABLE if we executed any subcommands
- * that might have added a column or changed column storage.
+ * Do an implicit CREATE TOAST TABLE if we executed any subcommands that
+ * might have added a column or changed column storage.
*/
foreach(ltab, *wqueue)
{
@@ -2190,7 +2181,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL, true, true);
break;
- case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
+ case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL, false, true);
break;
default: /* oops */
@@ -2200,8 +2191,8 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
}
/*
- * Bump the command counter to ensure the next subcommand in the
- * sequence can see the changes so far
+ * Bump the command counter to ensure the next subcommand in the sequence
+ * can see the changes so far
*/
CommandCounterIncrement();
}
@@ -2220,8 +2211,8 @@ ATRewriteTables(List **wqueue)
AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
/*
- * We only need to rewrite the table if at least one column needs
- * to be recomputed.
+ * We only need to rewrite the table if at least one column needs to
+ * be recomputed.
*/
if (tab->newvals != NIL)
{
@@ -2236,8 +2227,8 @@ ATRewriteTables(List **wqueue)
/*
* We can never allow rewriting of shared or nailed-in-cache
- * relations, because we can't support changing their
- * relfilenode values.
+ * relations, because we can't support changing their relfilenode
+ * values.
*/
if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed)
ereport(ERROR,
@@ -2246,13 +2237,13 @@ ATRewriteTables(List **wqueue)
RelationGetRelationName(OldHeap))));
/*
- * Don't allow rewrite on temp tables of other backends ...
- * their local buffer manager is not going to cope.
+ * Don't allow rewrite on temp tables of other backends ... their
+ * local buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot rewrite temporary tables of other sessions")));
+ errmsg("cannot rewrite temporary tables of other sessions")));
/*
* Select destination tablespace (same as original unless user
@@ -2267,12 +2258,11 @@ ATRewriteTables(List **wqueue)
/*
* Create the new heap, using a temporary name in the same
- * namespace as the existing table. NOTE: there is some risk
- * of collision with user relnames. Working around this seems
- * more trouble than it's worth; in particular, we can't
- * create the new heap in a different namespace from the old,
- * or we will have problems with the TEMP status of temp
- * tables.
+ * namespace as the existing table. NOTE: there is some risk of
+ * collision with user relnames. Working around this seems more
+ * trouble than it's worth; in particular, we can't create the new
+ * heap in a different namespace from the old, or we will have
+ * problems with the TEMP status of temp tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName),
"pg_temp_%u", tab->relid);
@@ -2304,8 +2294,8 @@ ATRewriteTables(List **wqueue)
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast
- * table, which is all-new anyway). We do not need
+ * Rebuild each index on the relation (but not the toast table,
+ * which is all-new anyway). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tab->relid, false);
@@ -2313,16 +2303,15 @@ ATRewriteTables(List **wqueue)
else
{
/*
- * Test the current data within the table against new
- * constraints generated by ALTER TABLE commands, but don't
- * rebuild data.
+ * Test the current data within the table against new constraints
+ * generated by ALTER TABLE commands, but don't rebuild data.
*/
if (tab->constraints != NIL)
ATRewriteTable(tab, InvalidOid);
/*
- * If we had SET TABLESPACE but no reason to reconstruct
- * tuples, just do a block-by-block copy.
+ * If we had SET TABLESPACE but no reason to reconstruct tuples,
+ * just do a block-by-block copy.
*/
if (tab->newTableSpace)
ATExecSetTableSpace(tab->relid, tab->newTableSpace);
@@ -2331,10 +2320,10 @@ ATRewriteTables(List **wqueue)
/*
* Foreign key constraints are checked in a final pass, since (a) it's
- * generally best to examine each one separately, and (b) it's at
- * least theoretically possible that we have changed both relations of
- * the foreign key, and we'd better have finished both rewrites before
- * we try to read the tables.
+ * generally best to examine each one separately, and (b) it's at least
+ * theoretically possible that we have changed both relations of the
+ * foreign key, and we'd better have finished both rewrites before we try
+ * to read the tables.
*/
foreach(ltab, *wqueue)
{
@@ -2401,12 +2390,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
newrel = NULL;
/*
- * If we need to rewrite the table, the operation has to be propagated
- * to tables that use this table's rowtype as a column type.
+ * If we need to rewrite the table, the operation has to be propagated to
+ * tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but at
- * the moment a composite type does not enforce any constraints, so
- * it's not necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at the
+ * moment a composite type does not enforce any constraints, so it's not
+ * necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
@@ -2461,15 +2450,15 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
HeapScanDesc scan;
HeapTuple tuple;
MemoryContext oldCxt;
- List *dropped_attrs = NIL;
- ListCell *lc;
+ List *dropped_attrs = NIL;
+ ListCell *lc;
econtext = GetPerTupleExprContext(estate);
/*
- * Make tuple slots for old and new tuples. Note that even when
- * the tuples are the same, the tupDescs might not be (consider
- * ADD COLUMN without a default).
+ * Make tuple slots for old and new tuples. Note that even when the
+ * tuples are the same, the tupDescs might not be (consider ADD COLUMN
+ * without a default).
*/
oldslot = MakeSingleTupleTableSlot(oldTupDesc);
newslot = MakeSingleTupleTableSlot(newTupDesc);
@@ -2483,9 +2472,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
/*
* Any attributes that are dropped according to the new tuple
- * descriptor can be set to NULL. We precompute the list of
- * dropped attributes to avoid needing to do so in the
- * per-tuple loop.
+ * descriptor can be set to NULL. We precompute the list of dropped
+ * attributes to avoid needing to do so in the per-tuple loop.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
@@ -2500,8 +2488,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
scan = heap_beginscan(oldrel, SnapshotNow, 0, NULL);
/*
- * Switch to per-tuple memory context and reset it for each
- * tuple produced, so we don't leak memory.
+ * Switch to per-tuple memory context and reset it for each tuple
+ * produced, so we don't leak memory.
*/
oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
@@ -2509,7 +2497,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
{
if (newrel)
{
- Oid tupOid = InvalidOid;
+ Oid tupOid = InvalidOid;
/* Extract data from old tuple */
heap_deform_tuple(tuple, oldTupDesc, values, isnull);
@@ -2517,12 +2505,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
tupOid = HeapTupleGetOid(tuple);
/* Set dropped attributes to null in new tuple */
- foreach (lc, dropped_attrs)
+ foreach(lc, dropped_attrs)
isnull[lfirst_int(lc)] = true;
/*
- * Process supplied expressions to replace selected
- * columns. Expression inputs come from the old tuple.
+ * Process supplied expressions to replace selected columns.
+ * Expression inputs come from the old tuple.
*/
ExecStoreTuple(tuple, oldslot, InvalidBuffer, false);
econtext->ecxt_scantuple = oldslot;
@@ -2533,14 +2521,13 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
econtext,
- &isnull[ex->attnum - 1],
+ &isnull[ex->attnum - 1],
NULL);
}
/*
- * Form the new tuple. Note that we don't explicitly
- * pfree it, since the per-tuple memory context will
- * be reset shortly.
+ * Form the new tuple. Note that we don't explicitly pfree it,
+ * since the per-tuple memory context will be reset shortly.
*/
tuple = heap_form_tuple(newTupDesc, values, isnull);
@@ -2575,10 +2562,10 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
&isnull);
if (isnull)
ereport(ERROR,
- (errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("column \"%s\" contains null values",
- get_attname(tab->relid,
- con->attnum))));
+ (errcode(ERRCODE_NOT_NULL_VIOLATION),
+ errmsg("column \"%s\" contains null values",
+ get_attname(tab->relid,
+ con->attnum))));
}
break;
case CONSTR_FOREIGN:
@@ -2706,9 +2693,9 @@ ATSimpleRecursion(List **wqueue, Relation rel,
children = find_all_inheritors(relid);
/*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
+ * find_all_inheritors does the recursive search of the inheritance
+ * hierarchy, so all we have to do is process all of the relids in the
+ * list that it returns.
*/
foreach(child, children)
{
@@ -2775,8 +2762,8 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName)
HeapTuple depTup;
/*
- * We scan pg_depend to find those things that depend on the rowtype.
- * (We assume we can ignore refobjsubid for a rowtype.)
+ * We scan pg_depend to find those things that depend on the rowtype. (We
+ * assume we can ignore refobjsubid for a rowtype.)
*/
depRel = heap_open(DependRelationId, AccessShareLock);
@@ -2819,9 +2806,8 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName)
else if (OidIsValid(rel->rd_rel->reltype))
{
/*
- * A view or composite type itself isn't a problem, but we
- * must recursively check for indirect dependencies via its
- * rowtype.
+ * A view or composite type itself isn't a problem, but we must
+ * recursively check for indirect dependencies via its rowtype.
*/
find_composite_type_dependencies(rel->rd_rel->reltype,
origTblName);
@@ -2851,9 +2837,9 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
/*
* Recurse to add the column to child classes, if requested.
*
- * We must recurse one level at a time, so that multiply-inheriting
- * children are visited the right number of times and end up with the
- * right attinhcount.
+ * We must recurse one level at a time, so that multiply-inheriting children
+ * are visited the right number of times and end up with the right
+ * attinhcount.
*/
if (recurse)
{
@@ -2871,8 +2857,8 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
else
{
/*
- * If we are told not to recurse, there had better not be any
- * child tables; else the addition would put them out of step.
+ * If we are told not to recurse, there had better not be any child
+ * tables; else the addition would put them out of step.
*/
if (find_inheritance_children(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
@@ -2903,8 +2889,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
attrdesc = heap_open(AttributeRelationId, RowExclusiveLock);
/*
- * Are we adding the column to a recursion child? If so, check
- * whether to merge with an existing definition for the column.
+ * Are we adding the column to a recursion child? If so, check whether to
+ * merge with an existing definition for the column.
*/
if (colDef->inhcount > 0)
{
@@ -2922,7 +2908,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), colDef->colname)));
+ RelationGetRelationName(rel), colDef->colname)));
/* Bump the existing child att's inhcount */
childatt->attinhcount++;
@@ -2933,8 +2919,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/* Inform the user about the merge */
ereport(NOTICE,
- (errmsg("merging definition of column \"%s\" for child \"%s\"",
- colDef->colname, RelationGetRelationName(rel))));
+ (errmsg("merging definition of column \"%s\" for child \"%s\"",
+ colDef->colname, RelationGetRelationName(rel))));
heap_close(attrdesc, RowExclusiveLock);
return;
@@ -2950,9 +2936,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
elog(ERROR, "cache lookup failed for relation %u", myrelid);
/*
- * this test is deliberately not attisdropped-aware, since if one
- * tries to add a column matching a dropped column name, it's gonna
- * fail anyway.
+ * this test is deliberately not attisdropped-aware, since if one tries to
+ * add a column matching a dropped column name, it's gonna fail anyway.
*/
if (SearchSysCacheExists(ATTNAME,
ObjectIdGetDatum(myrelid),
@@ -3054,30 +3039,30 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything, because
- * that effectively means that the default is NULL. The heap tuple
- * access routines always check for attnum > # of attributes in tuple,
- * and return NULL if so, so without any modification of the tuple
- * data we will get the effect of NULL values in the new column.
+ * If there is no default, Phase 3 doesn't have to do anything, because that
+ * effectively means that the default is NULL. The heap tuple access
+ * routines always check for attnum > # of attributes in tuple, and return
+ * NULL if so, so without any modification of the tuple data we will get
+ * the effect of NULL values in the new column.
*
- * An exception occurs when the new column is of a domain type: the
- * domain might have a NOT NULL constraint, or a check constraint that
- * indirectly rejects nulls. If there are any domain constraints then
- * we construct an explicit NULL default value that will be passed through
- * CoerceToDomain processing. (This is a tad inefficient, since it
- * causes rewriting the table which we really don't have to do, but
- * the present design of domain processing doesn't offer any simple way
- * of checking the constraints more directly.)
+ * An exception occurs when the new column is of a domain type: the domain
+ * might have a NOT NULL constraint, or a check constraint that indirectly
+ * rejects nulls. If there are any domain constraints then we construct
+ * an explicit NULL default value that will be passed through
+ * CoerceToDomain processing. (This is a tad inefficient, since it causes
+ * rewriting the table which we really don't have to do, but the present
+ * design of domain processing doesn't offer any simple way of checking
+ * the constraints more directly.)
*
* Note: we use build_column_default, and not just the cooked default
- * returned by AddRelationRawConstraints, so that the right thing
- * happens when a datatype's default applies.
+ * returned by AddRelationRawConstraints, so that the right thing happens
+ * when a datatype's default applies.
*/
defval = (Expr *) build_column_default(rel, attribute->attnum);
if (!defval && GetDomainConstraints(typeOid) != NIL)
{
- Oid basetype = getBaseType(typeOid);
+ Oid basetype = getBaseType(typeOid);
defval = (Expr *) makeNullConst(basetype);
defval = (Expr *) coerce_to_target_type(NULL,
@@ -3355,8 +3340,8 @@ ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue)
{
/*
* We do our own permission checking because (a) we want to allow SET
- * STATISTICS on indexes (for expressional index columns), and (b) we
- * want to allow SET STATISTICS on system catalogs without requiring
+ * STATISTICS on indexes (for expressional index columns), and (b) we want
+ * to allow SET STATISTICS on system catalogs without requiring
* allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
@@ -3481,8 +3466,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue)
colName)));
/*
- * safety check: do not allow toasted storage modes unless column
- * datatype is TOAST-aware.
+ * safety check: do not allow toasted storage modes unless column datatype
+ * is TOAST-aware.
*/
if (newstorage == 'p' || TypeIsToastable(attrtuple->atttypid))
attrtuple->attstorage = newstorage;
@@ -3560,8 +3545,8 @@ ATExecDropColumn(Relation rel, const char *colName,
/*
* Propagate to children as appropriate. Unlike most other ALTER
- * routines, we have to do this one level of recursion at a time; we
- * can't use find_all_inheritors to do it in one pass.
+ * routines, we have to do this one level of recursion at a time; we can't
+ * use find_all_inheritors to do it in one pass.
*/
children = find_inheritance_children(RelationGetRelid(rel));
@@ -3593,8 +3578,8 @@ ATExecDropColumn(Relation rel, const char *colName,
{
/*
* If the child column has other definition sources, just
- * decrement its inheritance count; if not, recurse to
- * delete it.
+ * decrement its inheritance count; if not, recurse to delete
+ * it.
*/
if (childatt->attinhcount == 1 && !childatt->attislocal)
{
@@ -3618,9 +3603,9 @@ ATExecDropColumn(Relation rel, const char *colName,
else
{
/*
- * If we were told to drop ONLY in this table (no
- * recursion), we need to mark the inheritors' attribute
- * as locally defined rather than inherited.
+ * If we were told to drop ONLY in this table (no recursion),
+ * we need to mark the inheritors' attribute as locally
+ * defined rather than inherited.
*/
childatt->attinhcount--;
childatt->attislocal = true;
@@ -3661,7 +3646,7 @@ ATExecDropColumn(Relation rel, const char *colName,
class_rel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
+ ObjectIdGetDatum(RelationGetRelid(rel)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u",
@@ -3734,8 +3719,8 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
/*
* Currently, we only expect to see CONSTR_CHECK nodes
* arriving here (see the preprocessing done in
- * parser/analyze.c). Use a switch anyway to make it
- * easier to add more code later.
+ * parser/analyze.c). Use a switch anyway to make it easier
+ * to add more code later.
*/
switch (constr->contype)
{
@@ -3745,12 +3730,11 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
ListCell *lcon;
/*
- * Call AddRelationRawConstraints to do the
- * work. It returns a list of cooked
- * constraints.
+ * Call AddRelationRawConstraints to do the work.
+ * It returns a list of cooked constraints.
*/
newcons = AddRelationRawConstraints(rel, NIL,
- list_make1(constr));
+ list_make1(constr));
/* Add each constraint to Phase 3's queue */
foreach(lcon, newcons)
{
@@ -3798,7 +3782,7 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
else
fkconstraint->constr_name =
ChooseConstraintName(RelationGetRelationName(rel),
- strVal(linitial(fkconstraint->fk_attrs)),
+ strVal(linitial(fkconstraint->fk_attrs)),
"fkey",
RelationGetNamespace(rel),
NIL);
@@ -3838,19 +3822,19 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that someone doesn't
- * delete rows out from under us. (Although a lesser lock would do for
- * that purpose, we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock will just create a
- * risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't delete
+ * rows out from under us. (Although a lesser lock would do for that
+ * purpose, we'll need exclusive lock anyway to add triggers to the pk
+ * table; trying to start with a lesser lock will just create a risk of
+ * deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
/*
* Validity and permissions checks
*
- * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER,
- * but we may as well error out sooner instead of later.
+ * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but
+ * we may as well error out sooner instead of later.
*/
if (pkrel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
@@ -3877,12 +3861,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
RelationGetRelationName(rel));
/*
- * Disallow reference from permanent table to temp table or vice
- * versa. (The ban on perm->temp is for fairly obvious reasons. The
- * ban on temp->perm is because other backends might need to run the
- * RI triggers on the perm table, but they can't reliably see tuples
- * the owning backend has created in the temp table, because
- * non-shared buffers are used for temp tables.)
+ * Disallow reference from permanent table to temp table or vice versa.
+ * (The ban on perm->temp is for fairly obvious reasons. The ban on
+ * temp->perm is because other backends might need to run the RI triggers
+ * on the perm table, but they can't reliably see tuples the owning
+ * backend has created in the temp table, because non-shared buffers are
+ * used for temp tables.)
*/
if (isTempNamespace(RelationGetNamespace(pkrel)))
{
@@ -3900,8 +3884,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
}
/*
- * Look up the referencing attributes to make sure they exist, and
- * record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and record
+ * their attnums and type OIDs.
*/
MemSet(pkattnum, 0, sizeof(pkattnum));
MemSet(fkattnum, 0, sizeof(fkattnum));
@@ -3914,11 +3898,10 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted, lookup
- * the definition of the primary key and use it. Otherwise, validate
- * the supplied attribute list. In either case, discover the index
- * OID and index opclasses, and the attnums and type OIDs of the
- * attributes.
+ * If the attribute list for the referenced table was omitted, lookup the
+ * definition of the primary key and use it. Otherwise, validate the
+ * supplied attribute list. In either case, discover the index OID and
+ * index opclasses, and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
@@ -3946,15 +3929,15 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
for (i = 0; i < numpks; i++)
{
/*
- * pktypoid[i] is the primary key table's i'th key's type
- * fktypoid[i] is the foreign key table's i'th key's type
+ * pktypoid[i] is the primary key table's i'th key's type fktypoid[i]
+ * is the foreign key table's i'th key's type
*
- * Note that we look for an operator with the PK type on the left;
- * when the types are different this is critical because the PK
- * index will need operators with the indexkey on the left.
- * (Ordinarily both commutator operators will exist if either
- * does, but we won't get the right answer from the test below on
- * opclass membership unless we select the proper operator.)
+ * Note that we look for an operator with the PK type on the left; when
+ * the types are different this is critical because the PK index will
+ * need operators with the indexkey on the left. (Ordinarily both
+ * commutator operators will exist if either does, but we won't get
+ * the right answer from the test below on opclass membership unless
+ * we select the proper operator.)
*/
Operator o = oper(list_make1(makeString("=")),
pktypoid[i], fktypoid[i], true);
@@ -3967,15 +3950,15 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of incompatible types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
/*
- * Check that the found operator is compatible with the PK index,
- * and generate a warning if not, since otherwise costly seqscans
- * will be incurred to check FK validity.
+ * Check that the found operator is compatible with the PK index, and
+ * generate a warning if not, since otherwise costly seqscans will be
+ * incurred to check FK validity.
*/
if (!op_in_opclass(oprid(o), opclasses[i]))
ereport(WARNING,
@@ -3984,8 +3967,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of different types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
@@ -3993,8 +3976,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
}
/*
- * Tell Phase 3 to check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Tell Phase 3 to check that the constraint is satisfied by existing rows
+ * (we can skip this during table creation).
*/
if (!fkconstraint->skip_validation)
{
@@ -4072,8 +4055,8 @@ transformColumnNameList(Oid relId, List *colList,
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
@@ -4111,9 +4094,9 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
int i;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache until we find one marked
- * primary key (hopefully there isn't more than one such).
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache until we find one marked primary key
+ * (hopefully there isn't more than one such).
*/
*indexOid = InvalidOid;
@@ -4145,8 +4128,8 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (!OidIsValid(*indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no primary key for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no primary key for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/* Must get indclass the hard way */
indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
@@ -4167,7 +4150,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
atttypids[i] = attnumTypeId(pkrel, pkattno);
opclasses[i] = indclass->values[i];
*attnamelist = lappend(*attnamelist,
- makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno)))));
+ makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno)))));
}
ReleaseSysCache(indexTuple);
@@ -4194,9 +4177,9 @@ transformFkeyCheckAttrs(Relation pkrel,
ListCell *indexoidscan;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache, and match unique indexes
- * to the list of attnums we are given.
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache, and match unique indexes to the list
+ * of attnums we are given.
*/
indexoidlist = RelationGetIndexList(pkrel);
@@ -4235,8 +4218,8 @@ transformFkeyCheckAttrs(Relation pkrel,
indclass = (oidvector *) DatumGetPointer(indclassDatum);
/*
- * The given attnum list may match the index columns in any
- * order. Check that each list is a subset of the other.
+ * The given attnum list may match the index columns in any order.
+ * Check that each list is a subset of the other.
*/
for (i = 0; i < numattrs; i++)
{
@@ -4312,9 +4295,9 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint,
return;
/*
- * Scan through each tuple, calling RI_FKey_check_ins (insert trigger)
- * as if that tuple had just been inserted. If any of those fail, it
- * should ereport(ERROR) and that's that.
+ * Scan through each tuple, calling RI_FKey_check_ins (insert trigger) as
+ * if that tuple had just been inserted. If any of those fail, it should
+ * ereport(ERROR) and that's that.
*/
MemSet(&trig, 0, sizeof(trig));
trig.tgoid = InvalidOid;
@@ -4326,8 +4309,8 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint,
trig.tginitdeferred = FALSE;
trig.tgargs = (char **) palloc(sizeof(char *) *
- (4 + list_length(fkconstraint->fk_attrs)
- + list_length(fkconstraint->pk_attrs)));
+ (4 + list_length(fkconstraint->fk_attrs)
+ + list_length(fkconstraint->pk_attrs)));
trig.tgargs[0] = trig.tgname;
trig.tgargs[1] = RelationGetRelationName(rel);
@@ -4426,9 +4409,9 @@ CreateFKCheckTrigger(RangeVar *myRel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
if (list_length(fkconstraint->fk_attrs) != list_length(fkconstraint->pk_attrs))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
@@ -4465,8 +4448,7 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
constrobj;
/*
- * Reconstruct a RangeVar for my relation (not passed in,
- * unfortunately).
+ * Reconstruct a RangeVar for my relation (not passed in, unfortunately).
*/
myRel = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)),
pstrdup(RelationGetRelationName(rel)));
@@ -4484,8 +4466,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
CommandCounterIncrement();
/*
- * Build and execute a CREATE CONSTRAINT TRIGGER statement for the
- * CHECK action for both INSERTs and UPDATEs on the referencing table.
+ * Build and execute a CREATE CONSTRAINT TRIGGER statement for the CHECK
+ * action for both INSERTs and UPDATEs on the referencing table.
*/
CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, true);
CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, false);
@@ -4543,9 +4525,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
forboth(fk_attr, fkconstraint->fk_attrs,
pk_attr, fkconstraint->pk_attrs)
{
@@ -4613,9 +4595,9 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
forboth(fk_attr, fkconstraint->fk_attrs,
pk_attr, fkconstraint->pk_attrs)
{
@@ -4690,8 +4672,8 @@ ATExecDropConstraint(Relation rel, const char *constrName,
/* Otherwise if more than one constraint deleted, notify */
else if (deleted > 1)
ereport(NOTICE,
- (errmsg("multiple constraints named \"%s\" were dropped",
- constrName)));
+ (errmsg("multiple constraints named \"%s\" were dropped",
+ constrName)));
}
}
@@ -4750,12 +4732,12 @@ ATPrepAlterColumnType(List **wqueue,
CheckAttributeType(colName, targettype);
/*
- * Set up an expression to transform the old data value to the new
- * type. If a USING option was given, transform and use that
- * expression, else just take the old value and try to coerce it. We
- * do this first so that type incompatibility can be detected before
- * we waste effort, and because we need the expression to be parsed
- * against the original table rowtype.
+ * Set up an expression to transform the old data value to the new type.
+ * If a USING option was given, transform and use that expression, else
+ * just take the old value and try to coerce it. We do this first so that
+ * type incompatibility can be detected before we waste effort, and
+ * because we need the expression to be parsed against the original table
+ * rowtype.
*/
if (cmd->transform)
{
@@ -4775,17 +4757,17 @@ ATPrepAlterColumnType(List **wqueue,
if (expression_returns_set(transform))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("transform expression must not return a set")));
+ errmsg("transform expression must not return a set")));
/* No subplans or aggregates, either... */
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in transform expression")));
+ errmsg("cannot use subquery in transform expression")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in transform expression")));
+ errmsg("cannot use aggregate function in transform expression")));
}
else
{
@@ -4818,9 +4800,9 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we
- * are told not to recurse, there had better not be any child tables;
- * else the alter would put them out of step.
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * told not to recurse, there had better not be any child tables; else the
+ * alter would put them out of step.
*/
if (recurse)
ATSimpleRecursion(wqueue, rel, cmd, recurse);
@@ -4875,17 +4857,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
targettype = HeapTupleGetOid(typeTuple);
/*
- * If there is a default expression for the column, get it and ensure
- * we can coerce it to the new datatype. (We must do this before
- * changing the column type, because build_column_default itself will
- * try to coerce, and will not issue the error message we want if it
- * fails.)
+ * If there is a default expression for the column, get it and ensure we
+ * can coerce it to the new datatype. (We must do this before changing
+ * the column type, because build_column_default itself will try to
+ * coerce, and will not issue the error message we want if it fails.)
*
- * We remove any implicit coercion steps at the top level of the old
- * default expression; this has been agreed to satisfy the principle
- * of least surprise. (The conversion to the new column type should
- * act like it started from what the user sees as the stored expression,
- * and the implicit coercions aren't going to be shown.)
+ * We remove any implicit coercion steps at the top level of the old default
+ * expression; this has been agreed to satisfy the principle of least
+ * surprise. (The conversion to the new column type should act like it
+ * started from what the user sees as the stored expression, and the
+ * implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef)
{
@@ -4893,32 +4874,32 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
Assert(defaultexpr);
defaultexpr = strip_implicit_coercions(defaultexpr);
defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
- defaultexpr, exprType(defaultexpr),
+ defaultexpr, exprType(defaultexpr),
targettype, typename->typmod,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
if (defaultexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("default for column \"%s\" cannot be cast to type \"%s\"",
- colName, TypeNameToString(typename))));
+ errmsg("default for column \"%s\" cannot be cast to type \"%s\"",
+ colName, TypeNameToString(typename))));
}
else
defaultexpr = NULL;
/*
- * Find everything that depends on the column (constraints, indexes,
- * etc), and record enough information to let us recreate the objects.
+ * Find everything that depends on the column (constraints, indexes, etc),
+ * and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to
- * save the info before executing ALTER TYPE, though, else the
- * deparser will get confused.
+ * performed all the individual ALTER TYPE operations. We have to save
+ * the info before executing ALTER TYPE, though, else the deparser will
+ * get confused.
*
- * There could be multiple entries for the same object, so we must check
- * to ensure we process each one only once. Note: we assume that an
- * index that implements a constraint will not show a direct
- * dependency on the column.
+ * There could be multiple entries for the same object, so we must check to
+ * ensure we process each one only once. Note: we assume that an index
+ * that implements a constraint will not show a direct dependency on the
+ * column.
*/
depRel = heap_open(DependRelationId, RowExclusiveLock);
@@ -4963,16 +4944,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
{
tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedIndexDefs = lappend(tab->changedIndexDefs,
- pg_get_indexdef_string(foundObject.objectId));
+ pg_get_indexdef_string(foundObject.objectId));
}
}
else if (relKind == RELKIND_SEQUENCE)
{
/*
- * This must be a SERIAL column's sequence. We
- * need not do anything to it.
+ * This must be a SERIAL column's sequence. We need
+ * not do anything to it.
*/
Assert(foundObject.objectSubId == 0);
}
@@ -4990,9 +4971,9 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId))
{
tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedConstraintDefs = lappend(tab->changedConstraintDefs,
- pg_get_constraintdef_string(foundObject.objectId));
+ pg_get_constraintdef_string(foundObject.objectId));
}
break;
@@ -5009,8 +4990,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
case OCLASS_DEFAULT:
/*
- * Ignore the column's default expression, since we will
- * fix it below.
+ * Ignore the column's default expression, since we will fix
+ * it below.
*/
Assert(defaultexpr);
break;
@@ -5026,8 +5007,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
case OCLASS_SCHEMA:
/*
- * We don't expect any of these sorts of objects to depend
- * on a column.
+ * We don't expect any of these sorts of objects to depend on
+ * a column.
*/
elog(ERROR, "unexpected object depending on column: %s",
getObjectDescription(&foundObject));
@@ -5043,8 +5024,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
/*
* Now scan for dependencies of this column on other things. The only
- * thing we should find is the dependency on the column datatype,
- * which we want to remove.
+ * thing we should find is the dependency on the column datatype, which we
+ * want to remove.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
@@ -5105,17 +5086,16 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype);
/*
- * Drop any pg_statistic entry for the column, since it's now wrong
- * type
+ * Drop any pg_statistic entry for the column, since it's now wrong type
*/
RemoveStatistics(RelationGetRelid(rel), attnum);
/*
- * Update the default, if present, by brute force --- remove and
- * re-add the default. Probably unsafe to take shortcuts, since the
- * new version may well have additional dependencies. (It's okay to
- * do this now, rather than after other ALTER TYPE commands, since the
- * default won't depend on other column types.)
+ * Update the default, if present, by brute force --- remove and re-add
+ * the default. Probably unsafe to take shortcuts, since the new version
+ * may well have additional dependencies. (It's okay to do this now,
+ * rather than after other ALTER TYPE commands, since the default won't
+ * depend on other column types.)
*/
if (defaultexpr)
{
@@ -5123,8 +5103,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
CommandCounterIncrement();
/*
- * We use RESTRICT here for safety, but at present we do not
- * expect anything to depend on the default.
+ * We use RESTRICT here for safety, but at present we do not expect
+ * anything to depend on the default.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true);
@@ -5147,12 +5127,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
ListCell *l;
/*
- * Re-parse the index and constraint definitions, and attach them to
- * the appropriate work queue entries. We do this before dropping
- * because in the case of a FOREIGN KEY constraint, we might not yet
- * have exclusive lock on the table the constraint is attached to, and
- * we need to get that before dropping. It's safe because the parser
- * won't actually look at the catalogs to detect the existing entry.
+ * Re-parse the index and constraint definitions, and attach them to the
+ * appropriate work queue entries. We do this before dropping because in
+ * the case of a FOREIGN KEY constraint, we might not yet have exclusive
+ * lock on the table the constraint is attached to, and we need to get
+ * that before dropping. It's safe because the parser won't actually look
+ * at the catalogs to detect the existing entry.
*/
foreach(l, tab->changedIndexDefs)
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
@@ -5160,10 +5140,10 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
/*
- * Now we can drop the existing constraints and indexes ---
- * constraints first, since some of them might depend on the indexes.
- * It should be okay to use DROP_RESTRICT here, since nothing else
- * should be depending on these objects.
+ * Now we can drop the existing constraints and indexes --- constraints
+ * first, since some of them might depend on the indexes. It should be
+ * okay to use DROP_RESTRICT here, since nothing else should be depending
+ * on these objects.
*/
foreach(l, tab->changedConstraintOids)
{
@@ -5182,8 +5162,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
}
/*
- * The objects will get recreated during subsequent passes over the
- * work queue.
+ * The objects will get recreated during subsequent passes over the work
+ * queue.
*/
}
@@ -5195,8 +5175,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
ListCell *list_item;
/*
- * We expect that we only have to do raw parsing and parse analysis,
- * not any rule rewriting, since these will all be utility statements.
+ * We expect that we only have to do raw parsing and parse analysis, not
+ * any rule rewriting, since these will all be utility statements.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
@@ -5209,9 +5189,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
}
/*
- * Attach each generated command to the proper place in the work
- * queue. Note this could result in creation of entirely new
- * work-queue entries.
+ * Attach each generated command to the proper place in the work queue.
+ * Note this could result in creation of entirely new work-queue entries.
*/
foreach(list_item, querytree_list)
{
@@ -5294,8 +5273,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
Form_pg_class tuple_class;
/*
- * Get exclusive lock till end of transaction on the target table.
- * Use relation_open so that we can work on indexes and sequences.
+ * Get exclusive lock till end of transaction on the target table. Use
+ * relation_open so that we can work on indexes and sequences.
*/
target_rel = relation_open(relationOid, AccessExclusiveLock);
@@ -5368,11 +5347,11 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
/* Superusers can always do it */
if (!superuser())
{
- Oid namespaceOid = tuple_class->relnamespace;
+ Oid namespaceOid = tuple_class->relnamespace;
AclResult aclresult;
/* Otherwise, must be owner of the existing object */
- if (!pg_class_ownercheck(relationOid,GetUserId()))
+ if (!pg_class_ownercheck(relationOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
RelationGetRelationName(target_rel));
@@ -5426,9 +5405,9 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId);
/*
- * If we are operating on a table, also change the ownership of
- * any indexes and sequences that belong to the table, as well as
- * the table's toast table (if it has one)
+ * If we are operating on a table, also change the ownership of any
+ * indexes and sequences that belong to the table, as well as the
+ * table's toast table (if it has one)
*/
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
@@ -5475,23 +5454,23 @@ change_owner_recurse_to_sequences(Oid relationOid, Oid newOwnerId)
{
Relation depRel;
SysScanDesc scan;
- ScanKeyData key[2];
+ ScanKeyData key[2];
HeapTuple tup;
/*
- * SERIAL sequences are those having an internal dependency on one
- * of the table's columns (we don't care *which* column, exactly).
+ * SERIAL sequences are those having an internal dependency on one of the
+ * table's columns (we don't care *which* column, exactly).
*/
depRel = heap_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
- Anum_pg_depend_refclassid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(RelationRelationId));
+ Anum_pg_depend_refclassid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
- Anum_pg_depend_refobjid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(relationOid));
+ Anum_pg_depend_refobjid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(relationOid));
/* we leave refobjsubid unspecified */
scan = systable_beginscan(depRel, DependReferenceIndexId, true,
@@ -5605,7 +5584,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
if (!OidIsValid(tablespaceId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", tablespacename)));
+ errmsg("tablespace \"%s\" does not exist", tablespacename)));
/* Check its permissions */
aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
@@ -5616,7 +5595,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
if (OidIsValid(tab->newTableSpace))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot have multiple SET TABLESPACE subcommands")));
+ errmsg("cannot have multiple SET TABLESPACE subcommands")));
tab->newTableSpace = tablespaceId;
}
@@ -5650,13 +5629,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace)
RelationGetRelationName(rel))));
/*
- * Don't allow moving temp tables of other backends ... their local
- * buffer manager is not going to cope.
+ * Don't allow moving temp tables of other backends ... their local buffer
+ * manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move temporary tables of other sessions")));
+ errmsg("cannot move temporary tables of other sessions")));
/*
* No work if no change in tablespace.
@@ -5738,16 +5717,16 @@ copy_relation_data(Relation rel, SMgrRelation dst)
Page page = (Page) buf;
/*
- * Since we copy the file directly without looking at the shared
- * buffers, we'd better first flush out any pages of the source
- * relation that are in shared buffers. We assume no new changes
- * will be made while we are holding exclusive lock on the rel.
+ * Since we copy the file directly without looking at the shared buffers,
+ * we'd better first flush out any pages of the source relation that are
+ * in shared buffers. We assume no new changes will be made while we are
+ * holding exclusive lock on the rel.
*/
FlushRelationBuffers(rel);
/*
- * We need to log the copied data in WAL iff WAL archiving is enabled
- * AND it's not a temp rel.
+ * We need to log the copied data in WAL iff WAL archiving is enabled AND
+ * it's not a temp rel.
*/
use_wal = XLogArchivingActive() && !rel->rd_istemp;
@@ -5791,27 +5770,26 @@ copy_relation_data(Relation rel, SMgrRelation dst)
}
/*
- * Now write the page. We say isTemp = true even if it's not a
- * temp rel, because there's no need for smgr to schedule an fsync
- * for this write; we'll do it ourselves below.
+ * Now write the page. We say isTemp = true even if it's not a temp
+ * rel, because there's no need for smgr to schedule an fsync for this
+ * write; we'll do it ourselves below.
*/
smgrwrite(dst, blkno, buf, true);
}
/*
- * If the rel isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp rel we don't care
- * since the rel will be uninteresting after a crash anyway.)
+ * If the rel isn't temp, we must fsync it down to disk before it's safe
+ * to commit the transaction. (For a temp rel we don't care since the rel
+ * will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy. It's
- * less obvious that we have to do it even if we did WAL-log the
- * copied pages. The reason is that since we're copying outside
- * shared buffers, a CHECKPOINT occurring during the copy has no way
- * to flush the previously written data to disk (indeed it won't know
- * the new rel even exists). A crash later on would replay WAL from
- * the checkpoint, therefore it wouldn't replay our earlier WAL
- * entries. If we do not fsync those pages here, they might still not
- * be on disk when the crash occurs.
+ * It's obvious that we must do this when not WAL-logging the copy. It's less
+ * obvious that we have to do it even if we did WAL-log the copied pages.
+ * The reason is that since we're copying outside shared buffers, a
+ * CHECKPOINT occurring during the copy has no way to flush the previously
+ * written data to disk (indeed it won't know the new rel even exists). A
+ * crash later on would replay WAL from the checkpoint, therefore it
+ * wouldn't replay our earlier WAL entries. If we do not fsync those pages
+ * here, they might still not be on disk when the crash occurs.
*/
if (!rel->rd_istemp)
smgrimmedsync(dst);
@@ -5855,21 +5833,21 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
toastobject;
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction. (This is probably redundant in
- * all present uses...)
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction. (This is probably redundant in all present
+ * uses...)
*/
rel = heap_open(relOid, AccessExclusiveLock);
/*
* Toast table is shared if and only if its parent is.
*
- * We cannot allow toasting a shared relation after initdb (because
- * there's no way to mark it toasted in other databases' pg_class).
- * Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting happens after the bootstrap phase, so
- * checking IsBootstrapProcessingMode() won't work). However, we can
- * at least prevent this mistake under normal multi-user operation.
+ * We cannot allow toasting a shared relation after initdb (because there's
+ * no way to mark it toasted in other databases' pg_class). Unfortunately
+ * we can't distinguish initdb from a manually started standalone backend
+ * (toasting happens after the bootstrap phase, so checking
+ * IsBootstrapProcessingMode() won't work). However, we can at least
+ * prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
@@ -5944,11 +5922,10 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
tupdesc->attrs[2]->attstorage = 'p';
/*
- * Note: the toast relation is placed in the regular pg_toast
- * namespace even if its master relation is a temp table. There
- * cannot be any naming collision, and the toast rel will be destroyed
- * when its master is, so there's no need to handle the toast rel as
- * temp.
+ * Note: the toast relation is placed in the regular pg_toast namespace
+ * even if its master relation is a temp table. There cannot be any
+ * naming collision, and the toast rel will be destroyed when its master
+ * is, so there's no need to handle the toast rel as temp.
*/
toast_relid = heap_create_with_catalog(toast_relname,
PG_TOAST_NAMESPACE,
@@ -5971,11 +5948,11 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
*
* NOTE: the normal TOAST access routines could actually function with a
* single-column index on chunk_id only. However, the slice access
- * routines use both columns for faster access to an individual chunk.
- * In addition, we want it to be unique as a check against the
- * possibility of duplicate TOAST chunk OIDs. The index might also be
- * a little more efficient this way, since btree isn't all that happy
- * with large numbers of equal keys.
+ * routines use both columns for faster access to an individual chunk. In
+ * addition, we want it to be unique as a check against the possibility of
+ * duplicate TOAST chunk OIDs. The index might also be a little more
+ * efficient this way, since btree isn't all that happy with large numbers
+ * of equal keys.
*/
indexInfo = makeNode(IndexInfo);
@@ -6000,8 +5977,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Update toast rel's pg_class entry to show that it has an index. The
- * index OID is stored into the reltoastidxid field for easy access by
- * the tuple toaster.
+ * index OID is stored into the reltoastidxid field for easy access by the
+ * tuple toaster.
*/
setRelhasindex(toast_relid, true, true, toast_idxid);
@@ -6142,7 +6119,7 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
@@ -6182,7 +6159,7 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
Oid oldNspOid, Oid newNspOid,
bool hasDependEntry)
{
- HeapTuple classTup;
+ HeapTuple classTup;
Form_pg_class classForm;
classTup = SearchSysCacheCopy(RELOID,
@@ -6236,12 +6213,12 @@ AlterIndexNamespaces(Relation classRel, Relation rel,
foreach(l, indexList)
{
- Oid indexOid = lfirst_oid(l);
+ Oid indexOid = lfirst_oid(l);
/*
- * Note: currently, the index will not have its own dependency
- * on the namespace, so we don't need to do changeDependencyFor().
- * There's no rowtype in pg_type, either.
+ * Note: currently, the index will not have its own dependency on the
+ * namespace, so we don't need to do changeDependencyFor(). There's no
+ * rowtype in pg_type, either.
*/
AlterRelationNamespaceInternal(classRel, indexOid,
oldNspOid, newNspOid,
@@ -6264,12 +6241,12 @@ AlterSeqNamespaces(Relation classRel, Relation rel,
{
Relation depRel;
SysScanDesc scan;
- ScanKeyData key[2];
+ ScanKeyData key[2];
HeapTuple tup;
/*
- * SERIAL sequences are those having an internal dependency on one
- * of the table's columns (we don't care *which* column, exactly).
+ * SERIAL sequences are those having an internal dependency on one of the
+ * table's columns (we don't care *which* column, exactly).
*/
depRel = heap_open(DependRelationId, AccessShareLock);
@@ -6313,9 +6290,10 @@ AlterSeqNamespaces(Relation classRel, Relation rel,
AlterRelationNamespaceInternal(classRel, depForm->objid,
oldNspOid, newNspOid,
true);
+
/*
- * Sequences have entries in pg_type. We need to be careful
- * to move them to the new namespace, too.
+ * Sequences have entries in pg_type. We need to be careful to move
+ * them to the new namespace, too.
*/
AlterTypeNamespaceInternal(RelationGetForm(seqRel)->reltype,
newNspOid, false);
@@ -6348,8 +6326,8 @@ register_on_commit_action(Oid relid, OnCommitAction action)
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON
- * COMMIT action we need to take.
+ * We needn't bother registering the relation unless there is an ON COMMIT
+ * action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
@@ -6429,8 +6407,8 @@ PreCommit_on_commit_actions(void)
/*
* Note that table deletion will call
- * remove_on_commit_action, so the entry should get
- * marked as deleted.
+ * remove_on_commit_action, so the entry should get marked
+ * as deleted.
*/
Assert(oc->deleting_subid != InvalidSubTransactionId);
break;
@@ -6440,7 +6418,7 @@ PreCommit_on_commit_actions(void)
if (oids_to_truncate != NIL)
{
heap_truncate(oids_to_truncate);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
}
}
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 4bf2a4777f3..f83d1ab8843 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.27 2005/08/30 01:08:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.28 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,7 +67,7 @@
/* GUC variable */
-char *default_tablespace = NULL;
+char *default_tablespace = NULL;
static bool remove_tablespace_directories(Oid tablespaceoid, bool redo);
@@ -118,9 +118,9 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
if (errno == ENOENT)
{
/*
- * Acquire ExclusiveLock on pg_tablespace to ensure that no
- * DROP TABLESPACE or TablespaceCreateDbspace is running
- * concurrently. Simple reads from pg_tablespace are OK.
+ * Acquire ExclusiveLock on pg_tablespace to ensure that no DROP
+ * TABLESPACE or TablespaceCreateDbspace is running concurrently.
+ * Simple reads from pg_tablespace are OK.
*/
Relation rel;
@@ -130,8 +130,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
rel = NULL;
/*
- * Recheck to see if someone created the directory while we
- * were waiting for lock.
+ * Recheck to see if someone created the directory while we were
+ * waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
@@ -147,22 +147,22 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
if (errno != ENOENT || !isRedo)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
/* Try to make parent directory too */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
if (mkdir(parentdir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- parentdir)));
+ errmsg("could not create directory \"%s\": %m",
+ parentdir)));
pfree(parentdir);
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
}
}
@@ -209,7 +209,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
Oid tablespaceoid;
char *location;
char *linkloc;
- Oid ownerId;
+ Oid ownerId;
/* validate */
@@ -238,7 +238,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("tablespace location may not contain single quotes")));
+ errmsg("tablespace location may not contain single quotes")));
/*
* Allowing relative paths seems risky
@@ -251,9 +251,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
errmsg("tablespace location must be an absolute path")));
/*
- * Check that location isn't too long. Remember that we're going to
- * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole
- * path explicitly? This may be overly conservative.)
+ * Check that location isn't too long. Remember that we're going to append
+ * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path
+ * explicitly? This may be overly conservative.)
*/
if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10))
ereport(ERROR,
@@ -270,7 +270,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"",
stmt->tablespacename),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/*
* Check that there is no other tablespace by this name. (The unique
@@ -284,9 +284,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
stmt->tablespacename)));
/*
- * Insert tuple into pg_tablespace. The purpose of doing this first
- * is to lock the proposed tablename against other would-be creators.
- * The insertion will roll back if we find problems below.
+ * Insert tuple into pg_tablespace. The purpose of doing this first is to
+ * lock the proposed tablename against other would-be creators. The
+ * insertion will roll back if we find problems below.
*/
rel = heap_open(TableSpaceRelationId, RowExclusiveLock);
@@ -312,14 +312,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
recordDependencyOnOwner(TableSpaceRelationId, tablespaceoid, ownerId);
/*
- * Attempt to coerce target directory to safe permissions. If this
- * fails, it doesn't exist or has the wrong owner.
+ * Attempt to coerce target directory to safe permissions. If this fails,
+ * it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/*
* Check the target directory is empty.
@@ -331,11 +331,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
location)));
/*
- * Create the PG_VERSION file in the target directory. This has
- * several purposes: to make sure we can write in the directory, to
- * prevent someone from creating another tablespace pointing at the
- * same directory (the emptiness check above will fail), and to label
- * tablespace directories by PG version.
+ * Create the PG_VERSION file in the target directory. This has several
+ * purposes: to make sure we can write in the directory, to prevent
+ * someone from creating another tablespace pointing at the same directory
+ * (the emptiness check above will fail), and to label tablespace
+ * directories by PG version.
*/
set_short_version(location);
@@ -375,7 +375,6 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
/* We keep the lock on pg_tablespace until commit */
heap_close(rel, NoLock);
-
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -403,9 +402,8 @@ DropTableSpace(DropTableSpaceStmt *stmt)
PreventTransactionChain((void *) stmt, "DROP TABLESPACE");
/*
- * Acquire ExclusiveLock on pg_tablespace to ensure that no one else
- * is trying to do DROP TABLESPACE or TablespaceCreateDbspace
- * concurrently.
+ * Acquire ExclusiveLock on pg_tablespace to ensure that no one else is
+ * trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently.
*/
rel = heap_open(TableSpaceRelationId, ExclusiveLock);
@@ -439,8 +437,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
tablespacename);
/*
- * Remove the pg_tablespace tuple (this will roll back if we fail
- * below)
+ * Remove the pg_tablespace tuple (this will roll back if we fail below)
*/
simple_heap_delete(rel, &tuple->t_self);
@@ -476,7 +473,6 @@ DropTableSpace(DropTableSpaceStmt *stmt)
/* We keep the lock on pg_tablespace until commit */
heap_close(rel, NoLock);
-
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -504,17 +500,17 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
sprintf(location, "pg_tblspc/%u", tablespaceoid);
/*
- * Check if the tablespace still contains any files. We try to rmdir
- * each per-database directory we find in it. rmdir failure implies
- * there are still files in that subdirectory, so give up. (We do not
- * have to worry about undoing any already completed rmdirs, since the
- * next attempt to use the tablespace from that database will simply
- * recreate the subdirectory via TablespaceCreateDbspace.)
+ * Check if the tablespace still contains any files. We try to rmdir each
+ * per-database directory we find in it. rmdir failure implies there are
+ * still files in that subdirectory, so give up. (We do not have to worry
+ * about undoing any already completed rmdirs, since the next attempt to
+ * use the tablespace from that database will simply recreate the
+ * subdirectory via TablespaceCreateDbspace.)
*
* Since we hold exclusive lock, no one else should be creating any fresh
- * subdirectories in parallel. It is possible that new files are
- * being created within subdirectories, though, so the rmdir call
- * could fail. Worst consequence is a less friendly error message.
+ * subdirectories in parallel. It is possible that new files are being
+ * created within subdirectories, though, so the rmdir call could fail.
+ * Worst consequence is a less friendly error message.
*/
dirdesc = AllocateDir(location);
if (dirdesc == NULL)
@@ -558,8 +554,8 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
FreeDir(dirdesc);
/*
- * Okay, try to unlink PG_VERSION (we allow it to not be there, even
- * in non-REDO case, for robustness).
+ * Okay, try to unlink PG_VERSION (we allow it to not be there, even in
+ * non-REDO case, for robustness).
*/
subfile = palloc(strlen(location) + 11 + 1);
sprintf(subfile, "%s/PG_VERSION", location);
@@ -577,9 +573,9 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
/*
* Okay, try to remove the symlink. We must however deal with the
- * possibility that it's a directory instead of a symlink --- this
- * could happen during WAL replay (see TablespaceCreateDbspace), and
- * it is also the normal case on Windows.
+ * possibility that it's a directory instead of a symlink --- this could
+ * happen during WAL replay (see TablespaceCreateDbspace), and it is also
+ * the normal case on Windows.
*/
if (lstat(location, &st) == 0 && S_ISDIR(st.st_mode))
{
@@ -725,7 +721,7 @@ RenameTableSpace(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/* Make sure the new name doesn't exist */
ScanKeyInit(&entry[0],
@@ -802,13 +798,13 @@ AlterTableSpaceOwner(const char *name, Oid newOwnerId)
check_is_member_of_role(GetUserId(), newOwnerId);
/*
- * Normally we would also check for create permissions here,
- * but there are none for tablespaces so we follow what rename
- * tablespace does and omit the create permissions check.
+ * Normally we would also check for create permissions here, but there
+ * are none for tablespaces so we follow what rename tablespace does
+ * and omit the create permissions check.
*
- * NOTE: Only superusers may create tablespaces to begin with and
- * so initially only a superuser would be able to change its
- * ownership anyway.
+ * NOTE: Only superusers may create tablespaces to begin with and so
+ * initially only a superuser would be able to change its ownership
+ * anyway.
*/
memset(repl_null, ' ', sizeof(repl_null));
@@ -860,7 +856,7 @@ assign_default_tablespace(const char *newval, bool doit, GucSource source)
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
@@ -895,15 +891,16 @@ GetDefaultTablespace(void)
/* Fast path for default_tablespace == "" */
if (default_tablespace == NULL || default_tablespace[0] == '\0')
return InvalidOid;
+
/*
* It is tempting to cache this lookup for more speed, but then we would
- * fail to detect the case where the tablespace was dropped since the
- * GUC variable was set. Note also that we don't complain if the value
- * fails to refer to an existing tablespace; we just silently return
- * InvalidOid, causing the new object to be created in the database's
- * tablespace.
+ * fail to detect the case where the tablespace was dropped since the GUC
+ * variable was set. Note also that we don't complain if the value fails
+ * to refer to an existing tablespace; we just silently return InvalidOid,
+ * causing the new object to be created in the database's tablespace.
*/
result = get_tablespace_oid(default_tablespace);
+
/*
* Allow explicit specification of database's default tablespace in
* default_tablespace without triggering permissions checks.
@@ -1001,14 +998,14 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
char *linkloc;
/*
- * Attempt to coerce target directory to safe permissions. If
- * this fails, it doesn't exist or has the wrong owner.
+ * Attempt to coerce target directory to safe permissions. If this
+ * fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/* Create or re-create the PG_VERSION file in the target directory */
set_short_version(location);
@@ -1022,8 +1019,8 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
if (errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not create symbolic link \"%s\": %m",
+ linkloc)));
}
pfree(linkloc);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index b3caaa4ce3c..a3f7c37dc28 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.194 2005/08/24 17:38:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.195 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@ static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
Instrumentation *instr,
MemoryContext per_tuple_context);
static void AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
/*
@@ -98,15 +98,14 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
{
/*
* If this trigger is a constraint (and a foreign key one) then we
- * really need a constrrelid. Since we don't have one, we'll try
- * to generate one from the argument information.
+ * really need a constrrelid. Since we don't have one, we'll try to
+ * generate one from the argument information.
*
* This is really just a workaround for a long-ago pg_dump bug that
* omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we
- * can't determine the correct relation, because that would
- * prevent loading the dump file. Instead, NOTICE here and ERROR
- * in the trigger.
+ * commands. We don't want to bomb out completely here if we can't
+ * determine the correct relation, because that would prevent loading
+ * the dump file. Instead, NOTICE here and ERROR in the trigger.
*/
bool needconstrrelid = false;
void *elem = NULL;
@@ -181,8 +180,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
}
/*
- * Generate the trigger's OID now, so that we can use it in the name
- * if needed.
+ * Generate the trigger's OID now, so that we can use it in the name if
+ * needed.
*/
tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
@@ -190,9 +189,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
/*
* If trigger is an RI constraint, use specified trigger name as
- * constraint name and build a unique trigger name instead. This is
- * mainly for backwards compatibility with CREATE CONSTRAINT TRIGGER
- * commands.
+ * constraint name and build a unique trigger name instead. This is mainly
+ * for backwards compatibility with CREATE CONSTRAINT TRIGGER commands.
*/
if (stmt->isconstraint)
{
@@ -246,10 +244,10 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
}
/*
- * Scan pg_trigger for existing triggers on relation. We do this
- * mainly because we must count them; a secondary benefit is to give a
- * nice error message if there's already a trigger of the same name.
- * (The unique index on tgrelid/tgname would complain anyway.)
+ * Scan pg_trigger for existing triggers on relation. We do this mainly
+ * because we must count them; a secondary benefit is to give a nice error
+ * message if there's already a trigger of the same name. (The unique
+ * index on tgrelid/tgname would complain anyway.)
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -267,8 +265,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
@@ -281,8 +279,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
- * see a trigger function declared OPAQUE, change it to TRIGGER.
+ * We allow OPAQUE just so we can load old dump files. When we see a
+ * trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
{
@@ -305,13 +303,13 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(trigname));
+ CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
values[Anum_pg_trigger_tgisconstraint - 1] = BoolGetDatum(stmt->isconstraint);
values[Anum_pg_trigger_tgconstrname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(constrname));
+ CStringGetDatum(constrname));
values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
@@ -351,13 +349,13 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
}
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
+ CStringGetDatum(args));
}
else
{
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(""));
+ CStringGetDatum(""));
}
/* tgattr is currently always a zero-length array */
tgattr = buildint2vector(NULL, 0);
@@ -386,9 +384,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
/*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
+ * Update relation's pg_class entry. Crucial side-effect: other backends
+ * (and this one too!) are sent SI message to make them rebuild relcache
+ * entries.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -409,19 +407,18 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
/*
* We used to try to update the rel's relcache entry here, but that's
- * fairly pointless since it will happen as a byproduct of the
- * upcoming CommandCounterIncrement...
+ * fairly pointless since it will happen as a byproduct of the upcoming
+ * CommandCounterIncrement...
*/
/*
- * Record dependencies for trigger. Always place a normal dependency
- * on the function. If we are doing this in response to an explicit
- * CREATE TRIGGER command, also make trigger be auto-dropped if its
- * relation is dropped or if the FK relation is dropped. (Auto drop
- * is compatible with our pre-7.3 behavior.) If the trigger is being
- * made for a constraint, we can skip the relation links; the
- * dependency on the constraint will indirectly depend on the
- * relations.
+ * Record dependencies for trigger. Always place a normal dependency on
+ * the function. If we are doing this in response to an explicit CREATE
+ * TRIGGER command, also make trigger be auto-dropped if its relation is
+ * dropped or if the FK relation is dropped. (Auto drop is compatible
+ * with our pre-7.3 behavior.) If the trigger is being made for a
+ * constraint, we can skip the relation links; the dependency on the
+ * constraint will indirectly depend on the relations.
*/
referenced.classId = ProcedureRelationId;
referenced.objectId = funcoid;
@@ -565,13 +562,12 @@ RemoveTriggerById(Oid trigOid)
heap_close(tgrel, RowExclusiveLock);
/*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
+ * Update relation's pg_class entry. Crucial side-effect: other backends
+ * (and this one too!) are sent SI message to make them rebuild relcache
+ * entries.
*
- * Note this is OK only because we have AccessExclusiveLock on the rel,
- * so no one else is creating/deleting triggers on this rel at the
- * same time.
+ * Note this is OK only because we have AccessExclusiveLock on the rel, so no
+ * one else is creating/deleting triggers on this rel at the same time.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -623,16 +619,16 @@ renametrig(Oid relid,
ScanKeyData key[2];
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction.
*/
targetrel = heap_open(relid, AccessExclusiveLock);
/*
- * Scan pg_trigger twice for existing triggers on relation. We do
- * this in order to ensure a trigger does not exist with newname (The
- * unique index on tgrelid/tgname would complain anyway) and to ensure
- * a trigger does exist with oldname.
+ * Scan pg_trigger twice for existing triggers on relation. We do this in
+ * order to ensure a trigger does not exist with newname (The unique index
+ * on tgrelid/tgname would complain anyway) and to ensure a trigger does
+ * exist with oldname.
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -655,8 +651,8 @@ renametrig(Oid relid,
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
@@ -687,10 +683,9 @@ renametrig(Oid relid,
CatalogUpdateIndexes(tgrel, tuple);
/*
- * Invalidate relation's relcache entry so that other backends
- * (and this one too!) are sent SI message to make them rebuild
- * relcache entries. (Ideally this should happen
- * automatically...)
+ * Invalidate relation's relcache entry so that other backends (and
+ * this one too!) are sent SI message to make them rebuild relcache
+ * entries. (Ideally this should happen automatically...)
*/
CacheInvalidateRelcache(targetrel);
}
@@ -732,13 +727,13 @@ void
EnableDisableTrigger(Relation rel, const char *tgname,
bool enable, bool skip_system)
{
- Relation tgrel;
- int nkeys;
+ Relation tgrel;
+ int nkeys;
ScanKeyData keys[2];
SysScanDesc tgscan;
- HeapTuple tuple;
- bool found;
- bool changed;
+ HeapTuple tuple;
+ bool found;
+ bool changed;
/* Scan the relevant entries in pg_triggers */
tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
@@ -775,8 +770,8 @@ EnableDisableTrigger(Relation rel, const char *tgname,
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system trigger",
- NameStr(oldtrig->tgname))));
+ errmsg("permission denied: \"%s\" is a system trigger",
+ NameStr(oldtrig->tgname))));
}
found = true;
@@ -784,7 +779,7 @@ EnableDisableTrigger(Relation rel, const char *tgname,
if (oldtrig->tgenabled != enable)
{
/* need to change this one ... make a copy to scribble on */
- HeapTuple newtup = heap_copytuple(tuple);
+ HeapTuple newtup = heap_copytuple(tuple);
Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
newtrig->tgenabled = enable;
@@ -848,10 +843,10 @@ RelationBuildTriggers(Relation relation)
triggers = (Trigger *) palloc(ntrigs * sizeof(Trigger));
/*
- * Note: since we scan the triggers using TriggerRelidNameIndexId, we
- * will be reading the triggers in name order, except possibly during
- * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
- * in turn ensures that triggers will be fired in name order.
+ * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
+ * be reading the triggers in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
+ * turn ensures that triggers will be fired in name order.
*/
ScanKeyInit(&skey,
Anum_pg_trigger_tgrelid,
@@ -874,7 +869,7 @@ RelationBuildTriggers(Relation relation)
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
@@ -1183,12 +1178,12 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
j;
/*
- * We need not examine the "index" data, just the trigger array
- * itself; if we have the same triggers with the same types, the
- * derived index data should match.
+ * We need not examine the "index" data, just the trigger array itself; if
+ * we have the same triggers with the same types, the derived index data
+ * should match.
*
- * As of 7.3 we assume trigger set ordering is significant in the
- * comparison; so we just compare corresponding slots of the two sets.
+ * As of 7.3 we assume trigger set ordering is significant in the comparison;
+ * so we just compare corresponding slots of the two sets.
*/
if (trigdesc1 != NULL)
{
@@ -1279,9 +1274,9 @@ ExecCallTriggerFunc(TriggerData *trigdata,
/*
* Do the function evaluation in the per-tuple memory context, so that
- * leaked memory will be reclaimed once per tuple. Note in particular
- * that any new tuple created by the trigger function will live till
- * the end of the tuple cycle.
+ * leaked memory will be reclaimed once per tuple. Note in particular that
+ * any new tuple created by the trigger function will live till the end of
+ * the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@@ -1295,8 +1290,8 @@ ExecCallTriggerFunc(TriggerData *trigdata,
MemoryContextSwitchTo(oldContext);
/*
- * Trigger protocol allows function to return a null pointer, but NOT
- * to set the isnull result flag.
+ * Trigger protocol allows function to return a null pointer, but NOT to
+ * set the isnull result flag.
*/
if (fcinfo.isnull)
ereport(ERROR,
@@ -1305,8 +1300,8 @@ ExecCallTriggerFunc(TriggerData *trigdata,
fcinfo.flinfo->fn_oid)));
/*
- * If doing EXPLAIN ANALYZE, stop charging time to this trigger,
- * and count one "tuple returned" (really the number of firings).
+ * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+ * one "tuple returned" (really the number of firings).
*/
if (instr)
InstrStopNode(instr + tgindx, true);
@@ -1359,7 +1354,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1470,7 +1465,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1601,7 +1596,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1703,7 +1698,7 @@ GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo,
if (newSlot != NULL)
{
- HTSU_Result test;
+ HTSU_Result test;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -1751,8 +1746,8 @@ ltrmark:;
}
/*
- * if tuple was deleted or PlanQual failed for updated
- * tuple - we have not process this tuple!
+ * if tuple was deleted or PlanQual failed for updated tuple -
+ * we have not process this tuple!
*/
return NULL;
@@ -1799,7 +1794,7 @@ ltrmark:;
* they will easily go away during subtransaction abort.
*
* Because the list of pending events can grow large, we go to some effort
- * to minimize memory consumption. We do not use the generic List mechanism
+ * to minimize memory consumption. We do not use the generic List mechanism
* but thread the events manually.
*
* XXX We need to be able to save the per-event data in a file if it grows too
@@ -1832,7 +1827,7 @@ typedef struct SetConstraintStateData
bool all_isdeferred;
int numstates; /* number of trigstates[] entries in use */
int numalloc; /* allocated size of trigstates[] */
- SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */
+ SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */
} SetConstraintStateData;
typedef SetConstraintStateData *SetConstraintState;
@@ -1849,12 +1844,12 @@ typedef struct AfterTriggerEventData *AfterTriggerEvent;
typedef struct AfterTriggerEventData
{
- AfterTriggerEvent ate_next; /* list link */
- TriggerEvent ate_event; /* event type and status bits */
- CommandId ate_firing_id; /* ID for firing cycle */
- Oid ate_tgoid; /* the trigger's ID */
- Oid ate_relid; /* the relation it's on */
- ItemPointerData ate_oldctid; /* specific tuple(s) involved */
+ AfterTriggerEvent ate_next; /* list link */
+ TriggerEvent ate_event; /* event type and status bits */
+ CommandId ate_firing_id; /* ID for firing cycle */
+ Oid ate_tgoid; /* the trigger's ID */
+ Oid ate_relid; /* the relation it's on */
+ ItemPointerData ate_oldctid; /* specific tuple(s) involved */
ItemPointerData ate_newctid;
} AfterTriggerEventData;
@@ -1873,7 +1868,7 @@ typedef struct AfterTriggerEventList
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
@@ -1881,7 +1876,7 @@ typedef struct AfterTriggerEventList
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them.
*
@@ -1908,31 +1903,31 @@ typedef struct AfterTriggerEventList
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
typedef struct AfterTriggersData
{
- CommandId firing_counter; /* next firing ID to assign */
- SetConstraintState state; /* the active S C state */
+ CommandId firing_counter; /* next firing ID to assign */
+ SetConstraintState state; /* the active S C state */
AfterTriggerEventList events; /* deferred-event list */
- int query_depth; /* current query list index */
- AfterTriggerEventList *query_stack; /* events pending from each query */
- int maxquerydepth; /* allocated len of above array */
+ int query_depth; /* current query list index */
+ AfterTriggerEventList *query_stack; /* events pending from each query */
+ int maxquerydepth; /* allocated len of above array */
/* these fields are just for resetting at subtrans abort: */
SetConstraintState *state_stack; /* stacked S C states */
- AfterTriggerEventList *events_stack; /* stacked list pointers */
- int *depth_stack; /* stacked query_depths */
- CommandId *firing_stack; /* stacked firing_counters */
- int maxtransdepth; /* allocated len of above arrays */
+ AfterTriggerEventList *events_stack; /* stacked list pointers */
+ int *depth_stack; /* stacked query_depths */
+ CommandId *firing_stack; /* stacked firing_counters */
+ int maxtransdepth; /* allocated len of above arrays */
} AfterTriggersData;
typedef AfterTriggersData *AfterTriggers;
@@ -1941,14 +1936,14 @@ static AfterTriggers afterTriggers;
static void AfterTriggerExecute(AfterTriggerEvent event,
- Relation rel, TriggerDesc *trigdesc,
- FmgrInfo *finfo,
- Instrumentation *instr,
- MemoryContext per_tuple_context);
+ Relation rel, TriggerDesc *trigdesc,
+ FmgrInfo *finfo,
+ Instrumentation *instr,
+ MemoryContext per_tuple_context);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
- Oid tgoid, bool tgisdeferred);
+ Oid tgoid, bool tgisdeferred);
/* ----------
@@ -2075,8 +2070,8 @@ AfterTriggerExecute(AfterTriggerEvent event,
elog(ERROR, "could not find trigger %u", tgoid);
/*
- * If doing EXPLAIN ANALYZE, start charging time to this trigger.
- * We want to include time spent re-fetching tuples in the trigger cost.
+ * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
+ * to include time spent re-fetching tuples in the trigger cost.
*/
if (instr)
InstrStartNode(instr + tgindx);
@@ -2133,8 +2128,8 @@ AfterTriggerExecute(AfterTriggerEvent event,
MemoryContextReset(per_tuple_context);
/*
- * Call the trigger and throw away any possibly returned updated
- * tuple. (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
+ * Call the trigger and throw away any possibly returned updated tuple.
+ * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
*/
rettuple = ExecCallTriggerFunc(&LocTriggerData,
tgindx,
@@ -2153,8 +2148,8 @@ AfterTriggerExecute(AfterTriggerEvent event,
ReleaseBuffer(newbuffer);
/*
- * If doing EXPLAIN ANALYZE, stop charging time to this trigger,
- * and count one "tuple returned" (really the number of firings).
+ * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+ * one "tuple returned" (really the number of firings).
*/
if (instr)
InstrStopNode(instr + tgindx, true);
@@ -2264,7 +2259,7 @@ afterTriggerMarkEvents(AfterTriggerEventList *events,
*
* If estate isn't NULL, then we expect that all the firable events are
* for triggers of the relations included in the estate's result relation
- * array. This allows us to re-use the estate's open relations and
+ * array. This allows us to re-use the estate's open relations and
* trigger cache info. When estate is NULL, we have to find the relations
* the hard way.
*
@@ -2308,8 +2303,8 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
event->ate_firing_id == firing_id)
{
/*
- * So let's fire it... but first, open the correct
- * relation if this is not the same relation as before.
+ * So let's fire it... but first, open the correct relation if
+ * this is not the same relation as before.
*/
if (rel == NULL || rel->rd_id != event->ate_relid)
{
@@ -2317,7 +2312,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
{
/* Find target relation among estate's result rels */
ResultRelInfo *rInfo;
- int nr;
+ int nr;
rInfo = estate->es_result_relations;
nr = estate->es_num_result_relations;
@@ -2328,7 +2323,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
rInfo++;
nr--;
}
- if (nr <= 0) /* should not happen */
+ if (nr <= 0) /* should not happen */
elog(ERROR, "could not find relation %u among query result relations",
event->ate_relid);
rel = rInfo->ri_RelationDesc;
@@ -2345,17 +2340,17 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
FreeTriggerDesc(trigdesc);
if (finfo)
pfree(finfo);
- Assert(instr == NULL); /* never used in this case */
+ Assert(instr == NULL); /* never used in this case */
/*
- * We assume that an appropriate lock is still held by
- * the executor, so grab no new lock here.
+ * We assume that an appropriate lock is still held by the
+ * executor, so grab no new lock here.
*/
rel = heap_open(event->ate_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a
- * stable copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a stable
+ * copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
@@ -2364,8 +2359,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
event->ate_relid);
/*
- * Allocate space to cache fmgr lookup info for
- * triggers.
+ * Allocate space to cache fmgr lookup info for triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -2376,8 +2370,8 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
/*
* Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is still
- * set, so recursive examinations of the event list won't try
- * to re-fire it.
+ * set, so recursive examinations of the event list won't try to
+ * re-fire it.
*/
AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
per_tuple_context);
@@ -2393,9 +2387,9 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events,
* If it's now done, throw it away, if allowed.
*
* NB: it's possible the trigger call above added more events to the
- * queue, or that calls we will do later will want to add more, so
- * we have to be careful about maintaining list validity at all
- * points here.
+ * queue, or that calls we will do later will want to add more, so we
+ * have to be careful about maintaining list validity at all points
+ * here.
*/
next_event = event->ate_next;
@@ -2499,7 +2493,7 @@ AfterTriggerBeginQuery(void)
if (afterTriggers->query_depth >= afterTriggers->maxquerydepth)
{
/* repalloc will keep the stack in the same context */
- int new_alloc = afterTriggers->maxquerydepth * 2;
+ int new_alloc = afterTriggers->maxquerydepth * 2;
afterTriggers->query_stack = (AfterTriggerEventList *)
repalloc(afterTriggers->query_stack,
@@ -2537,21 +2531,21 @@ AfterTriggerEndQuery(EState *estate)
Assert(afterTriggers->query_depth >= 0);
/*
- * Process all immediate-mode triggers queued by the query, and move
- * the deferred ones to the main list of deferred events.
+ * Process all immediate-mode triggers queued by the query, and move the
+ * deferred ones to the main list of deferred events.
*
- * Notice that we decide which ones will be fired, and put the deferred
- * ones on the main list, before anything is actually fired. This
- * ensures reasonably sane behavior if a trigger function does
- * SET CONSTRAINTS ... IMMEDIATE: all events we have decided to defer
- * will be available for it to fire.
+ * Notice that we decide which ones will be fired, and put the deferred ones
+ * on the main list, before anything is actually fired. This ensures
+ * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
+ * IMMEDIATE: all events we have decided to defer will be available for it
+ * to fire.
*
* If we find no firable events, we don't have to increment firing_counter.
*/
events = &afterTriggers->query_stack[afterTriggers->query_depth];
if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
/* OK to delete the immediate events after processing them */
afterTriggerInvokeEvents(events, firing_id, estate, true);
@@ -2584,21 +2578,21 @@ AfterTriggerFireDeferred(void)
Assert(afterTriggers->query_depth == -1);
/*
- * If there are any triggers to fire, make sure we have set a snapshot
- * for them to use. (Since PortalRunUtility doesn't set a snap for
- * COMMIT, we can't assume ActiveSnapshot is valid on entry.)
+ * If there are any triggers to fire, make sure we have set a snapshot for
+ * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
+ * can't assume ActiveSnapshot is valid on entry.)
*/
events = &afterTriggers->events;
if (events->head != NULL)
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Run all the remaining triggers. Loop until they are all gone,
- * just in case some trigger queues more for us to do.
+ * Run all the remaining triggers. Loop until they are all gone, just in
+ * case some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
afterTriggerInvokeEvents(events, firing_id, NULL, true);
}
@@ -2643,7 +2637,7 @@ AfterTriggerBeginSubXact(void)
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
@@ -2676,7 +2670,7 @@ AfterTriggerBeginSubXact(void)
else
{
/* repalloc will keep the stacks in the same context */
- int new_alloc = afterTriggers->maxtransdepth * 2;
+ int new_alloc = afterTriggers->maxtransdepth * 2;
afterTriggers->state_stack = (SetConstraintState *)
repalloc(afterTriggers->state_stack,
@@ -2695,8 +2689,8 @@ AfterTriggerBeginSubXact(void)
}
/*
- * Push the current information into the stack. The SET CONSTRAINTS
- * state is not saved until/unless changed.
+ * Push the current information into the stack. The SET CONSTRAINTS state
+ * is not saved until/unless changed.
*/
afterTriggers->state_stack[my_level] = NULL;
afterTriggers->events_stack[my_level] = afterTriggers->events;
@@ -2718,7 +2712,8 @@ AfterTriggerEndSubXact(bool isCommit)
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably unneeded)
+ * Ignore call if the transaction is in aborted state. (Probably
+ * unneeded)
*/
if (afterTriggers == NULL)
return;
@@ -2759,8 +2754,8 @@ AfterTriggerEndSubXact(bool isCommit)
*/
/*
- * Restore the trigger state. If the saved state is NULL, then
- * this subxact didn't save it, so it doesn't need restoring.
+ * Restore the trigger state. If the saved state is NULL, then this
+ * subxact didn't save it, so it doesn't need restoring.
*/
state = afterTriggers->state_stack[my_level];
if (state != NULL)
@@ -2772,12 +2767,12 @@ AfterTriggerEndSubXact(bool isCommit)
afterTriggers->state_stack[my_level] = NULL;
/*
- * Scan for any remaining deferred events that were marked DONE
- * or IN PROGRESS by this subxact or a child, and un-mark them.
- * We can recognize such events because they have a firing ID
- * greater than or equal to the firing_counter value we saved at
- * subtransaction start. (This essentially assumes that the
- * current subxact includes all subxacts started after it.)
+ * Scan for any remaining deferred events that were marked DONE or IN
+ * PROGRESS by this subxact or a child, and un-mark them. We can
+ * recognize such events because they have a firing ID greater than or
+ * equal to the firing_counter value we saved at subtransaction start.
+ * (This essentially assumes that the current subxact includes all
+ * subxacts started after it.)
*/
subxact_firing_id = afterTriggers->firing_stack[my_level];
for (event = afterTriggers->events.head;
@@ -2813,7 +2808,7 @@ SetConstraintStateCreate(int numalloc)
state = (SetConstraintState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(SetConstraintStateData) +
- (numalloc - 1) *sizeof(SetConstraintTriggerData));
+ (numalloc - 1) *sizeof(SetConstraintTriggerData));
state->numalloc = numalloc;
@@ -2840,7 +2835,7 @@ SetConstraintStateCopy(SetConstraintState origstate)
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
@@ -2885,9 +2880,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
return;
/*
- * If in a subtransaction, and we didn't save the current state
- * already, save it so it can be restored if the subtransaction
- * aborts.
+ * If in a subtransaction, and we didn't save the current state already,
+ * save it so it can be restored if the subtransaction aborts.
*/
if (my_level > 1 &&
afterTriggers->state_stack[my_level] == NULL)
@@ -2939,7 +2933,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2962,9 +2956,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
/*
- * If we found some, check that they fit the deferrability
- * but skip referential action ones, since they are
- * silently never deferrable.
+ * If we found some, check that they fit the deferrability but
+ * skip referential action ones, since they are silently never
+ * deferrable.
*/
if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL &&
@@ -3026,15 +3020,15 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
}
/*
- * SQL99 requires that when a constraint is set to IMMEDIATE, any
- * deferred checks against that constraint must be made when the SET
- * CONSTRAINTS command is executed -- i.e. the effects of the SET
- * CONSTRAINTS command apply retroactively. We've updated the
- * constraints state, so scan the list of previously deferred events
- * to fire any that have now become immediate.
+ * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
+ * checks against that constraint must be made when the SET CONSTRAINTS
+ * command is executed -- i.e. the effects of the SET CONSTRAINTS command
+ * apply retroactively. We've updated the constraints state, so scan the
+ * list of previously deferred events to fire any that have now become
+ * immediate.
*
- * Obviously, if this was SET ... DEFERRED then it can't have converted
- * any unfired events to immediate, so we need do nothing in that case.
+ * Obviously, if this was SET ... DEFERRED then it can't have converted any
+ * unfired events to immediate, so we need do nothing in that case.
*/
if (!stmt->deferred)
{
@@ -3042,12 +3036,12 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
if (afterTriggerMarkEvents(events, NULL, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
/*
- * We can delete fired events if we are at top transaction
- * level, but we'd better not if inside a subtransaction, since
- * the subtransaction could later get rolled back.
+ * We can delete fired events if we are at top transaction level,
+ * but we'd better not if inside a subtransaction, since the
+ * subtransaction could later get rolled back.
*/
afterTriggerInvokeEvents(events, firing_id, NULL,
!IsSubTransaction());
@@ -3116,9 +3110,9 @@ AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
continue;
/*
- * If this is an UPDATE of a PK table or FK table that does
- * not change the PK or FK respectively, we can skip queuing
- * the event: there is no need to fire the trigger.
+ * If this is an UPDATE of a PK table or FK table that does not change
+ * the PK or FK respectively, we can skip queuing the event: there is
+ * no need to fire the trigger.
*/
if ((event & TRIGGER_EVENT_OPMASK) == TRIGGER_EVENT_UPDATE)
{
@@ -3134,17 +3128,17 @@ AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
break;
case RI_TRIGGER_FK:
+
/*
* Update on FK table
*
- * There is one exception when updating FK tables:
- * if the updated row was inserted by our own
- * transaction and the FK is deferred, we still
- * need to fire the trigger. This is because our
- * UPDATE will invalidate the INSERT so the
- * end-of-transaction INSERT RI trigger will not
- * do anything, so we have to do the check for the
- * UPDATE anyway.
+ * There is one exception when updating FK tables: if the
+ * updated row was inserted by our own transaction and the
+ * FK is deferred, we still need to fire the trigger. This
+ * is because our UPDATE will invalidate the INSERT so the
+ * end-of-transaction INSERT RI trigger will not do
+ * anything, so we have to do the check for the UPDATE
+ * anyway.
*/
if (HeapTupleHeaderGetXmin(oldtup->t_data) !=
GetCurrentTransactionId() &&
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index ee69821bcfb..7caacdacd2f 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.80 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.81 2005/10/15 02:49:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -130,8 +130,7 @@ DefineType(List *names, List *parameters)
/*
* Type names must be one character shorter than other names, allowing
- * room to create the corresponding array type name with prepended
- * "_".
+ * room to create the corresponding array type name with prepended "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
ereport(ERROR,
@@ -183,10 +182,9 @@ DefineType(List *names, List *parameters)
char *a = defGetString(defel);
/*
- * Note: if argument was an unquoted identifier, parser will
- * have applied translations to it, so be prepared to
- * recognize translated type names as well as the nominal
- * form.
+ * Note: if argument was an unquoted identifier, parser will have
+ * applied translations to it, so be prepared to recognize
+ * translated type names as well as the nominal form.
*/
if (pg_strcasecmp(a, "double") == 0 ||
pg_strcasecmp(a, "float8") == 0 ||
@@ -303,8 +301,8 @@ DefineType(List *names, List *parameters)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type \"cstring\"",
- NameListToString(outputName))));
+ errmsg("type output function %s must return type \"cstring\"",
+ NameListToString(outputName))));
}
if (receiveOid)
{
@@ -312,8 +310,8 @@ DefineType(List *names, List *parameters)
if (resulttype != typoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type receive function %s must return type %s",
- NameListToString(receiveName), typeName)));
+ errmsg("type receive function %s must return type %s",
+ NameListToString(receiveName), typeName)));
}
if (sendOid)
{
@@ -321,14 +319,13 @@ DefineType(List *names, List *parameters)
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type \"bytea\"",
- NameListToString(sendName))));
+ errmsg("type send function %s must return type \"bytea\"",
+ NameListToString(sendName))));
}
/*
- * Convert analysis function proc name to an OID. If no analysis
- * function is specified, we'll use zero to select the built-in
- * default algorithm.
+ * Convert analysis function proc name to an OID. If no analysis function
+ * is specified, we'll use zero to select the built-in default algorithm.
*/
if (analyzeName)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
@@ -361,8 +358,8 @@ DefineType(List *names, List *parameters)
false); /* Type NOT NULL */
/*
- * When we create a base type (as opposed to a complex type) we need
- * to have an array entry for it in pg_type as well.
+ * When we create a base type (as opposed to a complex type) we need to
+ * have an array entry for it in pg_type as well.
*/
shadow_type = makeArrayTypeName(typeName);
@@ -430,8 +427,8 @@ RemoveType(List *names, DropBehavior behavior)
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
@@ -522,12 +519,11 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is
- * presently useless since the parser will have truncated the name to
- * fit. But leave it here since we may someday support arrays of
- * domains, in which case we'll be back to needing to enforce
- * NAMEDATALEN-2.)
+ * Domainnames, unlike typenames don't need to account for the '_' prefix.
+ * So they can be one character longer. (This test is presently useless
+ * since the parser will have truncated the name to fit. But leave it
+ * here since we may someday support arrays of domains, in which case
+ * we'll be back to needing to enforce NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@@ -544,10 +540,9 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid = HeapTupleGetOid(typeTup);
/*
- * Base type must be a plain base type. Domains over pseudo types
- * would create a security hole. Domains of domains might be made to
- * work in the future, but not today. Ditto for domains over complex
- * types.
+ * Base type must be a plain base type. Domains over pseudo types would
+ * create a security hole. Domains of domains might be made to work in
+ * the future, but not today. Ditto for domains over complex types.
*/
typtype = baseType->typtype;
if (typtype != 'b')
@@ -613,7 +608,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -627,8 +622,8 @@ DefineDomain(CreateDomainStmt *stmt)
case CONSTR_DEFAULT:
/*
- * The inherited default value may be overridden by the
- * user with the DEFAULT <expr> statement.
+ * The inherited default value may be overridden by the user
+ * with the DEFAULT <expr> statement.
*/
if (defaultExpr)
ereport(ERROR,
@@ -639,8 +634,8 @@ DefineDomain(CreateDomainStmt *stmt)
pstate = make_parsestate(NULL);
/*
- * Cook the constr->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the constr->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, constr->raw_expr,
basetypeoid,
@@ -648,13 +643,13 @@ DefineDomain(CreateDomainStmt *stmt)
domainName);
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we
+ * also require a valid textual representation (mainly to make
+ * life easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(domainName,
- InvalidOid),
+ deparse_context_for(domainName,
+ InvalidOid),
false, false);
defaultValueBin = nodeToString(defaultExpr);
break;
@@ -663,7 +658,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@@ -672,7 +667,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
break;
@@ -691,13 +686,13 @@ DefineDomain(CreateDomainStmt *stmt)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -744,8 +739,7 @@ DefineDomain(CreateDomainStmt *stmt)
typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by
- * TypeCreate
+ * Process constraints which refer to the domain ID returned by TypeCreate
*/
foreach(listptr, schema)
{
@@ -815,8 +809,8 @@ RemoveDomain(List *names, DropBehavior behavior)
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
@@ -856,11 +850,11 @@ findTypeInputFunction(List *procname, Oid typeOid)
Oid procOid;
/*
- * Input functions can take a single argument of type CSTRING, or
- * three arguments (string, typioparam OID, typmod).
+ * Input functions can take a single argument of type CSTRING, or three
+ * arguments (string, typioparam OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
- * see this, we issue a warning and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we see
+ * this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = CSTRINGOID;
@@ -897,8 +891,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
SetFunctionArgType(procOid, 0, CSTRINGOID);
/*
- * Need CommandCounterIncrement since DefineType will likely try
- * to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try to
+ * alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -925,9 +919,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
/*
* Output functions can take a single argument of the type.
*
- * For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a warning and fix up the
- * pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of the actual type
+ * name; if we see this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = typeOid;
@@ -944,13 +937,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(WARNING,
- (errmsg("changing argument type of function %s from \"opaque\" to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from \"opaque\" to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
/*
- * Need CommandCounterIncrement since DefineType will likely try
- * to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try to
+ * alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -975,8 +968,8 @@ findTypeReceiveFunction(List *procname, Oid typeOid)
Oid procOid;
/*
- * Receive functions can take a single argument of type INTERNAL, or
- * three arguments (internal, typioparam OID, typmod).
+ * Receive functions can take a single argument of type INTERNAL, or three
+ * arguments (internal, typioparam OID, typmod).
*/
argList[0] = INTERNALOID;
@@ -1029,8 +1022,7 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
Oid procOid;
/*
- * Analyze functions always take one INTERNAL argument and return
- * bool.
+ * Analyze functions always take one INTERNAL argument and return bool.
*/
argList[0] = INTERNALOID;
@@ -1044,8 +1036,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type \"boolean\"",
- NameListToString(procname))));
+ errmsg("type analyze function %s must return type \"boolean\"",
+ NameListToString(procname))));
return procOid;
}
@@ -1073,7 +1065,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now set the parameters for keys/inheritance etc. All of these are
@@ -1165,28 +1157,28 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* Expression must be stored as a nodeToString result, but we also
- * require a valid textual representation (mainly to make life
- * easier for pg_dump).
+ * require a valid textual representation (mainly to make life easier
+ * for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
else
- /* Default is NULL, drop it */
+ /* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@@ -1305,8 +1297,8 @@ AlterDomainNotNull(List *names, bool notNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains null values",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
- RelationGetRelationName(testrel))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
+ RelationGetRelationName(testrel))));
}
}
heap_endscan(scan);
@@ -1317,8 +1309,8 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
- * a copy.
+ * Okay to update pg_type row. We can scribble on typTup because it's a
+ * copy.
*/
typTup->typnotnull = notNull;
@@ -1467,7 +1459,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1485,13 +1477,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1511,8 +1503,8 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an
- * entry to pg_constraint.
+ * constraint. First, process the constraint expression and add an entry
+ * to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
@@ -1572,7 +1564,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}
@@ -1626,8 +1618,8 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
HeapTuple depTup;
/*
- * We scan pg_depend to find those things that depend on the domain.
- * (We assume we can ignore refobjsubid for a domain.)
+ * We scan pg_depend to find those things that depend on the domain. (We
+ * assume we can ignore refobjsubid for a domain.)
*/
depRel = heap_open(DependRelationId, AccessShareLock);
@@ -1693,10 +1685,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}
/*
- * Confirm column has not been dropped, and is of the expected
- * type. This defends against an ALTER DROP COLUMN occuring just
- * before we acquired lock ... but if the whole table were
- * dropped, we'd still have a problem.
+ * Confirm column has not been dropped, and is of the expected type.
+ * This defends against an ALTER DROP COLUMN occuring just before we
+ * acquired lock ... but if the whole table were dropped, we'd still
+ * have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@@ -1705,9 +1697,9 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in
- * column-number order; this is just a hack to improve
- * predictability of regression test output ...
+ * Okay, add column to result. We store the columns in column-number
+ * order; this is just a hack to improve predictability of regression
+ * test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
@@ -1777,8 +1769,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = ChooseConstraintName(domainName,
@@ -1793,11 +1785,11 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
pstate = make_parsestate(NULL);
/*
- * Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of
- * the base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be
- * considered a member of the domain.
+ * Set up a CoerceToDomainValue to represent the occurrence of VALUE in
+ * the expression. Note that it will appear to have the type of the base
+ * type, not the domain. This seems correct since within the check
+ * expression, we should not assume the input value can be considered a
+ * member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@@ -1818,7 +1810,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
if (list_length(pstate->p_rtable) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use table references in domain check constraint")));
+ errmsg("cannot use table references in domain check constraint")));
/*
* Domains don't allow var clauses (this should be redundant with the
@@ -1827,7 +1819,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use table references in domain check constraint")));
+ errmsg("cannot use table references in domain check constraint")));
/*
* No subplans or aggregates, either...
@@ -1849,8 +1841,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Deparse it to produce text for consrc.
*
- * Since VARNOs aren't allowed in domain constraints, relation context
- * isn't required as anything other than a shell.
+ * Since VARNOs aren't allowed in domain constraints, relation context isn't
+ * required as anything other than a shell.
*/
ccsrc = deparse_expression(expr,
deparse_context_for(domainName,
@@ -1881,8 +1873,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine
- * can perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine can
+ * perform any additional required tests.
*/
return ccbin;
}
@@ -1956,8 +1948,7 @@ GetDomainConstraints(Oid typeOid)
continue;
/*
- * Not expecting conbin to be NULL, but we'll test for it
- * anyway
+ * Not expecting conbin to be NULL, but we'll test for it anyway
*/
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
@@ -1978,8 +1969,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains
- * should be applied earlier.
+ * use lcons() here because constraints of lower domains should be
+ * applied earlier.
*/
result = lcons(r, result);
}
@@ -1994,8 +1985,8 @@ GetDomainConstraints(Oid typeOid)
heap_close(conRel, AccessShareLock);
/*
- * Only need to add one NOT NULL check regardless of how many domains
- * in the stack request it.
+ * Only need to add one NOT NULL check regardless of how many domains in
+ * the stack request it.
*/
if (notNull)
{
@@ -2071,7 +2062,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_type_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_type_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
@@ -2088,8 +2079,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
}
/*
- * Modify the owner --- okay to scribble on typTup because it's a
- * copy
+ * Modify the owner --- okay to scribble on typTup because it's a copy
*/
typTup->typowner = newOwnerId;
@@ -2128,8 +2118,7 @@ AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId)
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * Modify the owner --- okay to scribble on typTup because it's a
- * copy
+ * Modify the owner --- okay to scribble on typTup because it's a copy
*/
typTup->typowner = newOwnerId;
@@ -2150,9 +2139,9 @@ AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId)
void
AlterTypeNamespace(List *names, const char *newschema)
{
- TypeName *typename;
- Oid typeOid;
- Oid nspOid;
+ TypeName *typename;
+ Oid typeOid;
+ Oid nspOid;
/* get type OID */
typename = makeNode(TypeName);
@@ -2221,7 +2210,7 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
@@ -2264,18 +2253,18 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
/*
* Composite types have pg_class entries.
*
- * We need to modify the pg_class tuple as well to
- * reflect the change of schema.
+ * We need to modify the pg_class tuple as well to reflect the change of
+ * schema.
*/
if (isCompositeType)
{
- Relation classRel;
+ Relation classRel;
classRel = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * The dependency on the schema is listed under the pg_class entry,
- * so tell AlterRelationNamespaceInternal to fix it.
+ * The dependency on the schema is listed under the pg_class entry, so
+ * tell AlterRelationNamespaceInternal to fix it.
*/
AlterRelationNamespaceInternal(classRel, typform->typrelid,
oldNspOid, nspOid,
@@ -2284,8 +2273,8 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
heap_close(classRel, RowExclusiveLock);
/*
- * Check for constraints associated with the composite type
- * (we don't currently support this, but probably will someday).
+ * Check for constraints associated with the composite type (we don't
+ * currently support this, but probably will someday).
*/
AlterConstraintNamespaces(typform->typrelid, oldNspOid,
nspOid, false);
@@ -2297,12 +2286,12 @@ AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
AlterConstraintNamespaces(typeOid, oldNspOid, nspOid, true);
/*
- * Update dependency on schema, if any --- a table rowtype has not
- * got one.
+ * Update dependency on schema, if any --- a table rowtype has not got
+ * one.
*/
if (typform->typtype != 'c')
if (changeDependencyFor(TypeRelationId, typeOid,
- NamespaceRelationId, oldNspOid, nspOid) != 1)
+ NamespaceRelationId, oldNspOid, nspOid) != 1)
elog(ERROR, "failed to change schema dependency for type %s",
format_type_be(typeOid));
}
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 082ea0cf7a0..706e85dea5b 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.160 2005/07/31 17:19:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.161 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,11 +34,11 @@ extern bool Password_encryption;
static List *roleNamesToIds(List *memberNames);
static void AddRoleMems(const char *rolename, Oid roleid,
- List *memberNames, List *memberIds,
- Oid grantorId, bool admin_opt);
+ List *memberNames, List *memberIds,
+ Oid grantorId, bool admin_opt);
static void DelRoleMems(const char *rolename, Oid roleid,
- List *memberNames, List *memberIds,
- bool admin_opt);
+ List *memberNames, List *memberIds,
+ bool admin_opt);
/* Check if current user has createrole privileges */
@@ -78,16 +78,16 @@ CreateRole(CreateRoleStmt *stmt)
Oid roleid;
ListCell *item;
ListCell *option;
- char *password = NULL; /* user password */
+ char *password = NULL; /* user password */
bool encrypt_password = Password_encryption; /* encrypt password? */
char encrypted_password[MD5_PASSWD_LEN + 1];
- bool issuper = false; /* Make the user a superuser? */
- bool inherit = true; /* Auto inherit privileges? */
+ bool issuper = false; /* Make the user a superuser? */
+ bool inherit = true; /* Auto inherit privileges? */
bool createrole = false; /* Can this user create roles? */
bool createdb = false; /* Can the user create databases? */
bool canlogin = false; /* Can this user login? */
- int connlimit = -1; /* maximum connections allowed */
- List *addroleto = NIL; /* roles to make this a member of */
+ int connlimit = -1; /* maximum connections allowed */
+ List *addroleto = NIL; /* roles to make this a member of */
List *rolemembers = NIL; /* roles to be members of this role */
List *adminmembers = NIL; /* roles to be admins of this role */
char *validUntil = NULL; /* time the login is valid until */
@@ -272,9 +272,9 @@ CreateRole(CreateRoleStmt *stmt)
stmt->role)));
/*
- * Check the pg_authid relation to be certain the role doesn't
- * already exist. Note we secure exclusive lock because
- * we need to protect our eventual update of the flat auth file.
+ * Check the pg_authid relation to be certain the role doesn't already
+ * exist. Note we secure exclusive lock because we need to protect our
+ * eventual update of the flat auth file.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
pg_authid_dsc = RelationGetDescr(pg_authid_rel);
@@ -344,8 +344,8 @@ CreateRole(CreateRoleStmt *stmt)
CatalogUpdateIndexes(pg_authid_rel, tuple);
/*
- * Advance command counter so we can see new record; else tests
- * in AddRoleMems may fail.
+ * Advance command counter so we can see new record; else tests in
+ * AddRoleMems may fail.
*/
if (addroleto || adminmembers || rolemembers)
CommandCounterIncrement();
@@ -355,8 +355,8 @@ CreateRole(CreateRoleStmt *stmt)
*/
foreach(item, addroleto)
{
- char *oldrolename = strVal(lfirst(item));
- Oid oldroleid = get_roleid_checked(oldrolename);
+ char *oldrolename = strVal(lfirst(item));
+ Oid oldroleid = get_roleid_checked(oldrolename);
AddRoleMems(oldrolename, oldroleid,
list_make1(makeString(stmt->role)),
@@ -365,8 +365,8 @@ CreateRole(CreateRoleStmt *stmt)
}
/*
- * Add the specified members to this new role. adminmembers get the
- * admin option, rolemembers don't.
+ * Add the specified members to this new role. adminmembers get the admin
+ * option, rolemembers don't.
*/
AddRoleMems(stmt->role, roleid,
adminmembers, roleNamesToIds(adminmembers),
@@ -406,15 +406,15 @@ AlterRole(AlterRoleStmt *stmt)
HeapTuple tuple,
new_tuple;
ListCell *option;
- char *password = NULL; /* user password */
+ char *password = NULL; /* user password */
bool encrypt_password = Password_encryption; /* encrypt password? */
char encrypted_password[MD5_PASSWD_LEN + 1];
- int issuper = -1; /* Make the user a superuser? */
- int inherit = -1; /* Auto inherit privileges? */
- int createrole = -1; /* Can this user create roles? */
- int createdb = -1; /* Can the user create databases? */
- int canlogin = -1; /* Can this user login? */
- int connlimit = -1; /* maximum connections allowed */
+ int issuper = -1; /* Make the user a superuser? */
+ int inherit = -1; /* Auto inherit privileges? */
+ int createrole = -1; /* Can this user create roles? */
+ int createdb = -1; /* Can the user create databases? */
+ int canlogin = -1; /* Can this user login? */
+ int connlimit = -1; /* maximum connections allowed */
List *rolemembers = NIL; /* roles to be added/removed */
char *validUntil = NULL; /* time the login is valid until */
DefElem *dpassword = NULL;
@@ -591,9 +591,9 @@ AlterRole(AlterRoleStmt *stmt)
* issuper/createrole/catupdate/etc
*
* XXX It's rather unclear how to handle catupdate. It's probably best to
- * keep it equal to the superuser status, otherwise you could end up
- * with a situation where no existing superuser can alter the
- * catalogs, including pg_authid!
+ * keep it equal to the superuser status, otherwise you could end up with
+ * a situation where no existing superuser can alter the catalogs,
+ * including pg_authid!
*/
if (issuper >= 0)
{
@@ -673,8 +673,8 @@ AlterRole(AlterRoleStmt *stmt)
heap_freetuple(new_tuple);
/*
- * Advance command counter so we can see new record; else tests
- * in AddRoleMems may fail.
+ * Advance command counter so we can see new record; else tests in
+ * AddRoleMems may fail.
*/
if (rolemembers)
CommandCounterIncrement();
@@ -801,7 +801,8 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
void
DropRole(DropRoleStmt *stmt)
{
- Relation pg_authid_rel, pg_auth_members_rel;
+ Relation pg_authid_rel,
+ pg_auth_members_rel;
ListCell *item;
if (!have_createrole_privilege())
@@ -811,9 +812,9 @@ DropRole(DropRoleStmt *stmt)
/*
* Scan the pg_authid relation to find the Oid of the role(s) to be
- * deleted. Note we secure exclusive lock on pg_authid, because we
- * need to protect our update of the flat auth file. A regular
- * writer's lock on pg_auth_members is sufficient though.
+ * deleted. Note we secure exclusive lock on pg_authid, because we need
+ * to protect our update of the flat auth file. A regular writer's lock
+ * on pg_auth_members is sufficient though.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
pg_auth_members_rel = heap_open(AuthMemRelationId, RowExclusiveLock);
@@ -823,7 +824,7 @@ DropRole(DropRoleStmt *stmt)
const char *role = strVal(lfirst(item));
HeapTuple tuple,
tmp_tuple;
- ScanKeyData scankey;
+ ScanKeyData scankey;
char *detail;
SysScanDesc sscan;
Oid roleid;
@@ -865,7 +866,7 @@ DropRole(DropRoleStmt *stmt)
/*
* Lock the role, so nobody can add dependencies to her while we drop
* her. We keep the lock until the end of transaction.
- */
+ */
LockSharedObject(AuthIdRelationId, roleid, 0, AccessExclusiveLock);
/* Check for pg_shdepend entries depending on this role */
@@ -873,7 +874,7 @@ DropRole(DropRoleStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("role \"%s\" cannot be dropped because some objects depend on it",
- role),
+ role),
errdetail("%s", detail)));
/*
@@ -884,10 +885,10 @@ DropRole(DropRoleStmt *stmt)
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove
- * all tuples that show it as either a role or a member.
+ * Remove role from the pg_auth_members table. We have to remove all
+ * tuples that show it as either a role or a member.
*
- * XXX what about grantor entries? Maybe we should do one heap scan.
+ * XXX what about grantor entries? Maybe we should do one heap scan.
*/
ScanKeyInit(&scankey,
Anum_pg_auth_members_roleid,
@@ -920,13 +921,13 @@ DropRole(DropRoleStmt *stmt)
systable_endscan(sscan);
/*
- * Advance command counter so that later iterations of this loop
- * will see the changes already made. This is essential if, for
- * example, we are trying to drop both a role and one of its
- * direct members --- we'll get an error if we try to delete the
- * linking pg_auth_members tuple twice. (We do not need a CCI
- * between the two delete loops above, because it's not allowed
- * for a role to directly contain itself.)
+ * Advance command counter so that later iterations of this loop will
+ * see the changes already made. This is essential if, for example,
+ * we are trying to drop both a role and one of its direct members ---
+ * we'll get an error if we try to delete the linking pg_auth_members
+ * tuple twice. (We do not need a CCI between the two delete loops
+ * above, because it's not allowed for a role to directly contain
+ * itself.)
*/
CommandCounterIncrement();
}
@@ -975,11 +976,11 @@ RenameRole(const char *oldname, const char *newname)
errmsg("role \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user somewhere,
- * so renaming it could cause confusion. On the other hand, there may
- * not be an actual problem besides a little confusion, so think about
- * this and decide. Same for SET ROLE ... we don't restrict renaming
- * the current effective userid, though.
+ * XXX Client applications probably store the session user somewhere, so
+ * renaming it could cause confusion. On the other hand, there may not be
+ * an actual problem besides a little confusion, so think about this and
+ * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * effective userid, though.
*/
roleid = HeapTupleGetOid(oldtuple);
@@ -1032,7 +1033,7 @@ RenameRole(const char *oldname, const char *newname)
repl_repl[Anum_pg_authid_rolname - 1] = 'r';
repl_val[Anum_pg_authid_rolname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(newname));
+ CStringGetDatum(newname));
repl_null[Anum_pg_authid_rolname - 1] = ' ';
datum = heap_getattr(oldtuple, Anum_pg_authid_rolpassword, dsc, &isnull);
@@ -1082,23 +1083,22 @@ GrantRole(GrantRoleStmt *stmt)
grantee_ids = roleNamesToIds(stmt->grantee_roles);
/*
- * Even though this operation doesn't change pg_authid, we must
- * secure exclusive lock on it to protect our update of the flat
- * auth file.
+ * Even though this operation doesn't change pg_authid, we must secure
+ * exclusive lock on it to protect our update of the flat auth file.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
/*
- * Step through all of the granted roles and add/remove
- * entries for the grantees, or, if admin_opt is set, then
- * just add/remove the admin option.
+ * Step through all of the granted roles and add/remove entries for the
+ * grantees, or, if admin_opt is set, then just add/remove the admin
+ * option.
*
* Note: Permissions checking is done by AddRoleMems/DelRoleMems
*/
foreach(item, stmt->granted_roles)
{
- char *rolename = strVal(lfirst(item));
- Oid roleid = get_roleid_checked(rolename);
+ char *rolename = strVal(lfirst(item));
+ Oid roleid = get_roleid_checked(rolename);
if (stmt->is_grant)
AddRoleMems(rolename, roleid,
@@ -1132,8 +1132,8 @@ roleNamesToIds(List *memberNames)
foreach(l, memberNames)
{
- char *rolename = strVal(lfirst(l));
- Oid roleid = get_roleid_checked(rolename);
+ char *rolename = strVal(lfirst(l));
+ Oid roleid = get_roleid_checked(rolename);
result = lappend_oid(result, roleid);
}
@@ -1160,8 +1160,8 @@ AddRoleMems(const char *rolename, Oid roleid,
{
Relation pg_authmem_rel;
TupleDesc pg_authmem_dsc;
- ListCell *nameitem;
- ListCell *iditem;
+ ListCell *nameitem;
+ ListCell *iditem;
Assert(list_length(memberNames) == list_length(memberIds));
@@ -1170,9 +1170,8 @@ AddRoleMems(const char *rolename, Oid roleid,
return;
/*
- * Check permissions: must have createrole or admin option on the
- * role to be changed. To mess with a superuser role, you gotta
- * be superuser.
+ * Check permissions: must have createrole or admin option on the role to
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1207,32 +1206,32 @@ AddRoleMems(const char *rolename, Oid roleid,
Oid memberid = lfirst_oid(iditem);
HeapTuple authmem_tuple;
HeapTuple tuple;
- Datum new_record[Natts_pg_auth_members];
- char new_record_nulls[Natts_pg_auth_members];
- char new_record_repl[Natts_pg_auth_members];
+ Datum new_record[Natts_pg_auth_members];
+ char new_record_nulls[Natts_pg_auth_members];
+ char new_record_repl[Natts_pg_auth_members];
/*
* Refuse creation of membership loops, including the trivial case
- * where a role is made a member of itself. We do this by checking
- * to see if the target role is already a member of the proposed
- * member role.
+ * where a role is made a member of itself. We do this by checking to
+ * see if the target role is already a member of the proposed member
+ * role.
*/
if (is_member_of_role(roleid, memberid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- (errmsg("role \"%s\" is a member of role \"%s\"",
- rolename, membername))));
+ (errmsg("role \"%s\" is a member of role \"%s\"",
+ rolename, membername))));
/*
- * Check if entry for this role/member already exists;
- * if so, give warning unless we are adding admin option.
+ * Check if entry for this role/member already exists; if so, give
+ * warning unless we are adding admin option.
*/
authmem_tuple = SearchSysCache(AUTHMEMROLEMEM,
ObjectIdGetDatum(roleid),
ObjectIdGetDatum(memberid),
0, 0);
if (HeapTupleIsValid(authmem_tuple) &&
- (!admin_opt ||
+ (!admin_opt ||
((Form_pg_auth_members) GETSTRUCT(authmem_tuple))->admin_option))
{
ereport(NOTICE,
@@ -1301,8 +1300,8 @@ DelRoleMems(const char *rolename, Oid roleid,
{
Relation pg_authmem_rel;
TupleDesc pg_authmem_dsc;
- ListCell *nameitem;
- ListCell *iditem;
+ ListCell *nameitem;
+ ListCell *iditem;
Assert(list_length(memberNames) == list_length(memberIds));
@@ -1311,9 +1310,8 @@ DelRoleMems(const char *rolename, Oid roleid,
return;
/*
- * Check permissions: must have createrole or admin option on the
- * role to be changed. To mess with a superuser role, you gotta
- * be superuser.
+ * Check permissions: must have createrole or admin option on the role to
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1366,9 +1364,9 @@ DelRoleMems(const char *rolename, Oid roleid,
{
/* Just turn off the admin option */
HeapTuple tuple;
- Datum new_record[Natts_pg_auth_members];
- char new_record_nulls[Natts_pg_auth_members];
- char new_record_repl[Natts_pg_auth_members];
+ Datum new_record[Natts_pg_auth_members];
+ char new_record_nulls[Natts_pg_auth_members];
+ char new_record_repl[Natts_pg_auth_members];
/* Build a tuple to update with */
MemSet(new_record, 0, sizeof(new_record));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 4f9eb192123..506eb23e707 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.316 2005/10/03 22:52:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.317 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,7 +198,7 @@ static TransactionId FreezeLimit;
/* non-export function prototypes */
static List *get_rel_oids(List *relids, const RangeVar *vacrel,
- const char *stmttype);
+ const char *stmttype);
static void vac_update_dbstats(Oid dbid,
TransactionId vacuumXID,
TransactionId frozenXID);
@@ -281,17 +281,16 @@ vacuum(VacuumStmt *vacstmt, List *relids)
elevel = DEBUG2;
/*
- * We cannot run VACUUM inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
- * Furthermore, the forced commit that occurs before truncating the
- * relation's file would have the effect of committing the rest of the
- * user's transaction too, which would certainly not be the desired
- * behavior. (This only applies to VACUUM FULL, though. We could in
- * theory run lazy VACUUM inside a transaction block, but we choose to
- * disallow that case because we'd rather commit as soon as possible
- * after finishing the vacuum. This is mainly so that we can let go
- * the AccessExclusiveLock that we may be holding.)
+ * We cannot run VACUUM inside a user transaction block; if we were inside
+ * a transaction, then our commit- and start-transaction-command calls
+ * would not have the intended effect! Furthermore, the forced commit that
+ * occurs before truncating the relation's file would have the effect of
+ * committing the rest of the user's transaction too, which would
+ * certainly not be the desired behavior. (This only applies to VACUUM
+ * FULL, though. We could in theory run lazy VACUUM inside a transaction
+ * block, but we choose to disallow that case because we'd rather commit
+ * as soon as possible after finishing the vacuum. This is mainly so that
+ * we can let go the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
@@ -306,16 +305,16 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Disallow the combination VACUUM FULL FREEZE; although it would mostly
* work, VACUUM FULL's ability to move tuples around means that it is
- * injecting its own XID into tuple visibility checks. We'd have to
+ * injecting its own XID into tuple visibility checks. We'd have to
* guarantee that every moved tuple is properly marked XMIN_COMMITTED or
* XMIN_INVALID before the end of the operation. There are corner cases
- * where this does not happen, and getting rid of them all seems hard
- * (not to mention fragile to maintain). On the whole it's not worth it
+ * where this does not happen, and getting rid of them all seems hard (not
+ * to mention fragile to maintain). On the whole it's not worth it
* compared to telling people to use two operations. See pgsql-hackers
* discussion of 27-Nov-2004, and comments below for update_hint_bits().
*
- * Note: this is enforced here, and not in the grammar, since (a) we can
- * give a better error message, and (b) we might want to allow it again
+ * Note: this is enforced here, and not in the grammar, since (a) we can give
+ * a better error message, and (b) we might want to allow it again
* someday.
*/
if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze)
@@ -333,9 +332,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away eventually even
- * if we suffer an error; there's no need for special abort cleanup
- * logic.
+ * Since it is a child of PortalContext, it will go away eventually even if
+ * we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
@@ -347,8 +345,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
all_rels = (relids == NIL && vacstmt->relation == NULL);
/*
- * Build list of relations to process, unless caller gave us one.
- * (If we build one, we put it in vac_context for safekeeping.)
+ * Build list of relations to process, unless caller gave us one. (If we
+ * build one, we put it in vac_context for safekeeping.)
*/
relations = get_rel_oids(relids, vacstmt->relation, stmttype);
@@ -357,21 +355,21 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
- * so that we can record these values at the end of the VACUUM.
- * Note that individual tables may well be processed with newer
- * values, but we can guarantee that no (non-shared) relations are
- * processed with older ones.
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
+ * that we can record these values at the end of the VACUUM. Note that
+ * individual tables may well be processed with newer values, but we
+ * can guarantee that no (non-shared) relations are processed with
+ * older ones.
*
- * It is okay to record non-shared values in pg_database, even though
- * we may vacuum shared relations with older cutoffs, because only
- * the minimum of the values present in pg_database matters. We
- * can be sure that shared relations have at some time been
- * vacuumed with cutoffs no worse than the global minimum; for, if
- * there is a backend in some other DB with xmin = OLDXMIN that's
- * determining the cutoff with which we vacuum shared relations,
- * it is not possible for that database to have a cutoff newer
- * than OLDXMIN recorded in pg_database.
+ * It is okay to record non-shared values in pg_database, even though we
+ * may vacuum shared relations with older cutoffs, because only the
+ * minimum of the values present in pg_database matters. We can be
+ * sure that shared relations have at some time been vacuumed with
+ * cutoffs no worse than the global minimum; for, if there is a
+ * backend in some other DB with xmin = OLDXMIN that's determining the
+ * cutoff with which we vacuum shared relations, it is not possible
+ * for that database to have a cutoff newer than OLDXMIN recorded in
+ * pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
@@ -381,16 +379,15 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we can
- * release locks as soon as possible. (We could possibly use the
- * outer transaction for a one-table VACUUM, but handling TOAST tables
- * would be problematic.)
+ * For VACUUM (with or without ANALYZE): always do so, so that we can release
+ * locks as soon as possible. (We could possibly use the outer
+ * transaction for a one-table VACUUM, but handling TOAST tables would be
+ * problematic.)
*
* For ANALYZE (no VACUUM): if inside a transaction block, we cannot
- * start/commit our own transactions. Also, there's no need to do so
- * if only processing one relation. For multiple relations when not
- * within a transaction block, use own transactions so we can release
- * locks sooner.
+ * start/commit our own transactions. Also, there's no need to do so if
+ * only processing one relation. For multiple relations when not within a
+ * transaction block, use own transactions so we can release locks sooner.
*/
if (vacstmt->vacuum)
use_own_xacts = true;
@@ -406,8 +403,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
}
/*
- * If we are running ANALYZE without per-table transactions, we'll
- * need a memory context with table lifetime.
+ * If we are running ANALYZE without per-table transactions, we'll need a
+ * memory context with table lifetime.
*/
if (!use_own_xacts)
anl_context = AllocSetContextCreate(PortalContext,
@@ -417,12 +414,12 @@ vacuum(VacuumStmt *vacstmt, List *relids)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * vacuum_rel expects to be entered with no transaction active; it
- * will start and commit its own transaction. But we are called by an
- * SQL command, and so we are executing inside a transaction already.
- * We commit the transaction started in PostgresMain() here, and start
- * another one before exiting to match the commit waiting for us back
- * in PostgresMain().
+ * vacuum_rel expects to be entered with no transaction active; it will
+ * start and commit its own transaction. But we are called by an SQL
+ * command, and so we are executing inside a transaction already. We
+ * commit the transaction started in PostgresMain() here, and start
+ * another one before exiting to match the commit waiting for us back in
+ * PostgresMain().
*/
if (use_own_xacts)
{
@@ -455,11 +452,11 @@ vacuum(VacuumStmt *vacstmt, List *relids)
MemoryContext old_context = NULL;
/*
- * If using separate xacts, start one for analyze.
- * Otherwise, we can use the outer transaction, but we
- * still need to call analyze_rel in a memory context that
- * will be cleaned up on return (else we leak memory while
- * processing multiple tables).
+ * If using separate xacts, start one for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (use_own_xacts)
{
@@ -471,8 +468,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
old_context = MemoryContextSwitchTo(anl_context);
/*
- * Tell the buffer replacement strategy that vacuum is
- * causing the IO
+ * Tell the buffer replacement strategy that vacuum is causing
+ * the IO
*/
StrategyHintVacuum(true);
@@ -518,16 +515,16 @@ vacuum(VacuumStmt *vacstmt, List *relids)
if (vacstmt->vacuum)
{
/*
- * If it was a database-wide VACUUM, print FSM usage statistics
- * (we don't make you be superuser to see these).
+ * If it was a database-wide VACUUM, print FSM usage statistics (we
+ * don't make you be superuser to see these).
*/
if (all_rels)
PrintFreeSpaceMapStatistics(elevel);
/*
* If we completed a database-wide VACUUM without skipping any
- * relations, update the database's pg_database row with info
- * about the transaction IDs used, and try to truncate pg_clog.
+ * relations, update the database's pg_database row with info about
+ * the transaction IDs used, and try to truncate pg_clog.
*/
if (all_rels)
{
@@ -539,8 +536,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete the
- * active context!
+ * StartTransactionCommand, else we might be trying to delete the active
+ * context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@@ -725,10 +722,10 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
- * Invalidate the tuple in the catcaches; this also arranges to flush
- * the relation's relcache entry. (If we fail to commit for some
- * reason, no flush will occur, but no great harm is done since there
- * are no noncritical state updates here.)
+ * Invalidate the tuple in the catcaches; this also arranges to flush the
+ * relation's relcache entry. (If we fail to commit for some reason, no
+ * flush will occur, but no great harm is done since there are no
+ * noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
@@ -878,8 +875,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
heap_close(relation, AccessShareLock);
/*
- * Do not truncate CLOG if we seem to have suffered wraparound
- * already; the computed minimum XID might be bogus.
+ * Do not truncate CLOG if we seem to have suffered wraparound already;
+ * the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
@@ -893,8 +890,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
TruncateCLOG(vacuumXID);
/*
- * Do not update varsup.c if we seem to have suffered wraparound
- * already; the computed XID might be bogus.
+ * Do not update varsup.c if we seem to have suffered wraparound already;
+ * the computed XID might be bogus.
*/
if (frozenAlreadyWrapped)
{
@@ -911,11 +908,11 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
age = (int32) (myXID - frozenXID);
if (age > (int32) ((MaxTransactionId >> 3) * 3))
ereport(WARNING,
- (errmsg("database \"%s\" must be vacuumed within %u transactions",
- NameStr(oldest_datname),
- (MaxTransactionId >> 1) - age),
- errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
- NameStr(oldest_datname))));
+ (errmsg("database \"%s\" must be vacuumed within %u transactions",
+ NameStr(oldest_datname),
+ (MaxTransactionId >> 1) - age),
+ errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
+ NameStr(oldest_datname))));
}
@@ -970,8 +967,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
CHECK_FOR_INTERRUPTS();
/*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to vacuum it.
+ * Race condition -- if the pg_class tuple has gone away since the last
+ * time we saw it, we don't need to vacuum it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
@@ -983,24 +980,22 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}
/*
- * Determine the type of lock we want --- hard exclusive lock for a
- * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
- * vacuum. Either way, we can be sure that no other backend is
- * vacuuming the same table.
+ * Determine the type of lock we want --- hard exclusive lock for a FULL
+ * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum.
+ * Either way, we can be sure that no other backend is vacuuming the same
+ * table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
- * Open the class, get an appropriate lock on it, and check
- * permissions.
+ * Open the class, get an appropriate lock on it, and check permissions.
*
- * We allow the user to vacuum a table if he is superuser, the table
- * owner, or the database owner (but in the latter case, only if it's
- * not a shared relation). pg_class_ownercheck includes the superuser
- * case.
+ * We allow the user to vacuum a table if he is superuser, the table owner,
+ * or the database owner (but in the latter case, only if it's not a
+ * shared relation). pg_class_ownercheck includes the superuser case.
*
- * Note we choose to treat permissions failure as a WARNING and keep
- * trying to vacuum the rest of the DB --- is this appropriate?
+ * Note we choose to treat permissions failure as a WARNING and keep trying
+ * to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
@@ -1017,8 +1012,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}
/*
- * Check that it's a plain table; we used to do this in get_rel_oids()
- * but seems safer to check after we've locked the relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids() but
+ * seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
@@ -1043,15 +1038,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
relation_close(onerel, lmode);
StrategyHintVacuum(false);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp
- * tables */
+ return true; /* assume no long-lived data in temp tables */
}
/*
* Get a session-level lock too. This will protect our access to the
* relation across multiple transactions, so that we can vacuum the
- * relation's TOAST table (if any) secure in the knowledge that no one
- * is deleting the parent relation.
+ * relation's TOAST table (if any) secure in the knowledge that no one is
+ * deleting the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@@ -1087,9 +1081,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good,
- * because the toaster always uses hardcoded index access and
- * statistics are totally unimportant for toast relations.
+ * "analyze" will not get done on the toast table. This is good, because
+ * the toaster always uses hardcoded index access and statistics are
+ * totally unimportant for toast relations.
*/
if (toast_relid != InvalidOid)
{
@@ -1128,8 +1122,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
{
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indexes */
- VacPageListData fraged_pages; /* List of pages with space enough
- * for re-using */
+ VacPageListData fraged_pages; /* List of pages with space enough for
+ * re-using */
Relation *Irel;
int nindexes,
i;
@@ -1198,7 +1192,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
- vacstmt->analyze, vacrelstats->rel_tuples);
+ vacstmt->analyze, vacrelstats->rel_tuples);
}
@@ -1275,11 +1269,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Since we are holding exclusive lock on the relation, no other
- * backend can be accessing the page; however it is possible that
- * the background writer will try to write the page if it's already
- * marked dirty. To ensure that invalid data doesn't get written to
- * disk, we must take exclusive buffer lock wherever we potentially
- * modify pages.
+ * backend can be accessing the page; however it is possible that the
+ * background writer will try to write the page if it's already marked
+ * dirty. To ensure that invalid data doesn't get written to disk, we
+ * must take exclusive buffer lock wherever we potentially modify
+ * pages.
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1292,8 +1286,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
VacPage vacpagecopy;
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1357,8 +1351,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_LIVE:
/*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
+ * Tuple is good. Consider whether to replace its xmin
+ * value with FrozenTransactionId.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
@@ -1381,15 +1375,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
nkeep += 1;
/*
- * If we do shrinking and this tuple is updated one
- * then remember it to construct updated tuple
- * dependencies.
+ * If we do shrinking and this tuple is updated one then
+ * remember it to construct updated tuple dependencies.
*/
if (do_shrinking &&
!(ItemPointerEquals(&(tuple.t_self),
@@ -1399,8 +1392,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
{
free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks,
- (free_vtlinks + num_vtlinks) *
- sizeof(VTupleLinkData));
+ (free_vtlinks + num_vtlinks) *
+ sizeof(VTupleLinkData));
}
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self;
@@ -1411,10 +1404,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- * (Actually, it can happen in system catalogs, since
- * we tend to release write lock before commit there.)
+ * This should not happen, since we hold exclusive lock on
+ * the relation; shouldn't we raise an error? (Actually,
+ * it can happen in system catalogs, since we tend to
+ * release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
@@ -1424,10 +1417,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- * (Actually, it can happen in system catalogs, since
- * we tend to release write lock before commit there.)
+ * This should not happen, since we hold exclusive lock on
+ * the relation; shouldn't we raise an error? (Actually,
+ * it can happen in system catalogs, since we tend to
+ * release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
@@ -1444,12 +1437,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ItemId lpp;
/*
- * Here we are building a temporary copy of the page with
- * dead tuples removed. Below we will apply
+ * Here we are building a temporary copy of the page with dead
+ * tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
- * determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
- * the real page yet...
+ * determine how much space will be available after removal of
+ * dead tuples. But note we are NOT changing the real page
+ * yet...
*/
if (tempPage == NULL)
{
@@ -1499,8 +1492,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Add the page to fraged_pages if it has a useful amount of free
* space. "Useful" means enough for a minimal-sized tuple. But we
- * don't know that accurately near the start of the relation, so
- * add pages unconditionally if they have >= BLCKSZ/10 free space.
+ * don't know that accurately near the start of the relation, so add
+ * pages unconditionally if they have >= BLCKSZ/10 free space.
*/
do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
@@ -1516,8 +1509,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move
- * destination.
+ * vacuuming; this is to keep us from using it as a move destination.
*/
if (notup)
{
@@ -1588,11 +1580,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
- "Nonremovable row versions range from %lu to %lu bytes long.\n"
+ "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable row versions) is %.0f bytes.\n"
+ "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
- "%u pages containing %.0f free bytes are potential move destinations.\n"
+ "%u pages containing %.0f free bytes are potential move destinations.\n"
"%s.",
nkeep,
(unsigned long) min_tlen, (unsigned long) max_tlen,
@@ -1663,14 +1655,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacpage->offsets_used = vacpage->offsets_free = 0;
/*
- * Scan pages backwards from the last nonempty page, trying to move
- * tuples down to lower pages. Quit when we reach a page that we have
- * moved any tuples onto, or the first page if we haven't moved
- * anything, or when we find a page we cannot completely empty (this
- * last condition is handled by "break" statements within the loop).
+ * Scan pages backwards from the last nonempty page, trying to move tuples
+ * down to lower pages. Quit when we reach a page that we have moved any
+ * tuples onto, or the first page if we haven't moved anything, or when we
+ * find a page we cannot completely empty (this last condition is handled
+ * by "break" statements within the loop).
*
- * NB: this code depends on the vacuum_pages and fraged_pages lists being
- * in order by blkno.
+ * NB: this code depends on the vacuum_pages and fraged_pages lists being in
+ * order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
@@ -1688,18 +1680,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacuum_delay_point();
/*
- * Forget fraged_pages pages at or after this one; they're no
- * longer useful as move targets, since we only want to move down.
- * Note that since we stop the outer loop at last_move_dest_block,
- * pages removed here cannot have had anything moved onto them
- * already.
+ * Forget fraged_pages pages at or after this one; they're no longer
+ * useful as move targets, since we only want to move down. Note that
+ * since we stop the outer loop at last_move_dest_block, pages removed
+ * here cannot have had anything moved onto them already.
*
- * Also note that we don't change the stored fraged_pages list, only
- * our local variable num_fraged_pages; so the forgotten pages are
- * still available to be loaded into the free space map later.
+ * Also note that we don't change the stored fraged_pages list, only our
+ * local variable num_fraged_pages; so the forgotten pages are still
+ * available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
{
Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
--num_fraged_pages;
@@ -1752,8 +1743,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
Assert(!isempty);
- chain_tuple_moved = false; /* no one chain-tuple was moved
- * off this page, yet */
+ chain_tuple_moved = false; /* no one chain-tuple was moved off
+ * this page, yet */
vacpage->blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
@@ -1807,9 +1798,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
elog(ERROR, "invalid XVAC in tuple header");
/*
- * If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it
- * moved while cleaning this page or some previous one.
+ * If this (chain) tuple is moved by me already then I have to
+ * check is it in vacpage or not - i.e. is it moved while
+ * cleaning this page or some previous one.
*/
/* Can't we Assert(keep_tuples > 0) here? */
@@ -1839,34 +1830,33 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
- * If this tuple is in a chain of tuples created in updates
- * by "recent" transactions then we have to move the whole chain
- * of tuples to other places, so that we can write new t_ctid
- * links that preserve the chain relationship.
+ * If this tuple is in a chain of tuples created in updates by
+ * "recent" transactions then we have to move the whole chain of
+ * tuples to other places, so that we can write new t_ctid links
+ * that preserve the chain relationship.
*
* This test is complicated. Read it as "if tuple is a recently
- * created updated version, OR if it is an obsoleted version".
- * (In the second half of the test, we needn't make any check
- * on XMAX --- it must be recently obsoleted, else scan_heap
- * would have deemed it removable.)
+ * created updated version, OR if it is an obsoleted version". (In
+ * the second half of the test, we needn't make any check on XMAX
+ * --- it must be recently obsoleted, else scan_heap would have
+ * deemed it removable.)
*
- * NOTE: this test is not 100% accurate: it is possible for a
- * tuple to be an updated one with recent xmin, and yet not
- * match any new_tid entry in the vtlinks list. Presumably
- * there was once a parent tuple with xmax matching the xmin,
- * but it's possible that that tuple has been removed --- for
- * example, if it had xmin = xmax and wasn't itself an updated
- * version, then HeapTupleSatisfiesVacuum would deem it removable
- * as soon as the xmin xact completes.
+ * NOTE: this test is not 100% accurate: it is possible for a tuple
+ * to be an updated one with recent xmin, and yet not match any
+ * new_tid entry in the vtlinks list. Presumably there was once a
+ * parent tuple with xmax matching the xmin, but it's possible
+ * that that tuple has been removed --- for example, if it had
+ * xmin = xmax and wasn't itself an updated version, then
+ * HeapTupleSatisfiesVacuum would deem it removable as soon as the
+ * xmin xact completes.
*
- * To be on the safe side, we abandon the repair_frag process if
- * we cannot find the parent tuple in vtlinks. This may be
- * overly conservative; AFAICS it would be safe to move the
- * chain.
+ * To be on the safe side, we abandon the repair_frag process if we
+ * cannot find the parent tuple in vtlinks. This may be overly
+ * conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
- !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- OldestXmin)) ||
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
+ OldestXmin)) ||
(!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED)) &&
!(ItemPointerEquals(&(tuple.t_self),
@@ -1899,10 +1889,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
- * If this tuple is in the begin/middle of the chain then
- * we have to move to the end of chain. As with any
- * t_ctid chase, we have to verify that each new tuple
- * is really the descendant of the tuple we came from.
+ * If this tuple is in the begin/middle of the chain then we
+ * have to move to the end of chain. As with any t_ctid
+ * chase, we have to verify that each new tuple is really the
+ * descendant of the tuple we came from.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED)) &&
@@ -1963,9 +1953,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
free_vtmove = 100;
/*
- * Now, walk backwards up the chain (towards older tuples)
- * and check if all items in chain can be moved. We record
- * all the moves that need to be made in the vtmove array.
+ * Now, walk backwards up the chain (towards older tuples) and
+ * check if all items in chain can be moved. We record all
+ * the moves that need to be made in the vtmove array.
*/
for (;;)
{
@@ -2020,9 +2010,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Done if at beginning of chain */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
- OldestXmin))
- break; /* out of check-all-items loop */
+ TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
+ OldestXmin))
+ break; /* out of check-all-items loop */
/* Move to tuple with prior row version */
vtld.new_tid = tp.t_self;
@@ -2041,10 +2031,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tp.t_self)));
+ ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
- ItemPointerGetOffsetNumber(&(tp.t_self)));
+ ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "parent itemid marked as unused");
@@ -2056,19 +2046,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Read above about cases when !ItemIdIsUsed(nextItemid)
- * (child item is removed)... Due to the fact that at
- * the moment we don't remove unuseful part of
- * update-chain, it's possible to get non-matching parent
- * row here. Like as in the case which caused this
- * problem, we stop shrinking here. I could try to
- * find real parent row but want not to do it because
- * of real solution will be implemented anyway, later,
- * and we are too close to 6.5 release. - vadim
- * 06/11/99
+ * (child item is removed)... Due to the fact that at the
+ * moment we don't remove unuseful part of update-chain,
+ * it's possible to get non-matching parent row here. Like
+ * as in the case which caused this problem, we stop
+ * shrinking here. I could try to find real parent row but
+ * want not to do it because of real solution will be
+ * implemented anyway, later, and we are too close to 6.5
+ * release. - vadim 06/11/99
*/
if ((PTdata->t_infomask & HEAP_XMAX_IS_MULTI) ||
!(TransactionIdEquals(HeapTupleHeaderGetXmax(PTdata),
- HeapTupleHeaderGetXmin(tp.t_data))))
+ HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
@@ -2091,9 +2080,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed)
{
/*
- * Undo changes to offsets_used state. We don't
- * bother cleaning up the amount-free state, since
- * we're not going to do any further tuple motion.
+ * Undo changes to offsets_used state. We don't bother
+ * cleaning up the amount-free state, since we're not
+ * going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
@@ -2119,7 +2108,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tuple.t_self)));
+ ItemPointerGetBlockNumber(&(tuple.t_self)));
/* Get page to move to */
dst_buffer = ReadBuffer(onerel, destvacpage->blkno);
@@ -2132,7 +2121,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&(tuple.t_self)));
+ ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
@@ -2211,18 +2200,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} /* walk along page */
/*
- * If we broke out of the walk-along-page loop early (ie, still
- * have offnum <= maxoff), then we failed to move some tuple off
- * this page. No point in shrinking any more, so clean up and
- * exit the per-page loop.
+ * If we broke out of the walk-along-page loop early (ie, still have
+ * offnum <= maxoff), then we failed to move some tuple off this page.
+ * No point in shrinking any more, so clean up and exit the per-page
+ * loop.
*/
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
/*
- * Fix vacpage state for any unvisited tuples remaining on
- * page
+ * Fix vacpage state for any unvisited tuples remaining on page
*/
for (off = OffsetNumberNext(offnum);
off <= maxoff;
@@ -2238,8 +2226,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;
/*
- * See comments in the walk-along-page loop above about
- * why only MOVED_OFF tuples should be found here.
+ * See comments in the walk-along-page loop above about why
+ * only MOVED_OFF tuples should be found here.
*/
if (htup->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
@@ -2307,20 +2295,20 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
- * exclusive access to the relation. However, that would require
- * a lot of extra code to close and re-open the relation, indexes,
- * etc. For now, a quick hack: record status of current
- * transaction as committed, and continue.
+ * exclusive access to the relation. However, that would require a
+ * lot of extra code to close and re-open the relation, indexes, etc.
+ * For now, a quick hack: record status of current transaction as
+ * committed, and continue.
*/
RecordTransactionCommit();
}
/*
* We are not going to move any more tuples across pages, but we still
- * need to apply vacuum_page to compact free space in the remaining
- * pages in vacuum_pages list. Note that some of these pages may also
- * be in the fraged_pages list, and may have had tuples moved onto
- * them; if so, we already did vacuum_page and needn't do it again.
+ * need to apply vacuum_page to compact free space in the remaining pages
+ * in vacuum_pages list. Note that some of these pages may also be in the
+ * fraged_pages list, and may have had tuples moved onto them; if so, we
+ * already did vacuum_page and needn't do it again.
*/
for (i = 0, curpage = vacuum_pages->pagedesc;
i < vacuumed_pages;
@@ -2354,17 +2342,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_move_dest_block, num_moved);
/*
- * It'd be cleaner to make this report at the bottom of this routine,
- * but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount of
- * processing that occurs below.
+ * It'd be cleaner to make this report at the bottom of this routine, but
+ * then the rusage would double-count the second pass of index vacuuming.
+ * So do it here and ignore the relatively small amount of processing that
+ * occurs below.
*/
ereport(elevel,
- (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
- RelationGetRelationName(onerel),
- num_moved, nblocks, blkno),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s.",
+ pg_rusage_show(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
@@ -2382,7 +2370,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
@@ -2391,11 +2379,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
- * keep_tuples is the number of tuples that have been moved
- * off a page during chain moves but not been scanned over
- * subsequently. The tuple ids of these tuples are not
- * recorded as free offsets for any VacPage, so they will not
- * be cleared from the indexes.
+ * keep_tuples is the number of tuples that have been moved off a
+ * page during chain moves but not been scanned over subsequently.
+ * The tuple ids of these tuples are not recorded as free offsets
+ * for any VacPage, so they will not be cleared from the indexes.
*/
Assert(keep_tuples >= 0);
for (i = 0; i < nindexes; i++)
@@ -2406,9 +2393,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Clean moved-off tuples from last page in Nvacpagelist list.
*
- * We need only do this in this one page, because higher-numbered
- * pages are going to be truncated from the relation entirely.
- * But see comments for update_hint_bits().
+ * We need only do this in this one page, because higher-numbered pages
+ * are going to be truncated from the relation entirely. But see
+ * comments for update_hint_bits().
*/
if (vacpage->blkno == (blkno - 1) &&
vacpage->offsets_free > 0)
@@ -2439,8 +2426,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;
/*
- * See comments in the walk-along-page loop above about
- * why only MOVED_OFF tuples should be found here.
+ * See comments in the walk-along-page loop above about why
+ * only MOVED_OFF tuples should be found here.
*/
if (htup->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
@@ -2470,8 +2457,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
{
/*
- * No XLOG record, but still need to flag that XID exists
- * on disk
+ * No XLOG record, but still need to flag that XID exists on
+ * disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2554,20 +2541,20 @@ move_chain_tuple(Relation rel,
/*
* If this page was not used before - clean it.
*
- * NOTE: a nasty bug used to lurk here. It is possible for the source
- * and destination pages to be the same (since this tuple-chain member
- * can be on a page lower than the one we're currently processing in
- * the outer loop). If that's true, then after vacuum_page() the
- * source tuple will have been moved, and tuple.t_data will be
- * pointing at garbage. Therefore we must do everything that uses
- * old_tup->t_data BEFORE this step!!
+ * NOTE: a nasty bug used to lurk here. It is possible for the source and
+ * destination pages to be the same (since this tuple-chain member can be
+ * on a page lower than the one we're currently processing in the outer
+ * loop). If that's true, then after vacuum_page() the source tuple will
+ * have been moved, and tuple.t_data will be pointing at garbage.
+ * Therefore we must do everything that uses old_tup->t_data BEFORE this
+ * step!!
*
- * This path is different from the other callers of vacuum_page, because
- * we have already incremented the vacpage's offsets_used field to
- * account for the tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong. But since
- * that's a good debugging check for all other callers, we work around
- * it here rather than remove it.
+ * This path is different from the other callers of vacuum_page, because we
+ * have already incremented the vacpage's offsets_used field to account
+ * for the tuple(s) we expect to move onto the page. Therefore
+ * vacuum_page's check for offsets_used == 0 is wrong. But since that's a
+ * good debugging check for all other callers, we work around it here
+ * rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
@@ -2579,8 +2566,8 @@ move_chain_tuple(Relation rel,
}
/*
- * Update the state of the copied tuple, and store it on the
- * destination page.
+ * Update the state of the copied tuple, and store it on the destination
+ * page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
@@ -2601,9 +2588,9 @@ move_chain_tuple(Relation rel,
ItemPointerSet(&(newtup.t_self), dst_vacpage->blkno, newoff);
/*
- * Set new tuple's t_ctid pointing to itself if last tuple in chain,
- * and to next tuple in chain otherwise. (Since we move the chain
- * in reverse order, this is actually the previously processed tuple.)
+ * Set new tuple's t_ctid pointing to itself if last tuple in chain, and
+ * to next tuple in chain otherwise. (Since we move the chain in reverse
+ * order, this is actually the previously processed tuple.)
*/
if (!ItemPointerIsValid(ctid))
newtup.t_data->t_ctid = newtup.t_self;
@@ -2678,8 +2665,8 @@ move_plain_tuple(Relation rel,
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple, because we are not
- * changing the tuple contents and so there cannot be any need to
- * flush negative catcache entries.)
+ * changing the tuple contents and so there cannot be any need to flush
+ * negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
@@ -2957,9 +2944,9 @@ scan_index(Relation indrel, double num_tuples)
/*
* Even though we're not planning to delete anything, we use the
- * ambulkdelete call, because (a) the scan happens within the index AM
- * for more speed, and (b) it may want to pass private statistics to
- * the amvacuumcleanup call.
+ * ambulkdelete call, because (a) the scan happens within the index AM for
+ * more speed, and (b) it may want to pass private statistics to the
+ * amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
@@ -2978,18 +2965,18 @@ scan_index(Relation indrel, double num_tuples)
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's OK
+ * for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples)
{
@@ -3045,20 +3032,20 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's OK
+ * for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples + keep_tuples)
{
@@ -3067,7 +3054,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
@@ -3152,14 +3139,13 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small
- * bits of space. Although FSM would discard pages with little free
- * space anyway, it's important to do this prefiltering because (a) it
- * reduces the time spent holding the FSM lock in
- * RecordRelationFreeSpace, and (b) FSM uses the number of pages
- * reported as a statistic for guiding space management. If we didn't
- * threshold our reports the same way vacuumlazy.c does, we'd be
- * skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small bits
+ * of space. Although FSM would discard pages with little free space
+ * anyway, it's important to do this prefiltering because (a) it reduces
+ * the time spent holding the FSM lock in RecordRelationFreeSpace, and (b)
+ * FSM uses the number of pages reported as a statistic for guiding space
+ * management. If we didn't threshold our reports the same way
+ * vacuumlazy.c does, we'd be skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
@@ -3170,9 +3156,9 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
for (i = 0; i < nPages; i++)
{
/*
- * fraged_pages may contain entries for pages that we later
- * decided to truncate from the relation; don't enter them into
- * the free space map!
+ * fraged_pages may contain entries for pages that we later decided to
+ * truncate from the relation; don't enter them into the free space
+ * map!
*/
if (pagedesc[i]->blkno >= rel_pages)
break;
@@ -3198,7 +3184,7 @@ copy_vac_page(VacPage vacpage)
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) +
- vacpage->offsets_free * sizeof(OffsetNumber));
+ vacpage->offsets_free * sizeof(OffsetNumber));
/* fill it in */
if (vacpage->offsets_free > 0)
@@ -3368,7 +3354,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
@@ -3396,8 +3382,7 @@ bool
vac_is_partial_index(Relation indrel)
{
/*
- * If the index's AM doesn't support nulls, it's partial for our
- * purposes
+ * If the index's AM doesn't support nulls, it's partial for our purposes
*/
if (!indrel->rd_am->amindexnulls)
return true;
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 8a109237efc..7f276199015 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.60 2005/10/03 22:52:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.61 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,7 +67,7 @@ typedef struct LVRelStats
/* Overall statistics about rel */
BlockNumber rel_pages;
double rel_tuples;
- BlockNumber pages_removed;
+ BlockNumber pages_removed;
double tuples_deleted;
BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
Size threshold; /* minimum interesting free space */
@@ -97,9 +97,9 @@ static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static void lazy_scan_index(Relation indrel, LVRelStats *vacrelstats);
static void lazy_vacuum_index(Relation indrel,
- double *index_tups_vacuumed,
- BlockNumber *index_pages_removed,
- LVRelStats *vacrelstats);
+ double *index_tups_vacuumed,
+ BlockNumber *index_pages_removed,
+ LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
@@ -167,7 +167,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
@@ -181,7 +181,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
- vacstmt->analyze, vacrelstats->rel_tuples);
+ vacstmt->analyze, vacrelstats->rel_tuples);
}
@@ -228,7 +228,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* track of the total number of rows and pages removed from each index.
* index_tups_vacuumed[i] is the number removed so far from the i'th
* index. (For partial indexes this could well be different from
- * tups_vacuumed.) Likewise for index_pages_removed[i].
+ * tups_vacuumed.) Likewise for index_pages_removed[i].
*/
index_tups_vacuumed = (double *) palloc0(nindexes * sizeof(double));
index_pages_removed = (BlockNumber *) palloc0(nindexes * sizeof(BlockNumber));
@@ -253,9 +253,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacuum_delay_point();
/*
- * If we are close to overrunning the available space for
- * dead-tuple TIDs, pause and do a cycle of vacuuming before we
- * tackle this page.
+ * If we are close to overrunning the available space for dead-tuple
+ * TIDs, pause and do a cycle of vacuuming before we tackle this page.
*/
if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
vacrelstats->num_dead_tuples > 0)
@@ -283,25 +282,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
if (PageIsNew(page))
{
/*
- * An all-zeroes page could be left over if a backend extends
- * the relation but crashes before initializing the page.
- * Reclaim such pages for use.
+ * An all-zeroes page could be left over if a backend extends the
+ * relation but crashes before initializing the page. Reclaim such
+ * pages for use.
*
- * We have to be careful here because we could be looking at
- * a page that someone has just added to the relation and not
- * yet been able to initialize (see RelationGetBufferForTuple).
- * To interlock against that, release the buffer read lock
- * (which we must do anyway) and grab the relation extension
- * lock before re-locking in exclusive mode. If the page is
- * still uninitialized by then, it must be left over from a
- * crashed backend, and we can initialize it.
+ * We have to be careful here because we could be looking at a page
+ * that someone has just added to the relation and not yet been
+ * able to initialize (see RelationGetBufferForTuple). To
+ * interlock against that, release the buffer read lock (which we
+ * must do anyway) and grab the relation extension lock before
+ * re-locking in exclusive mode. If the page is still
+ * uninitialized by then, it must be left over from a crashed
+ * backend, and we can initialize it.
*
- * We don't really need the relation lock when this is a new
- * or temp relation, but it's probably not worth the code space
- * to check that, since this surely isn't a critical path.
+ * We don't really need the relation lock when this is a new or temp
+ * relation, but it's probably not worth the code space to check
+ * that, since this surely isn't a critical path.
*
- * Note: the comparable code in vacuum.c need not worry
- * because it's got exclusive lock on the whole relation.
+ * Note: the comparable code in vacuum.c need not worry because it's
+ * got exclusive lock on the whole relation.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock);
@@ -310,8 +309,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
empty_pages++;
lazy_record_free_space(vacrelstats, blkno,
@@ -365,15 +364,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
case HEAPTUPLE_LIVE:
/*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
+ * Tuple is good. Consider whether to replace its xmin
+ * value with FrozenTransactionId.
*
- * NB: Since we hold only a shared buffer lock here, we
- * are assuming that TransactionId read/write is
- * atomic. This is not the only place that makes such
- * an assumption. It'd be possible to avoid the
- * assumption by momentarily acquiring exclusive lock,
- * but for the moment I see no need to.
+ * NB: Since we hold only a shared buffer lock here, we are
+ * assuming that TransactionId read/write is atomic. This
+ * is not the only place that makes such an assumption.
+ * It'd be possible to avoid the assumption by momentarily
+ * acquiring exclusive lock, but for the moment I see no
+ * need to.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
@@ -396,8 +395,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
nkeep += 1;
break;
@@ -426,9 +425,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
- * visited again by lazy_vacuum_heap, which will compute and
- * record its post-compaction free space. If not, then we're done
- * with this page, so remember its free space as-is.
+ * visited again by lazy_vacuum_heap, which will compute and record
+ * its post-compaction free space. If not, then we're done with this
+ * page, so remember its free space as-is.
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
{
@@ -608,8 +607,8 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
pg_rusage_init(&ru0);
/*
- * Acquire appropriate type of lock on index: must be exclusive if
- * index AM isn't concurrent-safe.
+ * Acquire appropriate type of lock on index: must be exclusive if index
+ * AM isn't concurrent-safe.
*/
if (indrel->rd_am->amconcurrent)
LockRelation(indrel, RowExclusiveLock);
@@ -618,9 +617,9 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
/*
* Even though we're not planning to delete anything, we use the
- * ambulkdelete call, because (a) the scan happens within the index AM
- * for more speed, and (b) it may want to pass private statistics to
- * the amvacuumcleanup call.
+ * ambulkdelete call, because (a) the scan happens within the index AM for
+ * more speed, and (b) it may want to pass private statistics to the
+ * amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
@@ -648,14 +647,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
pfree(stats);
}
@@ -685,8 +684,8 @@ lazy_vacuum_index(Relation indrel,
pg_rusage_init(&ru0);
/*
- * Acquire appropriate type of lock on index: must be exclusive if
- * index AM isn't concurrent-safe.
+ * Acquire appropriate type of lock on index: must be exclusive if index
+ * AM isn't concurrent-safe.
*/
if (indrel->rd_am->amconcurrent)
LockRelation(indrel, RowExclusiveLock);
@@ -724,16 +723,16 @@ lazy_vacuum_index(Relation indrel,
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
pfree(stats);
}
@@ -755,19 +754,18 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
pg_rusage_init(&ru0);
/*
- * We need full exclusive lock on the relation in order to do
- * truncation. If we can't get it, give up rather than waiting --- we
- * don't want to block other backends, and we don't want to deadlock
- * (which is quite possible considering we already hold a lower-grade
- * lock).
+ * We need full exclusive lock on the relation in order to do truncation.
+ * If we can't get it, give up rather than waiting --- we don't want to
+ * block other backends, and we don't want to deadlock (which is quite
+ * possible considering we already hold a lower-grade lock).
*/
if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
return;
/*
* Now that we have exclusive lock, look to see if the rel has grown
- * whilst we were vacuuming with non-exclusive lock. If so, give up;
- * the newly added pages presumably contain non-deletable tuples.
+ * whilst we were vacuuming with non-exclusive lock. If so, give up; the
+ * newly added pages presumably contain non-deletable tuples.
*/
new_rel_pages = RelationGetNumberOfBlocks(onerel);
if (new_rel_pages != old_rel_pages)
@@ -780,9 +778,9 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
/*
* Scan backwards from the end to verify that the end pages actually
- * contain nothing we need to keep. This is *necessary*, not
- * optional, because other backends could have added tuples to these
- * pages whilst we were vacuuming.
+ * contain nothing we need to keep. This is *necessary*, not optional,
+ * because other backends could have added tuples to these pages whilst we
+ * were vacuuming.
*/
new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
@@ -905,8 +903,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
@@ -938,8 +936,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
/*
* If we fall out of the loop, all the previously-thought-to-be-empty
- * pages really are; we need not bother to look at the last
- * known-nonempty page.
+ * pages really are; we need not bother to look at the last known-nonempty
+ * page.
*/
return vacrelstats->nonempty_pages;
}
@@ -1010,18 +1008,16 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular
- * reduces the amount of time we spend holding the FSM lock when we
- * finally call RecordRelationFreeSpace. Since the FSM will probably
- * drop pages with little free space anyway, there's no point in
- * making this really small.
+ * uselessly small entries early saves cycles, and in particular reduces
+ * the amount of time we spend holding the FSM lock when we finally call
+ * RecordRelationFreeSpace. Since the FSM will probably drop pages with
+ * little free space anyway, there's no point in making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that
- * to adjust the threshold? Would be worthwhile if FSM has no stats
- * yet for this relation. But changing the threshold as we scan the
- * rel might lead to bizarre behavior, too. Also, it's probably
- * better if vacuum.c has the same thresholding behavior as we do
- * here.
+ * XXX Is it worth trying to measure average tuple size, and using that to
+ * adjust the threshold? Would be worthwhile if FSM has no stats yet for
+ * this relation. But changing the threshold as we scan the rel might
+ * lead to bizarre behavior, too. Also, it's probably better if vacuum.c
+ * has the same thresholding behavior as we do here.
*/
if (avail < vacrelstats->threshold)
return;
@@ -1055,8 +1051,8 @@ lazy_record_free_space(LVRelStats *vacrelstats,
{
/*
* Scan backwards through the array, "sift-up" each value into its
- * correct position. We can start the scan at n/2-1 since each
- * entry above that position has no children to worry about.
+ * correct position. We can start the scan at n/2-1 since each entry
+ * above that position has no children to worry about.
*/
int l = n / 2;
@@ -1092,9 +1088,9 @@ lazy_record_free_space(LVRelStats *vacrelstats,
{
/*
* Notionally, we replace the zero'th entry with the new data, and
- * then sift-up to maintain the heap property. Physically, the
- * new data doesn't get stored into the arrays until we find the
- * right location for it.
+ * then sift-up to maintain the heap property. Physically, the new
+ * data doesn't get stored into the arrays until we find the right
+ * location for it.
*/
int i = 0; /* i is where the "hole" is */
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 845c59625d6..31113fffe2d 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.113 2005/08/08 23:39:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"datestyle\"")));
+ errmsg("invalid list syntax for parameter \"datestyle\"")));
return NULL;
}
@@ -131,11 +131,11 @@ assign_datestyle(const char *value, bool doit, GucSource source)
else if (pg_strcasecmp(tok, "DEFAULT") == 0)
{
/*
- * Easiest way to get the current DEFAULT state is to fetch
- * the DEFAULT string from guc.c and recursively parse it.
+ * Easiest way to get the current DEFAULT state is to fetch the
+ * DEFAULT string from guc.c and recursively parse it.
*
- * We can't simply "return assign_datestyle(...)" because we need
- * to handle constructs like "DEFAULT, ISO".
+ * We can't simply "return assign_datestyle(...)" because we need to
+ * handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
int saveDateOrder = DateOrder;
@@ -163,8 +163,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"datestyle\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"datestyle\" key word: \"%s\"",
+ tok)));
ok = false;
break;
}
@@ -224,8 +224,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
}
/*
- * Finally, it's safe to assign to the global variables; the
- * assignment cannot fail now.
+ * Finally, it's safe to assign to the global variables; the assignment
+ * cannot fail now.
*/
DateStyle = newDateStyle;
DateOrder = newDateOrder;
@@ -274,14 +274,14 @@ assign_timezone(const char *value, bool doit, GucSource source)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could
- * to guard against this in flatten_set_variable_args, but a
- * string coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could to
+ * guard against this in flatten_set_variable_args, but a string
+ * coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
+ CStringGetDatum(val),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1)));
pfree(val);
if (interval->month != 0)
@@ -336,15 +336,14 @@ assign_timezone(const char *value, bool doit, GucSource source)
* UNKNOWN is the value shown as the "default" for TimeZone in
* guc.c. We interpret it as being a complete no-op; we don't
* change the timezone setting. Note that if there is a known
- * timezone setting, we will return that name rather than
- * UNKNOWN as the canonical spelling.
+ * timezone setting, we will return that name rather than UNKNOWN
+ * as the canonical spelling.
*
- * During GUC initialization, since the timezone library isn't
- * set up yet, pg_get_timezone_name will return NULL and we
- * will leave the setting as UNKNOWN. If this isn't
- * overridden from the config file then
- * pg_timezone_initialize() will eventually select a default
- * value from the environment.
+ * During GUC initialization, since the timezone library isn't set up
+ * yet, pg_get_timezone_name will return NULL and we will leave
+ * the setting as UNKNOWN. If this isn't overridden from the
+ * config file then pg_timezone_initialize() will eventually
+ * select a default value from the environment.
*/
if (doit)
{
@@ -359,7 +358,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
/*
* Otherwise assume it is a timezone name, and try to load it.
*/
- pg_tz *new_tz;
+ pg_tz *new_tz;
new_tz = pg_tzset(value);
@@ -376,9 +375,9 @@ assign_timezone(const char *value, bool doit, GucSource source)
{
ereport((source >= PGC_S_INTERACTIVE) ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone \"%s\" appears to use leap seconds",
- value),
- errdetail("PostgreSQL does not support leap seconds.")));
+ errmsg("time zone \"%s\" appears to use leap seconds",
+ value),
+ errdetail("PostgreSQL does not support leap seconds.")));
return NULL;
}
@@ -406,7 +405,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
if (!result)
return NULL;
snprintf(result, 64, "%.5f",
- (double) (-CTimeZone) / (double)SECS_PER_HOUR);
+ (double) (-CTimeZone) / (double) SECS_PER_HOUR);
}
else
result = strdup(value);
@@ -424,7 +423,7 @@ show_timezone(void)
if (HasCTZSet)
{
- Interval interval;
+ Interval interval;
interval.month = 0;
interval.day = 0;
@@ -435,7 +434,7 @@ show_timezone(void)
#endif
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(&interval)));
+ IntervalPGetDatum(&interval)));
}
else
tzn = pg_get_timezone_name(global_timezone);
@@ -559,18 +558,18 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
return NULL;
/*
- * Note: if we are in startup phase then SetClientEncoding may not be
- * able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix
- * things once initialization is complete.
+ * Note: if we are in startup phase then SetClientEncoding may not be able
+ * to really set the encoding. In this case we will assume that the
+ * encoding is okay, and InitializeClientEncoding() will fix things once
+ * initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@@ -594,7 +593,7 @@ extern char *session_authorization_string; /* in guc.c */
const char *
assign_session_authorization(const char *value, bool doit, GucSource source)
{
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
bool is_superuser = false;
const char *actual_rolename = NULL;
char *result;
@@ -603,7 +602,7 @@ assign_session_authorization(const char *value, bool doit, GucSource source)
(value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F'))
{
/* might be a saved userid string */
- Oid savedoid;
+ Oid savedoid;
char *endptr;
savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10);
@@ -625,9 +624,9 @@ assign_session_authorization(const char *value, bool doit, GucSource source)
if (!IsTransactionState())
{
/*
- * Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in
- * postgresql.conf, which seems like a good thing anyway.
+ * Can't do catalog lookups, so fail. The upshot of this is that
+ * session_authorization cannot be set in postgresql.conf, which
+ * seems like a good thing anyway.
*/
return NULL;
}
@@ -676,7 +675,7 @@ show_session_authorization(void)
* assign_session_authorization
*/
const char *value = session_authorization_string;
- Oid savedoid;
+ Oid savedoid;
char *endptr;
Assert(strspn(value, "x") == NAMEDATALEN &&
@@ -706,7 +705,7 @@ extern char *role_string; /* in guc.c */
const char *
assign_role(const char *value, bool doit, GucSource source)
{
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
bool is_superuser = false;
const char *actual_rolename = value;
char *result;
@@ -715,7 +714,7 @@ assign_role(const char *value, bool doit, GucSource source)
(value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F'))
{
/* might be a saved userid string */
- Oid savedoid;
+ Oid savedoid;
char *endptr;
savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10);
@@ -738,9 +737,9 @@ assign_role(const char *value, bool doit, GucSource source)
if (!IsTransactionState())
{
/*
- * Can't do catalog lookups, so fail. The upshot of this is
- * that role cannot be set in postgresql.conf, which seems
- * like a good thing anyway.
+ * Can't do catalog lookups, so fail. The upshot of this is that
+ * role cannot be set in postgresql.conf, which seems like a good
+ * thing anyway.
*/
return NULL;
}
@@ -797,11 +796,10 @@ const char *
show_role(void)
{
/*
- * Extract the role name from the stored string; see
- * assign_role
+ * Extract the role name from the stored string; see assign_role
*/
const char *value = role_string;
- Oid savedoid;
+ Oid savedoid;
char *endptr;
/* This special case only applies if no SET ROLE has been done */
@@ -816,11 +814,11 @@ show_role(void)
Assert(endptr != value + NAMEDATALEN + 1 && *endptr == ',');
/*
- * Check that the stored string still matches the effective setting,
- * else return "none". This is a kluge to deal with the fact that
- * SET SESSION AUTHORIZATION logically resets SET ROLE to NONE, but
- * we cannot set the GUC role variable from assign_session_authorization
- * (because we haven't got enough info to call set_config_option).
+ * Check that the stored string still matches the effective setting, else
+ * return "none". This is a kluge to deal with the fact that SET SESSION
+ * AUTHORIZATION logically resets SET ROLE to NONE, but we cannot set the
+ * GUC role variable from assign_session_authorization (because we haven't
+ * got enough info to call set_config_option).
*/
if (savedoid != GetCurrentRoleId())
return "none";
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 6158b16654c..54030452f8a 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.90 2005/04/14 01:38:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,16 +55,18 @@ isViewOnTempTable_walker(Node *node, void *context)
if (IsA(node, Query))
{
- Query *query = (Query *) node;
- ListCell *rtable;
+ Query *query = (Query *) node;
+ ListCell *rtable;
- foreach (rtable, query->rtable)
+ foreach(rtable, query->rtable)
{
RangeTblEntry *rte = lfirst(rtable);
+
if (rte->rtekind == RTE_RELATION)
{
- Relation rel = heap_open(rte->relid, AccessShareLock);
- bool istemp = rel->rd_istemp;
+ Relation rel = heap_open(rte->relid, AccessShareLock);
+ bool istemp = rel->rd_istemp;
+
heap_close(rel, AccessShareLock);
if (istemp)
return true;
@@ -101,8 +103,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
ListCell *t;
/*
- * create a list of ColumnDef nodes based on the names and types of
- * the (non-junk) targetlist items from the view's SELECT list.
+ * create a list of ColumnDef nodes based on the names and types of the
+ * (non-junk) targetlist items from the view's SELECT list.
*/
attrList = NIL;
foreach(t, tlist)
@@ -167,15 +169,15 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
RelationGetRelationName(rel));
/*
- * Due to the namespace visibility rules for temporary
- * objects, we should only end up replacing a temporary view
- * with another temporary view, and vice versa.
+ * Due to the namespace visibility rules for temporary objects, we
+ * should only end up replacing a temporary view with another
+ * temporary view, and vice versa.
*/
Assert(relation->istemp == rel->rd_istemp);
/*
- * Create a tuple descriptor to compare against the existing view,
- * and verify it matches.
+ * Create a tuple descriptor to compare against the existing view, and
+ * verify it matches.
*/
descriptor = BuildDescForRelation(attrList);
checkViewTupleDesc(descriptor, rel->rd_att);
@@ -190,8 +192,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
else
{
/*
- * now set the parameters for keys/inheritance etc. All of these
- * are uninteresting for views...
+ * now set the parameters for keys/inheritance etc. All of these are
+ * uninteresting for views...
*/
createStmt->relation = (RangeVar *) relation;
createStmt->tableElts = attrList;
@@ -203,8 +205,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
/*
* finally create the relation (this will error out if there's an
- * existing view, so we don't need more code to complain if
- * "replace" is false).
+ * existing view, so we don't need more code to complain if "replace"
+ * is false).
*/
return DefineRelation(createStmt, RELKIND_VIEW);
}
@@ -247,8 +249,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change data type of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change data type of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
@@ -265,8 +267,8 @@ FormViewRetrieveRule(const RangeVar *view, Query *viewParse, bool replace)
RuleStmt *rule;
/*
- * Create a RuleStmt that corresponds to the suitable rewrite rule
- * args for DefineQueryRewrite();
+ * Create a RuleStmt that corresponds to the suitable rewrite rule args
+ * for DefineQueryRewrite();
*/
rule = makeNode(RuleStmt);
rule->relation = copyObject((RangeVar *) view);
@@ -336,11 +338,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
/*
* Make a copy of the given parsetree. It's not so much that we don't
- * want to scribble on our input, it's that the parser has a bad habit
- * of outputting multiple links to the same subtree for constructs
- * like BETWEEN, and we mustn't have OffsetVarNodes increment the
- * varno of a Var node twice. copyObject will expand any
- * multiply-referenced subtree into multiple copies.
+ * want to scribble on our input, it's that the parser has a bad habit of
+ * outputting multiple links to the same subtree for constructs like
+ * BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
+ * Var node twice. copyObject will expand any multiply-referenced subtree
+ * into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -348,8 +350,8 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
viewRel = relation_open(viewOid, AccessShareLock);
/*
- * Create the 2 new range table entries and form the new range
- * table... OLD first, then NEW....
+ * Create the 2 new range table entries and form the new range table...
+ * OLD first, then NEW....
*/
rt_entry1 = addRangeTableEntryForRelation(NULL, viewRel,
makeAlias("*OLD*", NIL),
@@ -393,8 +395,8 @@ DefineView(RangeVar *view, Query *viewParse, bool replace)
Oid viewOid;
/*
- * If the user didn't explicitly ask for a temporary view, check
- * whether we need one implicitly.
+ * If the user didn't explicitly ask for a temporary view, check whether
+ * we need one implicitly.
*/
if (!view->istemp)
{
@@ -404,25 +406,24 @@ DefineView(RangeVar *view, Query *viewParse, bool replace)
(errmsg("view \"%s\" will be a temporary view",
view->relname)));
}
-
+
/*
* Create the view relation
*
- * NOTE: if it already exists and replace is false, the xact will be
- * aborted.
+ * NOTE: if it already exists and replace is false, the xact will be aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);
/*
- * The relation we have just created is not visible to any other
- * commands running with the same transaction & command id. So,
- * increment the command id counter (but do NOT pfree any memory!!!!)
+ * The relation we have just created is not visible to any other commands
+ * running with the same transaction & command id. So, increment the
+ * command id counter (but do NOT pfree any memory!!!!)
*/
CommandCounterIncrement();
/*
- * The range table of 'viewParse' does not contain entries for the
- * "OLD" and "NEW" relations. So... add them!
+ * The range table of 'viewParse' does not contain entries for the "OLD"
+ * and "NEW" relations. So... add them!
*/
viewParse = UpdateRangeTableOfViewParse(viewOid, viewParse);
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index c2cb4b68835..06e4ab7b232 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.84 2005/05/15 21:19:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -251,10 +251,10 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
- * restored-to tuple.) Hence the caller should discard any previously
+ * restored-to tuple.) Hence the caller should discard any previously
* returned TupleTableSlot after doing a restore.
*/
void
@@ -398,15 +398,14 @@ ExecMayReturnRawTuples(PlanState *node)
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
- * decided to do projection or not. Most non-scan nodes always
- * project and so we can return "false" immediately. For nodes that
- * don't project but just pass up input tuples, we have to recursively
- * examine the input plan node.
+ * decided to do projection or not. Most non-scan nodes always project
+ * and so we can return "false" immediately. For nodes that don't project
+ * but just pass up input tuples, we have to recursively examine the input
+ * plan node.
*
- * Note: Hash and Material are listed here because they sometimes return
- * an original input tuple, not a copy. But Sort and SetOp never
- * return an original tuple, so they can be treated like projecting
- * nodes.
+ * Note: Hash and Material are listed here because they sometimes return an
+ * original input tuple, not a copy. But Sort and SetOp never return an
+ * original tuple, so they can be treated like projecting nodes.
*/
switch (nodeTag(node))
{
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 1bf46d815cc..688e2157e8b 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.15 2005/05/29 04:23:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,11 +66,10 @@ execTuplesMatch(TupleTableSlot *slot1,
oldContext = MemoryContextSwitchTo(evalContext);
/*
- * We cannot report a match without checking all the fields, but we
- * can report a non-match as soon as we find unequal fields. So,
- * start comparing at the last field (least significant sort key).
- * That's the most likely to be different if we are dealing with
- * sorted input.
+ * We cannot report a match without checking all the fields, but we can
+ * report a non-match as soon as we find unequal fields. So, start
+ * comparing at the last field (least significant sort key). That's the
+ * most likely to be different if we are dealing with sorted input.
*/
result = true;
@@ -137,11 +136,10 @@ execTuplesUnequal(TupleTableSlot *slot1,
oldContext = MemoryContextSwitchTo(evalContext);
/*
- * We cannot report a match without checking all the fields, but we
- * can report a non-match as soon as we find unequal fields. So,
- * start comparing at the last field (least significant sort key).
- * That's the most likely to be different if we are dealing with
- * sorted input.
+ * We cannot report a match without checking all the fields, but we can
+ * report a non-match as soon as we find unequal fields. So, start
+ * comparing at the last field (least significant sort key). That's the
+ * most likely to be different if we are dealing with sorted input.
*/
result = false;
@@ -288,7 +286,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
Assert(entrysize >= sizeof(TupleHashEntryData));
hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
- sizeof(TupleHashTableData));
+ sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
@@ -297,7 +295,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hashtable->tablecxt = tablecxt;
hashtable->tempcxt = tempcxt;
hashtable->entrysize = entrysize;
- hashtable->tableslot = NULL; /* will be made on first lookup */
+ hashtable->tableslot = NULL; /* will be made on first lookup */
hashtable->inputslot = NULL;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
@@ -308,7 +306,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
return hashtable;
}
@@ -341,6 +339,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
TupleDesc tupdesc;
oldContext = MemoryContextSwitchTo(hashtable->tablecxt);
+
/*
* We copy the input tuple descriptor just for safety --- we assume
* all input tuples will have equivalent descriptors.
@@ -382,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* created new entry
*
- * Zero any caller-requested space in the entry. (This zaps
- * the "key data" dynahash.c copied into the new entry, but we
- * don't care since we're about to overwrite it anyway.)
+ * Zero any caller-requested space in the entry. (This zaps the "key
+ * data" dynahash.c copied into the new entry, but we don't care
+ * since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
@@ -482,6 +481,7 @@ static int
TupleHashTableMatch(const void *key1, const void *key2, Size keysize)
{
HeapTuple tuple1 = ((const TupleHashEntryData *) key1)->firstTuple;
+
#ifdef USE_ASSERT_CHECKING
HeapTuple tuple2 = ((const TupleHashEntryData *) key2)->firstTuple;
#endif
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index 1cf403f88dd..2245c61e7fe 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.49 2005/04/06 16:34:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
@@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot)
* Now calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one
- * entry for every attribute of the "clean" tuple. The value of this
- * entry is the attribute number of the corresponding attribute of the
- * "original" tuple. (Zero indicates a NULL output attribute, but we
- * do not use that feature in this routine.)
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
+ * for every attribute of the "clean" tuple. The value of this entry is
+ * the attribute number of the corresponding attribute of the "original"
+ * tuple. (Zero indicates a NULL output attribute, but we do not use that
+ * feature in this routine.)
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@@ -155,14 +155,14 @@ ExecInitJunkFilterConversion(List *targetList,
slot = MakeSingleTupleTableSlot(cleanTupType);
/*
- * Calculate the mapping between the original tuple's attributes and
- * the "clean" tuple's attributes.
+ * Calculate the mapping between the original tuple's attributes and the
+ * "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one
- * entry for every attribute of the "clean" tuple. The value of this
- * entry is the attribute number of the corresponding attribute of the
- * "original" tuple. We store zero for any deleted attributes, marking
- * that a NULL is needed in the output tuple.
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
+ * for every attribute of the "clean" tuple. The value of this entry is
+ * the attribute number of the corresponding attribute of the "original"
+ * tuple. We store zero for any deleted attributes, marking that a NULL
+ * is needed in the output tuple.
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@@ -220,8 +220,8 @@ ExecGetJunkAttribute(JunkFilter *junkfilter,
ListCell *t;
/*
- * Look in the junkfilter's target list for an attribute with
- * the given name
+ * Look in the junkfilter's target list for an attribute with the given
+ * name
*/
foreach(t, junkfilter->jf_targetList)
{
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 05b4a48be29..2a96a161c81 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.255 2005/08/26 03:07:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -208,8 +208,7 @@ ExecutorRun(QueryDesc *queryDesc,
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * extract information from the query descriptor and the query
- * feature.
+ * extract information from the query descriptor and the query feature.
*/
operation = queryDesc->operation;
dest = queryDesc->dest;
@@ -352,15 +351,15 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
{
AclMode requiredPerms;
Oid relOid;
- Oid userid;
+ Oid userid;
/*
- * Only plain-relation RTEs need to be checked here. Subquery RTEs
- * are checked by ExecInitSubqueryScan if the subquery is still a
- * separate subquery --- if it's been pulled up into our query level
- * then the RTEs are in our rangetable and will be checked here.
- * Function RTEs are checked by init_fcache when the function is
- * prepared for execution. Join and special RTEs need no checks.
+ * Only plain-relation RTEs need to be checked here. Subquery RTEs are
+ * checked by ExecInitSubqueryScan if the subquery is still a separate
+ * subquery --- if it's been pulled up into our query level then the RTEs
+ * are in our rangetable and will be checked here. Function RTEs are
+ * checked by init_fcache when the function is prepared for execution.
+ * Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -375,19 +374,17 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
relOid = rte->relid;
/*
- * userid to check as: current user unless we have a setuid
- * indication.
+ * userid to check as: current user unless we have a setuid indication.
*
- * Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we
- * could call it once in ExecCheckRTPerms and pass the userid down
- * from there. But for now, no need for the extra clutter.
+ * Note: GetUserId() is presently fast enough that there's no harm in calling
+ * it separately for each RTE. If that stops being true, we could call it
+ * once in ExecCheckRTPerms and pass the userid down from there. But for
+ * now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/*
- * We must have *all* the requiredPerms bits, so use aclmask not
- * aclcheck.
+ * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
*/
if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
!= requiredPerms)
@@ -515,8 +512,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
else
{
/*
- * Single result relation identified by
- * parseTree->resultRelation
+ * Single result relation identified by parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@@ -544,8 +540,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* Detect whether we're doing SELECT INTO. If so, set the es_into_oids
- * flag appropriately so that the plan tree will be initialized with
- * the correct tuple descriptors.
+ * flag appropriately so that the plan tree will be initialized with the
+ * correct tuple descriptors.
*/
do_select_into = false;
@@ -583,10 +579,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
}
/*
- * initialize the executor "tuple" table. We need slots for all the
- * plan nodes, plus possibly output slots for the junkfilter(s). At
- * this point we aren't sure if we need junkfilters, so just add slots
- * for them unconditionally.
+ * initialize the executor "tuple" table. We need slots for all the plan
+ * nodes, plus possibly output slots for the junkfilter(s). At this point
+ * we aren't sure if we need junkfilters, so just add slots for them
+ * unconditionally.
*/
{
int nSlots = ExecCountSlotsNode(plan);
@@ -606,26 +602,26 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
estate->es_useEvalPlan = false;
/*
- * initialize the private state information for all the nodes in the
- * query tree. This opens files, allocates storage and leaves us
- * ready to start processing tuples.
+ * initialize the private state information for all the nodes in the query
+ * tree. This opens files, allocates storage and leaves us ready to start
+ * processing tuples.
*/
planstate = ExecInitNode(plan, estate);
/*
- * Get the tuple descriptor describing the type of tuples to return.
- * (this is especially important if we are creating a relation with
- * "SELECT INTO")
+ * Get the tuple descriptor describing the type of tuples to return. (this
+ * is especially important if we are creating a relation with "SELECT
+ * INTO")
*/
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries
- * need a filter if there are any junk attrs in the tlist. INSERT and
- * SELECT INTO also need a filter if the plan may return raw disk
- * tuples (else heap_insert will be scribbling on the source
- * relation!). UPDATE and DELETE always need a filter, since there's
- * always a junk 'ctid' attribute present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries need a
+ * filter if there are any junk attrs in the tlist. INSERT and SELECT
+ * INTO also need a filter if the plan may return raw disk tuples (else
+ * heap_insert will be scribbling on the source relation!). UPDATE and
+ * DELETE always need a filter, since there's always a junk 'ctid'
+ * attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -661,10 +657,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (junk_filter_needed)
{
/*
- * If there are multiple result relations, each one needs its
- * own junk filter. Note this is only possible for
- * UPDATE/DELETE, so we can't be fooled by some needing a
- * filter and some not.
+ * If there are multiple result relations, each one needs its own
+ * junk filter. Note this is only possible for UPDATE/DELETE, so
+ * we can't be fooled by some needing a filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@@ -687,15 +682,15 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
JunkFilter *j;
j = ExecInitJunkFilter(subplan->plan->targetlist,
- resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
- ExecAllocTableSlot(estate->es_tupleTable));
+ resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
+ ExecAllocTableSlot(estate->es_tupleTable));
resultRelInfo->ri_junkFilter = j;
resultRelInfo++;
}
/*
- * Set active junkfilter too; at this point ExecInitAppend
- * has already selected an active result relation...
+ * Set active junkfilter too; at this point ExecInitAppend has
+ * already selected an active result relation...
*/
estate->es_junkFilter =
estate->es_result_relation_info->ri_junkFilter;
@@ -707,7 +702,7 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
j = ExecInitJunkFilter(planstate->plan->targetlist,
tupType->tdhasoid,
- ExecAllocTableSlot(estate->es_tupleTable));
+ ExecAllocTableSlot(estate->es_tupleTable));
estate->es_junkFilter = j;
if (estate->es_result_relation_info)
estate->es_result_relation_info->ri_junkFilter = j;
@@ -777,10 +772,9 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into relation. Note
- * that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will be
- * visible for insertion.
+ * If necessary, create a TOAST table for the into relation. Note that
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
@@ -795,11 +789,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
- * Note that for a non-temp INTO table, this is safe only because
- * we know that the catalog changes above will have been WAL-logged,
- * and so RecordTransactionCommit will think it needs to WAL-log the
- * eventual transaction commit. Else the commit might be lost, even
- * though all the data is safely fsync'd ...
+ * Note that for a non-temp INTO table, this is safe only because we know
+ * that the catalog changes above will have been WAL-logged, and so
+ * RecordTransactionCommit will think it needs to WAL-log the eventual
+ * transaction commit. Else the commit might be lost, even though all
+ * the data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
}
@@ -832,19 +826,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change TOAST relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
}
@@ -859,7 +853,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
if (resultRelInfo->ri_TrigDesc)
{
- int n = resultRelInfo->ri_TrigDesc->numtriggers;
+ int n = resultRelInfo->ri_TrigDesc->numtriggers;
resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
palloc0(n * sizeof(FmgrInfo));
@@ -878,9 +872,9 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
- * descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
- * for a DELETE, however, since deletion doesn't affect indexes.
+ * descriptors in the result relation info, so that we can add new index
+ * entries for the tuples we add/update. We need not do this for a
+ * DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@@ -981,8 +975,7 @@ ExecEndPlan(PlanState *planstate, EState *estate)
estate->es_tupleTable = NULL;
/*
- * close the result relation(s) if any, but hold locks until xact
- * commit.
+ * close the result relation(s) if any, but hold locks until xact commit.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@@ -999,10 +992,10 @@ ExecEndPlan(PlanState *planstate, EState *estate)
if (estate->es_into_relation_descriptor != NULL)
{
/*
- * If we skipped using WAL, and it's not a temp relation,
- * we must force the relation down to disk before it's
- * safe to commit the transaction. This requires forcing
- * out any dirty buffers and then doing a forced fsync.
+ * If we skipped using WAL, and it's not a temp relation, we must
+ * force the relation down to disk before it's safe to commit the
+ * transaction. This requires forcing out any dirty buffers and then
+ * doing a forced fsync.
*/
if (!estate->es_into_relation_use_wal &&
!estate->es_into_relation_descriptor->rd_istemp)
@@ -1087,8 +1080,7 @@ ExecutePlan(EState *estate,
}
/*
- * Loop until we've processed the proper number of tuples from the
- * plan.
+ * Loop until we've processed the proper number of tuples from the plan.
*/
for (;;)
@@ -1120,12 +1112,12 @@ lnext: ;
}
/*
- * if we have a junk filter, then project a new tuple with the
- * junk removed.
+ * if we have a junk filter, then project a new tuple with the junk
+ * removed.
*
* Store this new "clean" tuple in the junkfilter's resultSlot.
- * (Formerly, we stored it back over the "dirty" tuple, which is
- * WRONG because that tuple slot has the wrong descriptor.)
+ * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
+ * because that tuple slot has the wrong descriptor.)
*
* Also, extract all the junk information we need.
*/
@@ -1151,10 +1143,10 @@ lnext: ;
elog(ERROR, "ctid is NULL");
tupleid = (ItemPointer) DatumGetPointer(datum);
- tuple_ctid = *tupleid; /* make sure we don't free the
- * ctid!! */
+ tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */
tupleid = &tuple_ctid;
}
+
/*
* Process any FOR UPDATE or FOR SHARE locking requested.
*/
@@ -1171,8 +1163,8 @@ lnext: ;
ItemPointerData update_ctid;
TransactionId update_xmax;
TupleTableSlot *newSlot;
- LockTupleMode lockmode;
- HTSU_Result test;
+ LockTupleMode lockmode;
+ HTSU_Result test;
if (!ExecGetJunkAttribute(junkfilter,
slot,
@@ -1210,8 +1202,8 @@ lnext: ;
case HeapTupleUpdated:
if (IsXactIsoLevelSerializable)
ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
if (!ItemPointerEquals(&update_ctid,
&tuple.t_self))
{
@@ -1230,8 +1222,7 @@ lnext: ;
/*
* if tuple was deleted or PlanQual failed for
- * updated tuple - we must not return this
- * tuple!
+ * updated tuple - we must not return this tuple!
*/
goto lnext;
@@ -1251,9 +1242,9 @@ lnext: ;
}
/*
- * now that we have a tuple, do the appropriate thing with it..
- * either return it to the user, add it to a relation someplace,
- * delete it from a relation, or modify some of its attributes.
+ * now that we have a tuple, do the appropriate thing with it.. either
+ * return it to the user, add it to a relation someplace, delete it
+ * from a relation, or modify some of its attributes.
*/
switch (operation)
{
@@ -1287,9 +1278,9 @@ lnext: ;
}
/*
- * check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
- * numberTuples means no limit.
+ * check our tuple count.. if we've processed the proper number then
+ * quit, else loop again and process more tuples. Zero numberTuples
+ * means no limit.
*/
current_tuple_count++;
if (numberTuples && numberTuples == current_tuple_count)
@@ -1383,8 +1374,8 @@ ExecInsert(TupleTableSlot *slot,
Oid newId;
/*
- * get the heap tuple out of the tuple table slot, making sure
- * we have a writable copy
+ * get the heap tuple out of the tuple table slot, making sure we have a
+ * writable copy
*/
tuple = ExecMaterializeSlot(slot);
@@ -1396,7 +1387,7 @@ ExecInsert(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1409,9 +1400,9 @@ ExecInsert(TupleTableSlot *slot,
{
/*
* Insert modified tuple into tuple table slot, replacing the
- * original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself. The
- * tuple table slot should not try to clear it.
+ * original. We assume that it was allocated in per-tuple memory
+ * context, and therefore will go away by itself. The tuple table
+ * slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1427,8 +1418,8 @@ ExecInsert(TupleTableSlot *slot,
/*
* insert the tuple
*
- * Note: heap_insert returns the tid (location) of the new tuple
- * in the t_self field.
+ * Note: heap_insert returns the tid (location) of the new tuple in the
+ * t_self field.
*/
newId = heap_insert(resultRelationDesc, tuple,
estate->es_snapshot->curcid,
@@ -1463,7 +1454,7 @@ ExecDelete(TupleTableSlot *slot,
{
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -1475,7 +1466,7 @@ ExecDelete(TupleTableSlot *slot,
/* BEFORE ROW DELETE Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
{
bool dodelete;
@@ -1489,9 +1480,9 @@ ExecDelete(TupleTableSlot *slot,
/*
* delete the tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
- * the row to be deleted is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
+ * row to be deleted is visible to that snapshot, and throw a can't-
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
ldelete:;
@@ -1543,9 +1534,9 @@ ldelete:;
* Note: Normally one would think that we have to delete index tuples
* associated with the heap tuple now..
*
- * ... but in POSTGRES, we have no need to do this because the vacuum
- * daemon automatically opens an index scan and deletes index tuples
- * when it finds deleted heap tuples. -cim 9/27/89
+ * ... but in POSTGRES, we have no need to do this because the vacuum daemon
+ * automatically opens an index scan and deletes index tuples when it
+ * finds deleted heap tuples. -cim 9/27/89
*/
/* AFTER ROW DELETE Triggers */
@@ -1571,7 +1562,7 @@ ExecUpdate(TupleTableSlot *slot,
HeapTuple tuple;
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
@@ -1582,8 +1573,8 @@ ExecUpdate(TupleTableSlot *slot,
elog(ERROR, "cannot UPDATE during bootstrap");
/*
- * get the heap tuple out of the tuple table slot, making sure
- * we have a writable copy
+ * get the heap tuple out of the tuple table slot, making sure we have a
+ * writable copy
*/
tuple = ExecMaterializeSlot(slot);
@@ -1595,7 +1586,7 @@ ExecUpdate(TupleTableSlot *slot,
/* BEFORE ROW UPDATE Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
{
HeapTuple newtuple;
@@ -1610,9 +1601,9 @@ ExecUpdate(TupleTableSlot *slot,
{
/*
* Insert modified tuple into tuple table slot, replacing the
- * original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself. The
- * tuple table slot should not try to clear it.
+ * original. We assume that it was allocated in per-tuple memory
+ * context, and therefore will go away by itself. The tuple table
+ * slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1622,11 +1613,11 @@ ExecUpdate(TupleTableSlot *slot,
/*
* Check the constraints of the tuple
*
- * If we generate a new candidate tuple after EvalPlanQual testing, we
- * must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
- * trigger.c will have done heap_lock_tuple to lock the correct tuple,
- * so there's no need to do them again.)
+ * If we generate a new candidate tuple after EvalPlanQual testing, we must
+ * loop back here and recheck constraints. (We don't need to redo
+ * triggers, however. If there are any BEFORE triggers then trigger.c
+ * will have done heap_lock_tuple to lock the correct tuple, so there's no
+ * need to do them again.)
*/
lreplace:;
if (resultRelationDesc->rd_att->constr)
@@ -1635,9 +1626,9 @@ lreplace:;
/*
* replace the heap tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
- * the row to be updated is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
+ * row to be updated is visible to that snapshot, and throw a can't-
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
result = heap_update(resultRelationDesc, tupleid, tuple,
@@ -1687,18 +1678,18 @@ lreplace:;
(estate->es_processed)++;
/*
- * Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index tuples.
- * This is because UPDATEs are actually DELETEs and INSERTs, and index
- * tuple deletion is done automagically by the vacuum daemon. All we
- * do is insert new index tuples. -cim 9/27/89
+ * Note: instead of having to update the old index tuples associated with
+ * the heap tuple, all we do is form and insert new index tuples. This is
+ * because UPDATEs are actually DELETEs and INSERTs, and index tuple
+ * deletion is done automagically by the vacuum daemon. All we do is
+ * insert new index tuples. -cim 9/27/89
*/
/*
* insert index entries for tuple
*
- * Note: heap_update returns the tid (location) of the new tuple
- * in the t_self field.
+ * Note: heap_update returns the tid (location) of the new tuple in the
+ * t_self field.
*/
if (resultRelInfo->ri_NumIndices > 0)
ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
@@ -1721,8 +1712,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
/*
* If first time through for this result relation, build expression
- * nodetrees for rel's constraint expressions. Keep them in the
- * per-query memory context so they'll survive throughout the query.
+ * nodetrees for rel's constraint expressions. Keep them in the per-query
+ * memory context so they'll survive throughout the query.
*/
if (resultRelInfo->ri_ConstraintExprs == NULL)
{
@@ -1740,8 +1731,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
- * We will use the EState's per-tuple context for evaluating
- * constraint expressions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating constraint
+ * expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -1787,7 +1778,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
@@ -1870,9 +1861,9 @@ EvalPlanQual(EState *estate, Index rti,
{
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies
- * that the latest version of the row was deleted, so we need
- * do nothing. (Should be safe to examine xmin without getting
+ * recycled and reused for an unrelated tuple. This implies that
+ * the latest version of the row was deleted, so we need do
+ * nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
* tuple.)
*/
@@ -1888,8 +1879,8 @@ EvalPlanQual(EState *estate, Index rti,
elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
/*
- * If tuple is being updated by other transaction then we have
- * to wait for its commit/abort.
+ * If tuple is being updated by other transaction then we have to
+ * wait for its commit/abort.
*/
if (TransactionIdIsValid(SnapshotDirty->xmax))
{
@@ -1907,8 +1898,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
- * If the referenced slot was actually empty, the latest version
- * of the row must have been deleted, so we need do nothing.
+ * If the referenced slot was actually empty, the latest version of
+ * the row must have been deleted, so we need do nothing.
*/
if (tuple.t_data == NULL)
{
@@ -1928,15 +1919,15 @@ EvalPlanQual(EState *estate, Index rti,
/*
* If we get here, the tuple was found but failed SnapshotDirty.
- * Assuming the xmin is either a committed xact or our own xact
- * (as it certainly should be if we're trying to modify the tuple),
- * this must mean that the row was updated or deleted by either
- * a committed xact or our own xact. If it was deleted, we can
- * ignore it; if it was updated then chain up to the next version
- * and repeat the whole test.
+ * Assuming the xmin is either a committed xact or our own xact (as it
+ * certainly should be if we're trying to modify the tuple), this must
+ * mean that the row was updated or deleted by either a committed xact
+ * or our own xact. If it was deleted, we can ignore it; if it was
+ * updated then chain up to the next version and repeat the whole
+ * test.
*
- * As above, it should be safe to examine xmax and t_ctid without
- * the buffer content lock, because they can't be changing.
+ * As above, it should be safe to examine xmax and t_ctid without the
+ * buffer content lock, because they can't be changing.
*/
if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
{
@@ -1954,8 +1945,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
- * For UPDATE/DELETE we have to return tid of actual row we're
- * executing PQ for.
+ * For UPDATE/DELETE we have to return tid of actual row we're executing
+ * PQ for.
*/
*tid = tuple.t_self;
@@ -1974,10 +1965,10 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
- * If this is request for another RTE - Ra, - then we have to check
- * wasn't PlanQual requested for Ra already and if so then Ra' row was
- * updated again and we have to re-start old execution for Ra and
- * forget all what we done after Ra was suspended. Cool? -:))
+ * If this is request for another RTE - Ra, - then we have to check wasn't
+ * PlanQual requested for Ra already and if so then Ra' row was updated
+ * again and we have to re-start old execution for Ra and forget all what
+ * we done after Ra was suspended. Cool? -:))
*/
if (epq != NULL && epq->rti != rti &&
epq->estate->es_evTuple[rti - 1] != NULL)
@@ -1999,8 +1990,8 @@ EvalPlanQual(EState *estate, Index rti,
}
/*
- * If we are requested for another RTE then we have to suspend
- * execution of current PlanQual and start execution for new one.
+ * If we are requested for another RTE then we have to suspend execution
+ * of current PlanQual and start execution for new one.
*/
if (epq == NULL || epq->rti != rti)
{
@@ -2031,18 +2022,17 @@ EvalPlanQual(EState *estate, Index rti,
Assert(epq->rti == rti);
/*
- * Ok - we're requested for the same RTE. Unfortunately we still have
- * to end and restart execution of the plan, because ExecReScan
- * wouldn't ensure that upper plan nodes would reset themselves. We
- * could make that work if insertion of the target tuple were
- * integrated with the Param mechanism somehow, so that the upper plan
- * nodes know that their children's outputs have changed.
+ * Ok - we're requested for the same RTE. Unfortunately we still have to
+ * end and restart execution of the plan, because ExecReScan wouldn't
+ * ensure that upper plan nodes would reset themselves. We could make
+ * that work if insertion of the target tuple were integrated with the
+ * Param mechanism somehow, so that the upper plan nodes know that their
+ * children's outputs have changed.
*
* Note that the stack of free evalPlanQual nodes is quite useless at the
* moment, since it only saves us from pallocing/releasing the
- * evalPlanQual nodes themselves. But it will be useful once we
- * implement ReScan instead of end/restart for re-using PlanQual
- * nodes.
+ * evalPlanQual nodes themselves. But it will be useful once we implement
+ * ReScan instead of end/restart for re-using PlanQual nodes.
*/
if (endNode)
{
@@ -2055,15 +2045,14 @@ EvalPlanQual(EState *estate, Index rti,
*
* Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
* instead copy down changeable state from the top plan (including
- * es_result_relation_info, es_junkFilter) and reset locally
- * changeable state in the epq (including es_param_exec_vals,
- * es_evTupleNull).
+ * es_result_relation_info, es_junkFilter) and reset locally changeable
+ * state in the epq (including es_param_exec_vals, es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
/*
- * free old RTE' tuple, if any, and store target tuple where
- * relation's scan node will see it
+ * free old RTE' tuple, if any, and store target tuple where relation's
+ * scan node will see it
*/
epqstate = epq->estate;
if (epqstate->es_evTuple[rti - 1] != NULL)
@@ -2171,10 +2160,10 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
/*
- * The epqstates share the top query's copy of unchanging state such
- * as the snapshot, rangetable, result-rel info, and external Param
- * info. They need their own copies of local state, including a tuple
- * table, es_param_exec_vals, etc.
+ * The epqstates share the top query's copy of unchanging state such as
+ * the snapshot, rangetable, result-rel info, and external Param info.
+ * They need their own copies of local state, including a tuple table,
+ * es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@@ -2199,9 +2188,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_topPlan = estate->es_topPlan;
/*
- * Each epqstate must have its own es_evTupleNull state, but all the
- * stack entries share es_evTuple state. This allows sub-rechecks to
- * inherit the value being examined by an outer recheck.
+ * Each epqstate must have its own es_evTupleNull state, but all the stack
+ * entries share es_evTuple state. This allows sub-rechecks to inherit
+ * the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 28f67a2562f..fe067086d3b 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.50 2005/04/19 22:35:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.51 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -240,8 +240,8 @@ ExecInitNode(Plan *node, EState *estate)
}
/*
- * Initialize any initPlans present in this node. The planner put
- * them in a separate list for us.
+ * Initialize any initPlans present in this node. The planner put them in
+ * a separate list for us.
*/
subps = NIL;
foreach(l, node->initPlan)
@@ -258,9 +258,9 @@ ExecInitNode(Plan *node, EState *estate)
/*
* Initialize any subPlans present in this node. These were found by
- * ExecInitExpr during initialization of the PlanState. Note we must
- * do this after initializing initPlans, in case their arguments
- * contain subPlans (is that actually possible? perhaps not).
+ * ExecInitExpr during initialization of the PlanState. Note we must do
+ * this after initializing initPlans, in case their arguments contain
+ * subPlans (is that actually possible? perhaps not).
*/
foreach(l, result->subPlan)
{
@@ -422,7 +422,7 @@ ExecProcNode(PlanState *node)
Node *
MultiExecProcNode(PlanState *node)
{
- Node *result;
+ Node *result;
CHECK_FOR_INTERRUPTS();
@@ -431,9 +431,9 @@ MultiExecProcNode(PlanState *node)
switch (nodeTag(node))
{
- /*
- * Only node types that actually support multiexec will be listed
- */
+ /*
+ * Only node types that actually support multiexec will be listed
+ */
case T_HashState:
result = MultiExecHash((HashState *) node);
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 87fcf53bf05..d535e6453d5 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.180 2005/06/26 22:05:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.181 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,8 +89,8 @@ static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
@@ -106,8 +106,8 @@ static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -243,8 +243,8 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
isDone));
/*
- * If refexpr yields NULL, and it's a fetch, then result is NULL. In
- * the assignment case, we'll cons up something below.
+ * If refexpr yields NULL, and it's a fetch, then result is NULL. In the
+ * assignment case, we'll cons up something below.
*/
if (*isNull)
{
@@ -298,8 +298,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
NULL));
/*
- * If any index expr yields NULL, result is NULL or source
- * array
+ * If any index expr yields NULL, result is NULL or source array
*/
if (eisnull)
{
@@ -326,13 +325,12 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* Evaluate the value to be assigned into the array.
*
- * XXX At some point we'll need to look into making the old value of
- * the array element available via CaseTestExpr, as is done by
- * ExecEvalFieldStore. This is not needed now but will be needed
- * to support arrays of composite types; in an assignment to a
- * field of an array member, the parser would generate a
- * FieldStore that expects to fetch its input tuple via
- * CaseTestExpr.
+ * XXX At some point we'll need to look into making the old value of the
+ * array element available via CaseTestExpr, as is done by
+ * ExecEvalFieldStore. This is not needed now but will be needed to
+ * support arrays of composite types; in an assignment to a field of
+ * an array member, the parser would generate a FieldStore that
+ * expects to fetch its input tuple via CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
@@ -340,19 +338,18 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
NULL);
/*
- * For now, can't cope with inserting NULL into an array, so make
- * it a no-op per discussion above...
+ * For now, can't cope with inserting NULL into an array, so make it a
+ * no-op per discussion above...
*/
if (eisnull)
return PointerGetDatum(array_source);
/*
- * For an assignment, if all the subscripts and the input
- * expression are non-null but the original array is null, then
- * substitute an empty (zero-dimensional) array and proceed with
- * the assignment. This only works for varlena arrays, though; for
- * fixed-length array types we punt and return the null input
- * array.
+ * For an assignment, if all the subscripts and the input expression
+ * are non-null but the original array is null, then substitute an
+ * empty (zero-dimensional) array and proceed with the assignment.
+ * This only works for varlena arrays, though; for fixed-length array
+ * types we punt and return the null input array.
*/
if (*isNull)
{
@@ -379,7 +376,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
else
resultArray = array_set_slice(array_source, i,
upper.indx, lower.indx,
- (ArrayType *) DatumGetPointer(sourceData),
+ (ArrayType *) DatumGetPointer(sourceData),
astate->refattrlength,
astate->refelemlength,
astate->refelembyval,
@@ -451,10 +448,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
/*
* Get the slot and attribute number we want
*
- * The asserts check that references to system attributes only appear at
- * the level of a relation scan; at higher levels, system attributes
- * must be treated as ordinary variables (since we no longer have
- * access to the original tuple).
+ * The asserts check that references to system attributes only appear at the
+ * level of a relation scan; at higher levels, system attributes must be
+ * treated as ordinary variables (since we no longer have access to the
+ * original tuple).
*/
attnum = variable->varattno;
@@ -477,6 +474,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
}
#ifdef USE_ASSERT_CHECKING
+
/*
* Some checks that are only applied for user attribute numbers (bogus
* system attnums will be caught inside slot_getattr).
@@ -491,11 +489,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
Assert(attnum <= tuple_type->natts);
/*
- * This assert checks that the datatype the plan expects to get
- * (as told by our "variable" argument) is in fact the datatype of
- * the attribute being fetched (as seen in the current context,
- * identified by our "econtext" argument). Otherwise crashes are
- * likely.
+ * This assert checks that the datatype the plan expects to get (as
+ * told by our "variable" argument) is in fact the datatype of the
+ * attribute being fetched (as seen in the current context, identified
+ * by our "econtext" argument). Otherwise crashes are likely.
*
* Note that we can't check dropped columns, since their atttypid has
* been zeroed.
@@ -503,7 +500,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid ||
tuple_type->attrs[attnum - 1]->attisdropped);
}
-#endif /* USE_ASSERT_CHECKING */
+#endif /* USE_ASSERT_CHECKING */
return slot_getattr(slot, attnum, isNull);
}
@@ -559,9 +556,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
if (thisParamKind == PARAM_EXEC)
{
/*
- * PARAM_EXEC params (internal executor parameters) are stored in
- * the ecxt_param_exec_vals array, and can be accessed by array
- * index.
+ * PARAM_EXEC params (internal executor parameters) are stored in the
+ * ecxt_param_exec_vals array, and can be accessed by array index.
*/
ParamExecData *prm;
@@ -579,8 +575,7 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
else
{
/*
- * All other parameter types must be sought in
- * ecxt_param_list_info.
+ * All other parameter types must be sought in ecxt_param_list_info.
*/
ParamListInfo paramInfo;
@@ -641,9 +636,9 @@ GetAttributeByNum(HeapTupleHeader tuple,
tupDesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@@ -699,9 +694,9 @@ GetAttributeByName(HeapTupleHeader tuple, const char *attname, bool *isNull)
elog(ERROR, "attribute \"%s\" does not exist", attname);
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@@ -730,9 +725,9 @@ init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt)
/*
* Safety check on nargs. Under normal circumstances this should never
- * fail, as parser should check sooner. But possibly it might fail
- * if server has been compiled with FUNC_MAX_ARGS smaller than some
- * functions declared in pg_proc?
+ * fail, as parser should check sooner. But possibly it might fail if
+ * server has been compiled with FUNC_MAX_ARGS smaller than some functions
+ * declared in pg_proc?
*/
if (list_length(fcache->args) > FUNC_MAX_ARGS)
ereport(ERROR,
@@ -793,10 +788,9 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
if (thisArgIsDone != ExprSingleResult)
{
/*
- * We allow only one argument to have a set value; we'd need
- * much more complexity to keep track of multiple set
- * arguments (cf. ExecTargetList) and it doesn't seem worth
- * it.
+ * We allow only one argument to have a set value; we'd need much
+ * more complexity to keep track of multiple set arguments (cf.
+ * ExecTargetList) and it doesn't seem worth it.
*/
if (argIsDone != ExprSingleResult)
ereport(ERROR,
@@ -835,11 +829,10 @@ ExecMakeFunctionResult(FuncExprState *fcache,
check_stack_depth();
/*
- * arguments is a list of expressions to evaluate before passing to
- * the function manager. We skip the evaluation if it was already
- * done in the previous call (ie, we are continuing the evaluation of
- * a set-valued function). Otherwise, collect the current argument
- * values into fcinfo.
+ * arguments is a list of expressions to evaluate before passing to the
+ * function manager. We skip the evaluation if it was already done in the
+ * previous call (ie, we are continuing the evaluation of a set-valued
+ * function). Otherwise, collect the current argument values into fcinfo.
*/
if (!fcache->setArgsValid)
{
@@ -870,8 +863,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
- * If function returns set, prepare a resultinfo node for
- * communication
+ * If function returns set, prepare a resultinfo node for communication
*/
if (fcache->func.fn_retset)
{
@@ -887,14 +879,14 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
- * now return the value gotten by calling the function manager,
- * passing the function the evaluated parameter values.
+ * now return the value gotten by calling the function manager, passing
+ * the function the evaluated parameter values.
*/
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready
- * to accept one.
+ * We need to return a set result. Complain if caller not ready to
+ * accept one.
*/
if (isDone == NULL)
ereport(ERROR,
@@ -902,18 +894,18 @@ ExecMakeFunctionResult(FuncExprState *fcache,
errmsg("set-valued function called in context that cannot accept a set")));
/*
- * This loop handles the situation where we have both a set
- * argument and a set-valued function. Once we have exhausted the
- * function's value(s) for a particular argument value, we have to
- * get the next argument value and start the function over again.
- * We might have to do it more than once, if the function produces
- * an empty result set for a particular input value.
+ * This loop handles the situation where we have both a set argument
+ * and a set-valued function. Once we have exhausted the function's
+ * value(s) for a particular argument value, we have to get the next
+ * argument value and start the function over again. We might have to
+ * do it more than once, if the function produces an empty result set
+ * for a particular input value.
*/
for (;;)
{
/*
- * If function is strict, and there are any NULL arguments,
- * skip calling the function (at least for this set of args).
+ * If function is strict, and there are any NULL arguments, skip
+ * calling the function (at least for this set of args).
*/
bool callit = true;
@@ -948,8 +940,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
/*
* Got a result from current argument. If function itself
- * returns set, save the current argument values to re-use
- * on the next call.
+ * returns set, save the current argument values to re-use on
+ * the next call.
*/
if (fcache->func.fn_retset && *isDone == ExprMultipleResult)
{
@@ -961,7 +953,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
- PointerGetDatum(fcache));
+ PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
@@ -992,8 +984,8 @@ ExecMakeFunctionResult(FuncExprState *fcache,
}
/*
- * If we reach here, loop around to run the function on the
- * new argument.
+ * If we reach here, loop around to run the function on the new
+ * argument.
*/
}
}
@@ -1003,9 +995,9 @@ ExecMakeFunctionResult(FuncExprState *fcache,
* Non-set case: much easier.
*
* We change the ExprState function pointer to use the simpler
- * ExecMakeFunctionResultNoSets on subsequent calls. This amounts
- * to assuming that no argument can return a set if it didn't do
- * so the first time.
+ * ExecMakeFunctionResultNoSets on subsequent calls. This amounts to
+ * assuming that no argument can return a set if it didn't do so the
+ * first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;
@@ -1074,8 +1066,8 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
InitFunctionCallInfoData(fcinfo, &(fcache->func), i, NULL, NULL);
/*
- * If function is strict, and there are any NULL arguments, skip
- * calling the function and return NULL.
+ * If function is strict, and there are any NULL arguments, skip calling
+ * the function and return NULL.
*/
if (fcache->func.fn_strict)
{
@@ -1100,7 +1092,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache,
* ExecMakeTableFunctionResult
*
* Evaluate a table function, producing a materialized result in a Tuplestore
- * object. *returnDesc is set to the tupledesc actually returned by the
+ * object. *returnDesc is set to the tupledesc actually returned by the
* function, or NULL if it didn't provide one.
*/
Tuplestorestate *
@@ -1130,11 +1122,11 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
get_typtype(funcrettype) == 'c');
/*
- * Prepare a resultinfo node for communication. We always do this
- * even if not expecting a set result, so that we can pass
- * expectedDesc. In the generic-expression case, the expression
- * doesn't actually get to see the resultinfo, but set it up anyway
- * because we use some of the fields as our own state variables.
+ * Prepare a resultinfo node for communication. We always do this even if
+ * not expecting a set result, so that we can pass expectedDesc. In the
+ * generic-expression case, the expression doesn't actually get to see the
+ * resultinfo, but set it up anyway because we use some of the fields as
+ * our own state variables.
*/
InitFunctionCallInfoData(fcinfo, NULL, 0, NULL, (Node *) &rsinfo);
rsinfo.type = T_ReturnSetInfo;
@@ -1147,14 +1139,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
rsinfo.setDesc = NULL;
/*
- * Normally the passed expression tree will be a FuncExprState, since
- * the grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set
- * then the planner might have replaced the function call via
- * constant-folding or inlining. So if we see any other kind of
- * expression node, execute it via the general ExecEvalExpr() code;
- * the only difference is that we don't get a chance to pass a special
- * ReturnSetInfo to any functions buried in the expression.
+ * Normally the passed expression tree will be a FuncExprState, since the
+ * grammar only allows a function call at the top level of a table
+ * function reference. However, if the function doesn't return set then
+ * the planner might have replaced the function call via constant-folding
+ * or inlining. So if we see any other kind of expression node, execute
+ * it via the general ExecEvalExpr() code; the only difference is that we
+ * don't get a chance to pass a special ReturnSetInfo to any functions
+ * buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
@@ -1182,9 +1174,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
- * argument values would disappear when we reset the context in
- * the inner loop. So do it in caller context. Perhaps we should
- * make a separate context just to hold the evaluated arguments?
+ * argument values would disappear when we reset the context in the
+ * inner loop. So do it in caller context. Perhaps we should make a
+ * separate context just to hold the evaluated arguments?
*/
fcinfo.flinfo = &(fcache->func);
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
@@ -1217,8 +1209,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
}
/*
- * Switch to short-lived context for calling the function or
- * expression.
+ * Switch to short-lived context for calling the function or expression.
*/
MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@@ -1232,9 +1223,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
HeapTuple tuple;
/*
- * reset per-tuple memory context before each call of the function
- * or expression. This cleans up any local memory the function may
- * leak when called.
+ * reset per-tuple memory context before each call of the function or
+ * expression. This cleans up any local memory the function may leak
+ * when called.
*/
ResetExprContext(econtext);
@@ -1261,12 +1252,12 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
break;
/*
- * Can't do anything very useful with NULL rowtype values.
- * For a function returning set, we consider this a protocol
- * violation (but another alternative would be to just ignore
- * the result and "continue" to get another row). For a function
- * not returning set, we fall out of the loop; we'll cons up
- * an all-nulls result row below.
+ * Can't do anything very useful with NULL rowtype values. For a
+ * function returning set, we consider this a protocol violation
+ * (but another alternative would be to just ignore the result and
+ * "continue" to get another row). For a function not returning
+ * set, we fall out of the loop; we'll cons up an all-nulls result
+ * row below.
*/
if (returnsTuple && fcinfo.isnull)
{
@@ -1278,8 +1269,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
}
/*
- * If first time through, build tupdesc and tuplestore for
- * result
+ * If first time through, build tupdesc and tuplestore for result
*/
if (first_time)
{
@@ -1287,15 +1277,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
if (returnsTuple)
{
/*
- * Use the type info embedded in the rowtype Datum to
- * look up the needed tupdesc. Make a copy for the
- * query.
+ * Use the type info embedded in the rowtype Datum to look
+ * up the needed tupdesc. Make a copy for the query.
*/
HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
- HeapTupleHeaderGetTypMod(td));
+ HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
@@ -1507,7 +1496,7 @@ ExecEvalDistinct(FuncExprState *fcache,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM does not support set arguments")));
+ errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);
if (fcinfo.argnull[0] && fcinfo.argnull[1])
@@ -1580,12 +1569,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("op ANY/ALL (array) does not support set arguments")));
+ errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);
/*
- * If the array is NULL then we return NULL --- it's not very
- * meaningful to do anything else, even if the operator isn't strict.
+ * If the array is NULL then we return NULL --- it's not very meaningful
+ * to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
@@ -1598,18 +1587,17 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
/*
* If the array is empty, we return either FALSE or TRUE per the useOr
* flag. This is correct even if the scalar is NULL; since we would
- * evaluate the operator zero times, it matters not whether it would
- * want to return NULL.
+ * evaluate the operator zero times, it matters not whether it would want
+ * to return NULL.
*/
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);
/*
- * If the scalar is NULL, and the function is strict, return NULL.
- * This is just to avoid having to test for strictness inside the
- * loop. (XXX but if arrays could have null elements, we'd need a
- * test anyway.)
+ * If the scalar is NULL, and the function is strict, return NULL. This is
+ * just to avoid having to test for strictness inside the loop. (XXX but
+ * if arrays could have null elements, we'd need a test anyway.)
*/
if (fcinfo.argnull[0] && sstate->fxprstate.func.fn_strict)
{
@@ -1618,9 +1606,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
}
/*
- * We arrange to look up info about the element type only once per
- * series of calls, assuming the element type doesn't change
- * underneath us.
+ * We arrange to look up info about the element type only once per series
+ * of calls, assuming the element type doesn't change underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
{
@@ -1711,15 +1698,15 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
expr_value = ExecEvalExpr(clause, econtext, isNull, NULL);
/*
- * if the expression evaluates to null, then we just cascade the null
- * back to whoever called us.
+ * if the expression evaluates to null, then we just cascade the null back
+ * to whoever called us.
*/
if (*isNull)
return expr_value;
/*
- * evaluation of 'not' is simple.. expr is false, then return 'true'
- * and vice versa.
+ * evaluation of 'not' is simple.. expr is false, then return 'true' and
+ * vice versa.
*/
return BoolGetDatum(!DatumGetBool(expr_value));
}
@@ -1742,18 +1729,17 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
AnyNull = false;
/*
- * If any of the clauses is TRUE, the OR result is TRUE regardless of
- * the states of the rest of the clauses, so we can stop evaluating
- * and return TRUE immediately. If none are TRUE and one or more is
- * NULL, we return NULL; otherwise we return FALSE. This makes sense
- * when you interpret NULL as "don't know": if we have a TRUE then the
- * OR is TRUE even if we aren't sure about some of the other inputs.
- * If all the known inputs are FALSE, but we have one or more "don't
- * knows", then we have to report that we "don't know" what the OR's
- * result should be --- perhaps one of the "don't knows" would have
- * been TRUE if we'd known its value. Only when all the inputs are
- * known to be FALSE can we state confidently that the OR's result is
- * FALSE.
+ * If any of the clauses is TRUE, the OR result is TRUE regardless of the
+ * states of the rest of the clauses, so we can stop evaluating and return
+ * TRUE immediately. If none are TRUE and one or more is NULL, we return
+ * NULL; otherwise we return FALSE. This makes sense when you interpret
+ * NULL as "don't know": if we have a TRUE then the OR is TRUE even if we
+ * aren't sure about some of the other inputs. If all the known inputs are
+ * FALSE, but we have one or more "don't knows", then we have to report
+ * that we "don't know" what the OR's result should be --- perhaps one of
+ * the "don't knows" would have been TRUE if we'd known its value. Only
+ * when all the inputs are known to be FALSE can we state confidently that
+ * the OR's result is FALSE.
*/
foreach(clause, clauses)
{
@@ -1794,12 +1780,12 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
AnyNull = false;
/*
- * If any of the clauses is FALSE, the AND result is FALSE regardless
- * of the states of the rest of the clauses, so we can stop evaluating
- * and return FALSE immediately. If none are FALSE and one or more is
- * NULL, we return NULL; otherwise we return TRUE. This makes sense
- * when you interpret NULL as "don't know", using the same sort of
- * reasoning as for OR, above.
+ * If any of the clauses is FALSE, the AND result is FALSE regardless of
+ * the states of the rest of the clauses, so we can stop evaluating and
+ * return FALSE immediately. If none are FALSE and one or more is NULL,
+ * we return NULL; otherwise we return TRUE. This makes sense when you
+ * interpret NULL as "don't know", using the same sort of reasoning as for
+ * OR, above.
*/
foreach(clause, clauses)
@@ -1826,7 +1812,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@@ -1865,10 +1851,9 @@ ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
tmptup.t_data = tuple;
/*
- * Extract all the values of the old tuple, offsetting the arrays
- * so that invalues[0] is NULL and invalues[1] is the first
- * source attribute; this exactly matches the numbering convention
- * in attrMap.
+ * Extract all the values of the old tuple, offsetting the arrays so that
+ * invalues[0] is NULL and invalues[1] is the first source attribute; this
+ * exactly matches the numbering convention in attrMap.
*/
heap_deform_tuple(&tmptup, cstate->indesc, invalues + 1, inisnull + 1);
invalues[0] = (Datum) 0;
@@ -1915,10 +1900,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
*isDone = ExprSingleResult;
/*
- * If there's a test expression, we have to evaluate it and save the
- * value where the CaseTestExpr placeholders can find it. We must save
- * and restore prior setting of econtext's caseValue fields, in case
- * this node is itself within a larger CASE.
+ * If there's a test expression, we have to evaluate it and save the value
+ * where the CaseTestExpr placeholders can find it. We must save and
+ * restore prior setting of econtext's caseValue fields, in case this node
+ * is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
@@ -1927,14 +1912,14 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
- &econtext->caseValue_isNull,
+ &econtext->caseValue_isNull,
NULL);
}
/*
- * we evaluate each of the WHEN clauses in turn, as soon as one is
- * true we return the corresponding result. If none are true then we
- * return the value of the default clause, or NULL if there is none.
+ * we evaluate each of the WHEN clauses in turn, as soon as one is true we
+ * return the corresponding result. If none are true then we return the
+ * value of the default clause, or NULL if there is none.
*/
foreach(clause, clauses)
{
@@ -1947,9 +1932,9 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
NULL);
/*
- * if we have a true test, then we return the result, since the
- * case statement is satisfied. A NULL result from the test is
- * not considered true.
+ * if we have a true test, then we return the result, since the case
+ * statement is satisfied. A NULL result from the test is not
+ * considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
{
@@ -2098,7 +2083,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
- "included in ARRAY construct with element type %s.",
+ "included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));
@@ -2110,8 +2095,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds " \
- "the maximum allowed (%d)", ndims, MAXDIM)));
+ errmsg("number of array dimensions (%d) exceeds " \
+ "the maximum allowed (%d)", ndims, MAXDIM)));
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
@@ -2130,8 +2115,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("multidimensional arrays must have array "
- "expressions with matching dimensions")));
+ errmsg("multidimensional arrays must have array "
+ "expressions with matching dimensions")));
}
elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
@@ -2258,10 +2243,10 @@ static Datum
ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
- Datum result = (Datum) 0;
+ Datum result = (Datum) 0;
MinMaxOp op = ((MinMaxExpr *) minmaxExpr->xprstate.expr)->op;
FunctionCallInfoData locfcinfo;
- ListCell *arg;
+ ListCell *arg;
if (isDone)
*isDone = ExprSingleResult;
@@ -2295,7 +2280,7 @@ ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
locfcinfo.arg[1] = value;
locfcinfo.isnull = false;
cmpresult = DatumGetInt32(FunctionCallInvoke(&locfcinfo));
- if (locfcinfo.isnull) /* probably should not happen */
+ if (locfcinfo.isnull) /* probably should not happen */
continue;
if (cmpresult > 0 && op == IS_LEAST)
result = value;
@@ -2531,8 +2516,8 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("domain %s does not allow null values",
- format_type_be(ctest->resulttype))));
+ errmsg("domain %s does not allow null values",
+ format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
{
@@ -2545,8 +2530,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
* Set up value to be returned by CoerceToDomainValue
* nodes. We must save and restore prior setting of
* econtext's domainValue fields, in case this node is
- * itself within a check expression for another
- * domain.
+ * itself within a check expression for another domain.
*/
save_datum = econtext->domainValue_datum;
save_isNull = econtext->domainValue_isNull;
@@ -2647,9 +2631,9 @@ ExecEvalFieldSelect(FieldSelectState *fstate,
}
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
@@ -2715,8 +2699,8 @@ ExecEvalFieldStore(FieldStoreState *fstate,
if (!*isNull)
{
/*
- * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader.
- * We set all the fields in the struct just in case.
+ * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader. We
+ * set all the fields in the struct just in case.
*/
HeapTupleHeader tuphdr;
HeapTupleData tmptup;
@@ -2749,11 +2733,11 @@ ExecEvalFieldStore(FieldStoreState *fstate,
Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);
/*
- * Use the CaseTestExpr mechanism to pass down the old value of
- * the field being replaced; this is useful in case we have a
- * nested field update situation. It's safe to reuse the CASE
- * mechanism because there cannot be a CASE between here and where
- * the value would be needed.
+ * Use the CaseTestExpr mechanism to pass down the old value of the
+ * field being replaced; this is useful in case we have a nested field
+ * update situation. It's safe to reuse the CASE mechanism because
+ * there cannot be a CASE between here and where the value would be
+ * needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = isnull[fieldnum - 1];
@@ -2895,8 +2879,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
- * nonsensical. (This should have been caught
- * earlier, but we defend against it here anyway.)
+ * nonsensical. (This should have been caught earlier,
+ * but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
@@ -3020,9 +3004,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
elog(ERROR, "SubPlan found with no parent plan");
/*
- * Here we just add the SubPlanState nodes to
- * parent->subPlan. The subplans will be initialized
- * later.
+ * Here we just add the SubPlanState nodes to parent->subPlan.
+ * The subplans will be initialized later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
@@ -3073,8 +3056,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
{
ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
ConvertRowtypeExprState *cstate = makeNode(ConvertRowtypeExprState);
- int i;
- int n;
+ int i;
+ int n;
cstate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalConvertRowtype;
cstate->arg = ExecInitExpr(convert->arg, parent);
@@ -3095,7 +3078,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
int j;
if (att->attisdropped)
- continue; /* attrMap[i] is already 0 */
+ continue; /* attrMap[i] is already 0 */
attname = NameStr(att->attname);
atttypid = att->atttypid;
atttypmod = att->atttypmod;
@@ -3111,7 +3094,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
elog(ERROR, "attribute \"%s\" of type %s does not match corresponding attribute of type %s",
attname,
format_type_be(cstate->indesc->tdtypeid),
- format_type_be(cstate->outdesc->tdtypeid));
+ format_type_be(cstate->outdesc->tdtypeid));
cstate->attrMap[i] = (AttrNumber) (j + 1);
break;
}
@@ -3217,24 +3200,24 @@ ExecInitExpr(Expr *node, PlanState *parent)
if (!attrs[i]->attisdropped)
{
/*
- * Guard against ALTER COLUMN TYPE on rowtype
- * since the RowExpr was created. XXX should we
- * check typmod too? Not sure we can be sure
- * it'll be the same.
+ * Guard against ALTER COLUMN TYPE on rowtype since
+ * the RowExpr was created. XXX should we check
+ * typmod too? Not sure we can be sure it'll be the
+ * same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
- format_type_be(exprType((Node *) e)),
- format_type_be(attrs[i]->atttypid))));
+ format_type_be(exprType((Node *) e)),
+ format_type_be(attrs[i]->atttypid))));
}
else
{
/*
- * Ignore original expression and insert a NULL.
- * We don't really care what type of NULL it is,
- * so always make an int4 NULL.
+ * Ignore original expression and insert a NULL. We
+ * don't really care what type of NULL it is, so
+ * always make an int4 NULL.
*/
e = (Expr *) makeNullConst(INT4OID);
}
@@ -3485,16 +3468,16 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE
- * result, we can stop evaluating and return FALSE --- the AND result
- * must be FALSE. Also, if we find a NULL result when resultForNull
- * is FALSE, we can stop and return FALSE --- the AND result must be
- * FALSE or NULL in that case, and the caller doesn't care which.
+ * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * we can stop evaluating and return FALSE --- the AND result must be
+ * FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
+ * can stop and return FALSE --- the AND result must be FALSE or NULL in
+ * that case, and the caller doesn't care which.
*
- * If we get to the end of the list, we can return TRUE. This will
- * happen when the AND result is indeed TRUE, or when the AND result
- * is NULL (one or more NULL subresult, with all the rest TRUE) and
- * the caller has specified resultForNull = TRUE.
+ * If we get to the end of the list, we can return TRUE. This will happen
+ * when the AND result is indeed TRUE, or when the AND result is NULL (one
+ * or more NULL subresult, with all the rest TRUE) and the caller has
+ * specified resultForNull = TRUE.
*/
result = true;
@@ -3637,8 +3620,7 @@ ExecTargetList(List *targetlist,
if (*isDone == ExprSingleResult)
{
/*
- * all sets are done, so report that tlist expansion is
- * complete.
+ * all sets are done, so report that tlist expansion is complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@@ -3647,8 +3629,8 @@ ExecTargetList(List *targetlist,
else
{
/*
- * We have some done and some undone sets. Restart the done
- * ones so that we can deliver a tuple (if possible).
+ * We have some done and some undone sets. Restart the done ones
+ * so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
{
@@ -3666,8 +3648,8 @@ ExecTargetList(List *targetlist,
if (itemIsDone[resind] == ExprEndResult)
{
/*
- * Oh dear, this item is returning an empty set.
- * Guess we can't make a tuple after all.
+ * Oh dear, this item is returning an empty set. Guess
+ * we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;
@@ -3676,9 +3658,9 @@ ExecTargetList(List *targetlist,
}
/*
- * If we cannot make a tuple because some sets are empty, we
- * still have to cycle the nonempty sets to completion, else
- * resources will not be released from subplans etc.
+ * If we cannot make a tuple because some sets are empty, we still
+ * have to cycle the nonempty sets to completion, else resources
+ * will not be released from subplans etc.
*
* XXX is that still necessary?
*/
@@ -3741,8 +3723,8 @@ ExecVariableList(ProjectionInfo *projInfo,
projInfo->pi_lastScanVar);
/*
- * Assign to result by direct extraction of fields from source
- * slots ... a mite ugly, but fast ...
+ * Assign to result by direct extraction of fields from source slots ... a
+ * mite ugly, but fast ...
*/
for (i = list_length(projInfo->pi_targetlist) - 1; i >= 0; i--)
{
@@ -3784,10 +3766,9 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
slot = projInfo->pi_slot;
/*
- * Clear any former contents of the result slot. This makes it
- * safe for us to use the slot's Datum/isnull arrays as workspace.
- * (Also, we can return the slot as-is if we decide no rows can
- * be projected.)
+ * Clear any former contents of the result slot. This makes it safe for
+ * us to use the slot's Datum/isnull arrays as workspace. (Also, we can
+ * return the slot as-is if we decide no rows can be projected.)
*/
ExecClearTuple(slot);
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 843aa15101c..90ffda092a0 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.36 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.37 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,16 +61,16 @@ ExecScan(ScanState *node,
projInfo = node->ps.ps_ProjInfo;
/*
- * If we have neither a qual to check nor a projection to do,
- * just skip all the overhead and return the raw scan tuple.
+ * If we have neither a qual to check nor a projection to do, just skip
+ * all the overhead and return the raw scan tuple.
*/
if (!qual && !projInfo)
return (*accessMtd) (node);
/*
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous scan
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
@@ -84,15 +84,15 @@ ExecScan(ScanState *node,
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a scan tuple.
*/
econtext = node->ps.ps_ExprContext;
ResetExprContext(econtext);
/*
- * get a tuple from the access method loop until we obtain a tuple
- * which passes the qualification.
+ * get a tuple from the access method loop until we obtain a tuple which
+ * passes the qualification.
*/
for (;;)
{
@@ -103,10 +103,10 @@ ExecScan(ScanState *node,
slot = (*accessMtd) (node);
/*
- * if the slot returned by the accessMtd contains NULL, then it
- * means there is nothing more to scan so we just return an empty
- * slot, being careful to use the projection result slot so it has
- * correct tupleDesc.
+ * if the slot returned by the accessMtd contains NULL, then it means
+ * there is nothing more to scan so we just return an empty slot,
+ * being careful to use the projection result slot so it has correct
+ * tupleDesc.
*/
if (TupIsNull(slot))
{
@@ -125,8 +125,8 @@ ExecScan(ScanState *node,
* check that the current tuple satisfies the qual-clause
*
* check for non-nil qual here to avoid a function call to ExecQual()
- * when the qual is nil ... saves only a few cycles, but they add
- * up ...
+ * when the qual is nil ... saves only a few cycles, but they add up
+ * ...
*/
if (!qual || ExecQual(qual, econtext, false))
{
@@ -136,10 +136,9 @@ ExecScan(ScanState *node,
if (projInfo)
{
/*
- * Form a projection tuple, store it in the result tuple
- * slot and return it --- unless we find we can project no
- * tuples from this scan tuple, in which case continue
- * scan.
+ * Form a projection tuple, store it in the result tuple slot
+ * and return it --- unless we find we can project no tuples
+ * from this scan tuple, in which case continue scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
@@ -226,8 +225,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return false; /* tlist too long */
/*
- * If the plan context requires a particular hasoid setting, then that
- * has to match, too.
+ * If the plan context requires a particular hasoid setting, then that has
+ * to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 1c82a3b64be..b38bcc44cb4 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.87 2005/04/06 16:34:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,7 +129,7 @@ ExecCreateTupleTable(int tableSize)
* allocate the table itself
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData) +
- (tableSize - 1) * sizeof(TupleTableSlot));
+ (tableSize - 1) *sizeof(TupleTableSlot));
newtable->size = tableSize;
newtable->next = 0;
@@ -175,10 +175,9 @@ ExecDropTupleTable(TupleTable table, /* tuple table */
Assert(table != NULL);
/*
- * first free all the valid pointers in the tuple array and drop
- * refcounts of any referenced buffers, if that's what the caller
- * wants. (There is probably no good reason for the caller ever not
- * to want it!)
+ * first free all the valid pointers in the tuple array and drop refcounts
+ * of any referenced buffers, if that's what the caller wants. (There is
+ * probably no good reason for the caller ever not to want it!)
*/
if (shouldFree)
{
@@ -288,9 +287,9 @@ ExecAllocTableSlot(TupleTable table)
Assert(table != NULL);
/*
- * We expect that the table was made big enough to begin with.
- * We cannot reallocate it on the fly since previous plan nodes
- * have already got pointers to individual entries.
+ * We expect that the table was made big enough to begin with. We cannot
+ * reallocate it on the fly since previous plan nodes have already got
+ * pointers to individual entries.
*/
if (table->next >= table->size)
elog(ERROR, "plan requires more slots than are available");
@@ -322,8 +321,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays
- * if present (we don't bother to check if they could be re-used).
+ * Release any old descriptor. Also release old Datum/isnull arrays if
+ * present (we don't bother to check if they could be re-used).
*/
if (slot->tts_shouldFreeDesc)
FreeTupleDesc(slot->tts_tupleDescriptor);
@@ -340,9 +339,8 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
slot->tts_shouldFreeDesc = shouldFree;
/*
- * Allocate Datum/isnull arrays of the appropriate size. These must
- * have the same lifetime as the slot, so allocate in the slot's own
- * context.
+ * Allocate Datum/isnull arrays of the appropriate size. These must have
+ * the same lifetime as the slot, so allocate in the slot's own context.
*/
slot->tts_values = (Datum *)
MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum));
@@ -417,8 +415,8 @@ ExecStoreTuple(HeapTuple tuple,
slot->tts_tuple = tuple;
/*
- * If tuple is on a disk page, keep the page pinned as long as we hold
- * a pointer into it. We assume the caller already has such a pin.
+ * If tuple is on a disk page, keep the page pinned as long as we hold a
+ * pointer into it. We assume the caller already has such a pin.
*/
slot->tts_buffer = buffer;
if (BufferIsValid(buffer))
@@ -621,21 +619,20 @@ ExecMaterializeSlot(TupleTableSlot *slot)
Assert(!slot->tts_isempty);
/*
- * If we have a physical tuple, and it's locally palloc'd, we have
- * nothing to do.
+ * If we have a physical tuple, and it's locally palloc'd, we have nothing
+ * to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree)
return slot->tts_tuple;
/*
* Otherwise, copy or build a tuple, and then store it as the new slot
- * value. (Note: tts_nvalid will be reset to zero here. There are
- * cases in which this could be optimized but it's probably not worth
- * worrying about.)
+ * value. (Note: tts_nvalid will be reset to zero here. There are cases
+ * in which this could be optimized but it's probably not worth worrying
+ * about.)
*
- * We may be called in a context that is shorter-lived than the
- * tuple slot, but we have to ensure that the materialized tuple
- * will survive anyway.
+ * We may be called in a context that is shorter-lived than the tuple slot,
+ * but we have to ensure that the materialized tuple will survive anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);
@@ -663,9 +660,9 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
MemoryContext oldContext;
/*
- * There might be ways to optimize this when the source is virtual,
- * but for now just always build a physical copy. Make sure it is
- * in the right context.
+ * There might be ways to optimize this when the source is virtual, but
+ * for now just always build a physical copy. Make sure it is in the
+ * right context.
*/
oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt);
newTuple = ExecCopySlotTuple(srcslot);
@@ -893,8 +890,7 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc)
attinmeta->tupdesc = BlessTupleDesc(tupdesc);
/*
- * Gather info needed later to call the "in" function for each
- * attribute
+ * Gather info needed later to call the "in" function for each attribute
*/
attinfuncinfo = (FmgrInfo *) palloc0(natts * sizeof(FmgrInfo));
attioparams = (Oid *) palloc0(natts * sizeof(Oid));
@@ -974,8 +970,8 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
tuple = heap_formtuple(tupdesc, dvalues, nulls);
/*
- * Release locally palloc'd space. XXX would probably be good to
- * pfree values of pass-by-reference datums, as well.
+ * Release locally palloc'd space. XXX would probably be good to pfree
+ * values of pass-by-reference datums, as well.
*/
pfree(dvalues);
pfree(nulls);
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index feeffe70520..05bfc08dc7d 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.125 2005/08/01 20:31:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ int NTupleReplaced;
int NTupleAppended;
int NTupleDeleted;
int NIndexTupleInserted;
-extern int NIndexTupleProcessed; /* have to be defined in the
- * access method level so that the
+extern int NIndexTupleProcessed; /* have to be defined in the access
+ * method level so that the
* cinterface.a will link ok. */
@@ -166,8 +166,8 @@ CreateExecutorState(void)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the EState node within the per-query context. This way, we
- * don't need a separate pfree() operation for it at shutdown.
+ * Make the EState node within the per-query context. This way, we don't
+ * need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);
@@ -244,16 +244,16 @@ void
FreeExecutorState(EState *estate)
{
/*
- * Shut down and free any remaining ExprContexts. We do this
- * explicitly to ensure that any remaining shutdown callbacks get
- * called (since they might need to release resources that aren't
- * simply memory within the per-query memory context).
+ * Shut down and free any remaining ExprContexts. We do this explicitly
+ * to ensure that any remaining shutdown callbacks get called (since they
+ * might need to release resources that aren't simply memory within the
+ * per-query memory context).
*/
while (estate->es_exprcontexts)
{
/*
- * XXX: seems there ought to be a faster way to implement this
- * than repeated list_delete(), no?
+ * XXX: seems there ought to be a faster way to implement this than
+ * repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
@@ -324,10 +324,9 @@ CreateExprContext(EState *estate)
econtext->ecxt_callbacks = NULL;
/*
- * Link the ExprContext into the EState to ensure it is shut down when
- * the EState is freed. Because we use lcons(), shutdowns will occur
- * in reverse order of creation, which may not be essential but can't
- * hurt.
+ * Link the ExprContext into the EState to ensure it is shut down when the
+ * EState is freed. Because we use lcons(), shutdowns will occur in
+ * reverse order of creation, which may not be essential but can't hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
@@ -471,9 +470,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
}
/*
- * ExecTypeFromTL needs the parse-time representation of the tlist,
- * not a list of ExprStates. This is good because some plan nodes
- * don't bother to set up planstate->targetlist ...
+ * ExecTypeFromTL needs the parse-time representation of the tlist, not a
+ * list of ExprStates. This is good because some plan nodes don't bother
+ * to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
@@ -518,8 +517,8 @@ ExecBuildProjectionInfo(List *targetList,
/*
* Determine whether the target list consists entirely of simple Var
- * references (ie, references to non-system attributes). If so,
- * we can use the simpler ExecVariableList instead of ExecTargetList.
+ * references (ie, references to non-system attributes). If so, we can
+ * use the simpler ExecVariableList instead of ExecTargetList.
*/
isVarList = true;
foreach(tl, targetList)
@@ -545,18 +544,18 @@ ExecBuildProjectionInfo(List *targetList,
AttrNumber lastOuterVar = 0;
AttrNumber lastScanVar = 0;
- projInfo->pi_itemIsDone = NULL; /* not needed */
+ projInfo->pi_itemIsDone = NULL; /* not needed */
projInfo->pi_varSlotOffsets = varSlotOffsets = (int *)
palloc0(len * sizeof(int));
projInfo->pi_varNumbers = varNumbers = (int *)
palloc0(len * sizeof(int));
/*
- * Set up the data needed by ExecVariableList. The slots in which
- * the variables can be found at runtime are denoted by the offsets
- * of their slot pointers within the econtext. This rather grotty
- * representation is needed because the caller may not have given
- * us the real econtext yet (see hacks in nodeSubplan.c).
+ * Set up the data needed by ExecVariableList. The slots in which the
+ * variables can be found at runtime are denoted by the offsets of
+ * their slot pointers within the econtext. This rather grotty
+ * representation is needed because the caller may not have given us
+ * the real econtext yet (see hacks in nodeSubplan.c).
*/
foreach(tl, targetList)
{
@@ -631,7 +630,7 @@ ExecAssignProjectionInfo(PlanState *planstate)
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@@ -641,8 +640,8 @@ void
ExecFreeExprContext(PlanState *planstate)
{
/*
- * Per above discussion, don't actually delete the ExprContext.
- * We do unlink it from the plan node, though.
+ * Per above discussion, don't actually delete the ExprContext. We do
+ * unlink it from the plan node, though.
*/
planstate->ps_ExprContext = NULL;
}
@@ -774,13 +773,13 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
* to a new tablespace.
*
* If the index AM is not safe for concurrent updates, obtain an
- * exclusive lock on the index to lock out other updaters as well
- * as readers (index_beginscan places AccessShareLock).
+ * exclusive lock on the index to lock out other updaters as well as
+ * readers (index_beginscan places AccessShareLock).
*
- * If there are multiple not-concurrent-safe indexes, all backends
- * must lock the indexes in the same order or we will get deadlocks
- * here. This is guaranteed by RelationGetIndexList(), which promises
- * to return the index list in OID order.
+ * If there are multiple not-concurrent-safe indexes, all backends must
+ * lock the indexes in the same order or we will get deadlocks here.
+ * This is guaranteed by RelationGetIndexList(), which promises to
+ * return the index list in OID order.
*
* The locks will be released in ExecCloseIndices.
*/
@@ -876,9 +875,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapRelation = resultRelInfo->ri_RelationDesc;
/*
- * We will use the EState's per-tuple context for evaluating
- * predicates and index expressions (creating it if it's not already
- * there).
+ * We will use the EState's per-tuple context for evaluating predicates
+ * and index expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -903,8 +901,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
List *predicate;
/*
- * If predicate state not set up yet, create it (in the
- * estate's per-query context)
+ * If predicate state not set up yet, create it (in the estate's
+ * per-query context)
*/
predicate = indexInfo->ii_PredicateState;
if (predicate == NIL)
@@ -921,8 +919,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
}
/*
- * FormIndexDatum fills in its values and isnull parameters with
- * the appropriate values for the column(s) of the index.
+ * FormIndexDatum fills in its values and isnull parameters with the
+ * appropriate values for the column(s) of the index.
*/
FormIndexDatum(indexInfo,
slot,
@@ -931,14 +929,14 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
isnull);
/*
- * The index AM does the rest. Note we suppress unique-index
- * checks if we are being called from VACUUM, since VACUUM may
- * need to move dead tuples that have the same keys as live ones.
+ * The index AM does the rest. Note we suppress unique-index checks
+ * if we are being called from VACUUM, since VACUUM may need to move
+ * dead tuples that have the same keys as live ones.
*/
index_insert(relationDescs[i], /* index relation */
- values, /* array of index Datums */
- isnull, /* null flags */
- tupleid, /* tid of heap tuple */
+ values, /* array of index Datums */
+ isnull, /* null flags */
+ tupleid, /* tid of heap tuple */
heapRelation,
relationDescs[i]->rd_index->indisunique && !is_vacuum);
@@ -959,14 +957,14 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
Bitmapset *parmset;
/*
- * The plan node only depends on params listed in its allParam set.
- * Don't include anything else into its chgParam set.
+ * The plan node only depends on params listed in its allParam set. Don't
+ * include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);
/*
- * Keep node->chgParam == NULL if there's not actually any members;
- * this allows the simplest possible tests in executor node files.
+ * Keep node->chgParam == NULL if there's not actually any members; this
+ * allows the simplest possible tests in executor node files.
*/
if (!bms_is_empty(parmset))
node->chgParam = bms_join(node->chgParam, parmset);
@@ -1049,8 +1047,8 @@ ShutdownExprContext(ExprContext *econtext)
return;
/*
- * Call the callbacks in econtext's per-tuple context. This ensures
- * that any memory they might leak will get cleaned up.
+ * Call the callbacks in econtext's per-tuple context. This ensures that
+ * any memory they might leak will get cleaned up.
*/
oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 893ef64f03f..24a8b9a493a 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.97 2005/04/10 18:04:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,7 +82,7 @@ typedef SQLFunctionCache *SQLFunctionCachePtr;
/* non-export function prototypes */
static execution_state *init_execution_state(List *queryTree_list,
- bool readonly_func);
+ bool readonly_func);
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
@@ -115,14 +115,14 @@ init_execution_state(List *queryTree_list, bool readonly_func)
IsA(queryTree->utilityStmt, TransactionStmt))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
+ /* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a SQL function",
CreateQueryTag(queryTree))));
if (readonly_func && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
+ /* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateQueryTag(queryTree))));
@@ -178,8 +178,8 @@ init_sql_fcache(FmgrInfo *finfo)
procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
/*
- * get the result type from the procedure tuple, and check for
- * polymorphic result type; if so, find out the actual result type.
+ * get the result type from the procedure tuple, and check for polymorphic
+ * result type; if so, find out the actual result type.
*/
rettype = procedureStruct->prorettype;
@@ -190,7 +190,7 @@ init_sql_fcache(FmgrInfo *finfo)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared to return type %s",
- format_type_be(procedureStruct->prorettype))));
+ format_type_be(procedureStruct->prorettype))));
}
fcache->rettype = rettype;
@@ -208,9 +208,9 @@ init_sql_fcache(FmgrInfo *finfo)
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * get the type length and by-value flag from the type tuple; also do
- * a preliminary check for returnsTuple (this may prove inaccurate,
- * see below).
+ * get the type length and by-value flag from the type tuple; also do a
+ * preliminary check for returnsTuple (this may prove inaccurate, see
+ * below).
*/
fcache->typlen = typeStruct->typlen;
fcache->typbyval = typeStruct->typbyval;
@@ -218,8 +218,8 @@ init_sql_fcache(FmgrInfo *finfo)
rettype == RECORDOID);
/*
- * Parse and rewrite the queries. We need the argument type info to
- * pass to the parser.
+ * Parse and rewrite the queries. We need the argument type info to pass
+ * to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
@@ -265,17 +265,17 @@ init_sql_fcache(FmgrInfo *finfo)
queryTree_list = pg_parse_and_rewrite(src, argOidVect, nargs);
/*
- * If the function has any arguments declared as polymorphic types,
- * then it wasn't type-checked at definition time; must do so now.
+ * If the function has any arguments declared as polymorphic types, then
+ * it wasn't type-checked at definition time; must do so now.
*
- * Also, force a type-check if the declared return type is a rowtype; we
- * need to find out whether we are actually returning the whole tuple
- * result, or just regurgitating a rowtype expression result. In the
- * latter case we clear returnsTuple because we need not act different
- * from the scalar result case.
+ * Also, force a type-check if the declared return type is a rowtype; we need
+ * to find out whether we are actually returning the whole tuple result,
+ * or just regurgitating a rowtype expression result. In the latter case
+ * we clear returnsTuple because we need not act different from the scalar
+ * result case.
*
- * In the returnsTuple case, check_sql_fn_retval will also construct
- * a JunkFilter we can use to coerce the returned rowtype to the desired
+ * In the returnsTuple case, check_sql_fn_retval will also construct a
+ * JunkFilter we can use to coerce the returned rowtype to the desired
* form.
*/
if (haspolyarg || fcache->returnsTuple)
@@ -307,9 +307,9 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
/*
* In a read-only function, use the surrounding query's snapshot;
* otherwise take a new snapshot for each query. The snapshot should
- * include a fresh command ID so that all work to date in this
- * transaction is visible. We copy in both cases so that postquel_end
- * can unconditionally do FreeSnapshot.
+ * include a fresh command ID so that all work to date in this transaction
+ * is visible. We copy in both cases so that postquel_end can
+ * unconditionally do FreeSnapshot.
*/
if (fcache->readonly_func)
snapshot = CopySnapshot(ActiveSnapshot);
@@ -470,8 +470,8 @@ postquel_execute(execution_state *es,
if (TupIsNull(slot))
{
/*
- * We fall out here for all cases except where we have obtained
- * a row from a function's final SELECT.
+ * We fall out here for all cases except where we have obtained a row
+ * from a function's final SELECT.
*/
postquel_end(es);
fcinfo->isnull = true;
@@ -479,34 +479,34 @@ postquel_execute(execution_state *es,
}
/*
- * If we got a row from a command within the function it has to be
- * the final command. All others shouldn't be returning anything.
+ * If we got a row from a command within the function it has to be the
+ * final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
/*
- * Set up to return the function value. For pass-by-reference
- * datatypes, be sure to allocate the result in resultcontext,
- * not the current memory context (which has query lifespan).
+ * Set up to return the function value. For pass-by-reference datatypes,
+ * be sure to allocate the result in resultcontext, not the current memory
+ * context (which has query lifespan).
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
if (fcache->returnsTuple)
{
/*
- * We are returning the whole tuple, so filter it and apply the
- * proper labeling to make it a valid Datum. There are several
- * reasons why we do this:
+ * We are returning the whole tuple, so filter it and apply the proper
+ * labeling to make it a valid Datum. There are several reasons why
+ * we do this:
*
- * 1. To copy the tuple out of the child execution context and
- * into the desired result context.
+ * 1. To copy the tuple out of the child execution context and into the
+ * desired result context.
*
- * 2. To remove any junk attributes present in the raw subselect
- * result. (This is probably not absolutely necessary, but it
- * seems like good policy.)
+ * 2. To remove any junk attributes present in the raw subselect result.
+ * (This is probably not absolutely necessary, but it seems like good
+ * policy.)
*
- * 3. To insert dummy null columns if the declared result type
- * has any attisdropped columns.
+ * 3. To insert dummy null columns if the declared result type has any
+ * attisdropped columns.
*/
HeapTuple newtup;
HeapTupleHeader dtup;
@@ -517,19 +517,18 @@ postquel_execute(execution_state *es,
newtup = ExecRemoveJunk(fcache->junkFilter, slot);
/*
- * Compress out the HeapTuple header data. We assume that
- * heap_form_tuple made the tuple with header and body in one
- * palloc'd chunk. We want to return a pointer to the chunk
- * start so that it will work if someone tries to free it.
+ * Compress out the HeapTuple header data. We assume that
+ * heap_form_tuple made the tuple with header and body in one palloc'd
+ * chunk. We want to return a pointer to the chunk start so that it
+ * will work if someone tries to free it.
*/
t_len = newtup->t_len;
dtup = (HeapTupleHeader) newtup;
memmove((char *) dtup, (char *) newtup->t_data, t_len);
/*
- * Use the declared return type if it's not RECORD; else take
- * the type from the computed result, making sure a typmod has
- * been assigned.
+ * Use the declared return type if it's not RECORD; else take the type
+ * from the computed result, making sure a typmod has been assigned.
*/
if (fcache->rettype != RECORDOID)
{
@@ -559,9 +558,8 @@ postquel_execute(execution_state *es,
else
{
/*
- * Returning a scalar, which we have to extract from the first
- * column of the SELECT result, and then copy into result
- * context if needed.
+ * Returning a scalar, which we have to extract from the first column
+ * of the SELECT result, and then copy into result context if needed.
*/
value = slot_getattr(slot, 1, &(fcinfo->isnull));
@@ -617,8 +615,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
es = fcache->func_state;
/*
- * Convert params to appropriate format if starting a fresh execution.
- * (If continuing execution, we can re-use prior params.)
+ * Convert params to appropriate format if starting a fresh execution. (If
+ * continuing execution, we can re-use prior params.)
*/
if (es && es->status == F_EXEC_START)
postquel_sub_params(fcache, fcinfo);
@@ -631,8 +629,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Execute each command in the function one after another until we're
- * executing the final command and get a result or we run out of
- * commands.
+ * executing the final command and get a result or we run out of commands.
*/
while (es)
{
@@ -691,8 +688,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
}
/*
- * If we got a result from a command within the function it has to be
- * the final command. All others shouldn't be returning anything.
+ * If we got a result from a command within the function it has to be the
+ * final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
@@ -711,8 +708,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
errmsg("set-valued function called in context that cannot accept a set")));
/*
- * Ensure we will get shut down cleanly if the exprcontext is not
- * run to completion.
+ * Ensure we will get shut down cleanly if the exprcontext is not run
+ * to completion.
*/
if (!fcache->shutdown_reg)
{
@@ -754,8 +751,7 @@ sql_exec_error_callback(void *arg)
fn_name = NameStr(functup->proname);
/*
- * If there is a syntax error position, convert to internal syntax
- * error
+ * If there is a syntax error position, convert to internal syntax error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@@ -776,11 +772,11 @@ sql_exec_error_callback(void *arg)
}
/*
- * Try to determine where in the function we failed. If there is a
- * query with non-null QueryDesc, finger it. (We check this rather
- * than looking for F_EXEC_RUN state, so that errors during
- * ExecutorStart or ExecutorEnd are blamed on the appropriate query;
- * see postquel_start and postquel_end.)
+ * Try to determine where in the function we failed. If there is a query
+ * with non-null QueryDesc, finger it. (We check this rather than looking
+ * for F_EXEC_RUN state, so that errors during ExecutorStart or
+ * ExecutorEnd are blamed on the appropriate query; see postquel_start and
+ * postquel_end.)
*/
if (fcache)
{
@@ -888,9 +884,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (rettype != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
return false;
}
@@ -901,17 +897,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
tlist = parse->targetList;
/*
- * The last query must be a SELECT if and only if return type isn't
- * VOID.
+ * The last query must be a SELECT if and only if return type isn't VOID.
*/
if (rettype == VOIDOID)
{
if (cmd == CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must not be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must not be a SELECT.")));
return false;
}
@@ -919,9 +914,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
@@ -934,22 +929,22 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
{
/*
* For base-type returns, the target list should have exactly one
- * entry, and its type should agree with what the user declared.
- * (As of Postgres 7.2, we accept binary-compatible types too.)
+ * entry, and its type should agree with what the user declared. (As
+ * of Postgres 7.2, we accept binary-compatible types too.)
*/
if (tlistlen != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Final SELECT must return exactly one column.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Final SELECT must return exactly one column.")));
restype = exprType((Node *) ((TargetEntry *) linitial(tlist))->expr);
if (!IsBinaryCoercible(restype, rettype))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
errdetail("Actual return type is %s.",
format_type_be(restype))));
}
@@ -957,16 +952,16 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
{
/* Returns a rowtype */
TupleDesc tupdesc;
- int tupnatts; /* physical number of columns in tuple */
- int tuplogcols; /* # of nondeleted columns in tuple */
- int colindex; /* physical column index */
+ int tupnatts; /* physical number of columns in tuple */
+ int tuplogcols; /* # of nondeleted columns in tuple */
+ int colindex; /* physical column index */
/*
- * If the target list is of length 1, and the type of the varnode
- * in the target list matches the declared return type, this is
- * okay. This can happen, for example, where the body of the
- * function is 'SELECT func2()', where func2 has the same return
- * type as the function that's calling it.
+ * If the target list is of length 1, and the type of the varnode in
+ * the target list matches the declared return type, this is okay.
+ * This can happen, for example, where the body of the function is
+ * 'SELECT func2()', where func2 has the same return type as the
+ * function that's calling it.
*/
if (tlistlen == 1)
{
@@ -979,9 +974,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (get_func_result_type(func_id, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
{
/*
- * Assume we are returning the whole tuple.
- * Crosschecking against what the caller expects will happen at
- * runtime.
+ * Assume we are returning the whole tuple. Crosschecking against
+ * what the caller expects will happen at runtime.
*/
if (junkFilter)
*junkFilter = ExecInitJunkFilter(tlist, false, NULL);
@@ -990,9 +984,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
Assert(tupdesc);
/*
- * Verify that the targetlist matches the return tuple type.
- * We scan the non-deleted attributes to ensure that they match the
- * datatypes of the non-resjunk columns.
+ * Verify that the targetlist matches the return tuple type. We scan
+ * the non-deleted attributes to ensure that they match the datatypes
+ * of the non-resjunk columns.
*/
tupnatts = tupdesc->natts;
tuplogcols = 0; /* we'll count nondeleted cols as we go */
@@ -1016,7 +1010,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT returns too many columns.")));
+ errdetail("Final SELECT returns too many columns.")));
attr = tupdesc->attrs[colindex - 1];
} while (attr->attisdropped);
tuplogcols++;
@@ -1046,15 +1040,15 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (tlistlen != tuplogcols)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
errdetail("Final SELECT returns too few columns.")));
/* Set up junk filter if needed */
if (junkFilter)
*junkFilter = ExecInitJunkFilterConversion(tlist,
- CreateTupleDescCopy(tupdesc),
- NULL);
+ CreateTupleDescCopy(tupdesc),
+ NULL);
/* Report that we are returning entire tuple result */
return true;
@@ -1070,8 +1064,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type %s is not supported for SQL functions",
- format_type_be(rettype))));
+ errmsg("return type %s is not supported for SQL functions",
+ format_type_be(rettype))));
return false;
}
diff --git a/src/backend/executor/instrument.c b/src/backend/executor/instrument.c
index c5b4a252d61..08d35c16163 100644
--- a/src/backend/executor/instrument.c
+++ b/src/backend/executor/instrument.c
@@ -7,7 +7,7 @@
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.12 2005/04/16 20:07:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.13 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@ InstrStartNode(Instrumentation *instr)
void
InstrStopNode(Instrumentation *instr, bool returnedTuple)
{
- instr_time endtime;
+ instr_time endtime;
/* count the returned tuples */
if (returnedTuple)
@@ -72,7 +72,7 @@ InstrStopNode(Instrumentation *instr, bool returnedTuple)
instr->counter.tv_usec -= 1000000;
instr->counter.tv_sec++;
}
-#else /* WIN32 */
+#else /* WIN32 */
instr->counter.QuadPart += (endtime.QuadPart - instr->starttime.QuadPart);
#endif
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index b7a0bc344ff..0403c9aca1b 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -53,7 +53,7 @@
* pass-by-ref inputs, but in the aggregate case we know the left input is
* either the initial transition value or a previous function result, and
* in either case its value need not be preserved. See int8inc() for an
- * example. Notice that advance_transition_function() is coded to avoid a
+ * example. Notice that advance_transition_function() is coded to avoid a
* data copy step when the previous transition value pointer is returned.
*
*
@@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.134 2005/06/28 05:08:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,8 +109,8 @@ typedef struct AggStatePerAggData
/*
* fmgr lookup data for transfer functions --- only valid when
- * corresponding oid is not InvalidOid. Note in particular that
- * fn_strict flags are kept here.
+ * corresponding oid is not InvalidOid. Note in particular that fn_strict
+ * flags are kept here.
*/
FmgrInfo transfn;
FmgrInfo finalfn;
@@ -124,8 +124,8 @@ typedef struct AggStatePerAggData
Oid sortOperator;
/*
- * fmgr lookup data for input type's equality operator --- only
- * set/used when aggregate has DISTINCT flag.
+ * fmgr lookup data for input type's equality operator --- only set/used
+ * when aggregate has DISTINCT flag.
*/
FmgrInfo equalfn;
@@ -147,14 +147,14 @@ typedef struct AggStatePerAggData
transtypeByVal;
/*
- * These values are working state that is initialized at the start of
- * an input tuple group and updated for each input tuple.
+ * These values are working state that is initialized at the start of an
+ * input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
* straight to the transition function. If it's DISTINCT, we pass the
- * input values into a Tuplesort object; then at completion of the
- * input tuple group, we scan the sorted values, eliminate duplicates,
- * and run the transition function on the rest.
+ * input values into a Tuplesort object; then at completion of the input
+ * tuple group, we scan the sorted values, eliminate duplicates, and run
+ * the transition function on the rest.
*/
Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */
@@ -184,12 +184,11 @@ typedef struct AggStatePerGroupData
bool noTransValue; /* true if transValue not set yet */
/*
- * Note: noTransValue initially has the same value as
- * transValueIsNull, and if true both are cleared to false at the same
- * time. They are not the same though: if transfn later returns a
- * NULL, we want to keep that NULL and not auto-replace it with a
- * later input value. Only the first non-NULL input will be
- * auto-substituted.
+ * Note: noTransValue initially has the same value as transValueIsNull,
+ * and if true both are cleared to false at the same time. They are not
+ * the same though: if transfn later returns a NULL, we want to keep that
+ * NULL and not auto-replace it with a later input value. Only the first
+ * non-NULL input will be auto-substituted.
*/
} AggStatePerGroupData;
@@ -270,11 +269,11 @@ initialize_aggregates(AggState *aggstate,
}
/*
- * If we are reinitializing after a group boundary, we have to
- * free any prior transValue to avoid memory leakage. We must
- * check not only the isnull flag but whether the pointer is NULL;
- * since pergroupstate is initialized with palloc0, the initial
- * condition has isnull = 0 and null pointer.
+ * If we are reinitializing after a group boundary, we have to free
+ * any prior transValue to avoid memory leakage. We must check not
+ * only the isnull flag but whether the pointer is NULL; since
+ * pergroupstate is initialized with palloc0, the initial condition
+ * has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
@@ -284,8 +283,8 @@ initialize_aggregates(AggState *aggstate,
/*
* (Re)set transValue to the initial value.
*
- * Note that when the initial value is pass-by-ref, we must copy it
- * (into the aggcontext) since we will pfree the transValue later.
+ * Note that when the initial value is pass-by-ref, we must copy it (into
+ * the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@@ -295,18 +294,18 @@ initialize_aggregates(AggState *aggstate,
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
- * If the initial value for the transition state doesn't exist in
- * the pg_aggregate table then we will let the first non-NULL
- * value returned from the outer procNode become the initial
- * value. (This is useful for aggregates like max() and min().)
- * The noTransValue flag signals that we still need to do this.
+ * If the initial value for the transition state doesn't exist in the
+ * pg_aggregate table then we will let the first non-NULL value
+ * returned from the outer procNode become the initial value. (This is
+ * useful for aggregates like max() and min().) The noTransValue flag
+ * signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@@ -337,20 +336,18 @@ advance_transition_function(AggState *aggstate,
if (pergroupstate->noTransValue)
{
/*
- * transValue has not been initialized. This is the first
- * non-NULL input value. We use it as the initial value for
- * transValue. (We already checked that the agg's input type
- * is binary-compatible with its transtype, so straight copy
- * here is OK.)
+ * transValue has not been initialized. This is the first non-NULL
+ * input value. We use it as the initial value for transValue. (We
+ * already checked that the agg's input type is binary-compatible
+ * with its transtype, so straight copy here is OK.)
*
- * We must copy the datum into aggcontext if it is pass-by-ref.
- * We do not need to pfree the old transValue, since it's
- * NULL.
+ * We must copy the datum into aggcontext if it is pass-by-ref. We do
+ * not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
@@ -360,10 +357,9 @@ advance_transition_function(AggState *aggstate,
{
/*
* Don't call a strict function with NULL inputs. Note it is
- * possible to get here despite the above tests, if the
- * transfn is strict *and* returned a NULL on a prior cycle.
- * If that happens we will propagate the NULL all the way to
- * the end.
+ * possible to get here despite the above tests, if the transfn is
+ * strict *and* returned a NULL on a prior cycle. If that happens
+ * we will propagate the NULL all the way to the end.
*/
return;
}
@@ -385,12 +381,12 @@ advance_transition_function(AggState *aggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If pass-by-ref datatype, must copy the new value into aggcontext
- * and pfree the prior transValue. But if transfn returned a pointer
- * to its first input, we don't need to do anything.
+ * If pass-by-ref datatype, must copy the new value into aggcontext and
+ * pfree the prior transValue. But if transfn returned a pointer to its
+ * first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
- DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
+ DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
@@ -473,24 +469,24 @@ process_sorted_aggregate(AggState *aggstate,
tuplesort_performsort(peraggstate->sortstate);
/*
- * Note: if input type is pass-by-ref, the datums returned by the sort
- * are freshly palloc'd in the per-query context, so we must be
- * careful to pfree them when they are no longer needed.
+ * Note: if input type is pass-by-ref, the datums returned by the sort are
+ * freshly palloc'd in the per-query context, so we must be careful to
+ * pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
/*
- * DISTINCT always suppresses nulls, per SQL spec, regardless of
- * the transition function's strictness.
+ * DISTINCT always suppresses nulls, per SQL spec, regardless of the
+ * transition function's strictness.
*/
if (isNull)
continue;
/*
- * Clear and select the working context for evaluation of the
- * equality function and transition function.
+ * Clear and select the working context for evaluation of the equality
+ * function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
@@ -726,8 +722,8 @@ agg_retrieve_direct(AggState *aggstate)
while (!aggstate->agg_done)
{
/*
- * If we don't already have the first tuple of the new group,
- * fetch it from the outer plan.
+ * If we don't already have the first tuple of the new group, fetch it
+ * from the outer plan.
*/
if (aggstate->grp_firstTuple == NULL)
{
@@ -735,8 +731,8 @@ agg_retrieve_direct(AggState *aggstate)
if (!TupIsNull(outerslot))
{
/*
- * Make a copy of the first input tuple; we will use this
- * for comparisons (in group mode) and for projection.
+ * Make a copy of the first input tuple; we will use this for
+ * comparisons (in group mode) and for projection.
*/
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
}
@@ -764,8 +760,8 @@ agg_retrieve_direct(AggState *aggstate)
{
/*
* Store the copied first input tuple in the tuple table slot
- * reserved for it. The tuple will be deleted when it is
- * cleared from the slot.
+ * reserved for it. The tuple will be deleted when it is cleared
+ * from the slot.
*/
ExecStoreTuple(aggstate->grp_firstTuple,
firstSlot,
@@ -807,7 +803,7 @@ agg_retrieve_direct(AggState *aggstate)
outerslot,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
@@ -838,17 +834,16 @@ agg_retrieve_direct(AggState *aggstate)
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
- * ExecQual/ExecProject. 99.44% of the time this is a waste of
- * cycles, because ordinarily the projected output tuple's
- * targetlist cannot contain any direct (non-aggregated)
- * references to input columns, so the dummy tuple will not be
- * referenced. However there are special cases where this isn't so
- * --- in particular an UPDATE involving an aggregate will have a
- * targetlist reference to ctid. We need to return a null for
- * ctid in that situation, not coredump.
+ * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
+ * because ordinarily the projected output tuple's targetlist cannot
+ * contain any direct (non-aggregated) references to input columns, so
+ * the dummy tuple will not be referenced. However there are special
+ * cases where this isn't so --- in particular an UPDATE involving an
+ * aggregate will have a targetlist reference to ctid. We need to
+ * return a null for ctid in that situation, not coredump.
*
- * The values returned for the aggregates will be the initial values
- * of the transition functions.
+ * The values returned for the aggregates will be the initial values of
+ * the transition functions.
*/
if (TupIsNull(firstSlot))
{
@@ -866,15 +861,15 @@ agg_retrieve_direct(AggState *aggstate)
econtext->ecxt_scantuple = firstSlot;
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to try to process another group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate
- * results and the representative input tuple. Note we do not
- * support aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate results
+ * and the representative input tuple. Note we do not support
+ * aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -903,8 +898,8 @@ agg_fill_hash_table(AggState *aggstate)
tmpcontext = aggstate->tmpcontext;
/*
- * Process each outer-plan tuple, and then fetch the next one, until
- * we exhaust the outer plan.
+ * Process each outer-plan tuple, and then fetch the next one, until we
+ * exhaust the outer plan.
*/
for (;;)
{
@@ -979,8 +974,8 @@ agg_retrieve_hash_table(AggState *aggstate)
ResetExprContext(econtext);
/*
- * Store the copied first input tuple in the tuple table slot
- * reserved for it, so that it can be used in ExecProject.
+ * Store the copied first input tuple in the tuple table slot reserved
+ * for it, so that it can be used in ExecProject.
*/
ExecStoreTuple(entry->shared.firstTuple,
firstSlot,
@@ -1010,15 +1005,15 @@ agg_retrieve_hash_table(AggState *aggstate)
econtext->ecxt_scantuple = firstSlot;
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to try to process another group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate
- * results and the representative input tuple. Note we do not
- * support aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate results
+ * and the representative input tuple. Note we do not support
+ * aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
@@ -1065,8 +1060,8 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a
- * little by using ExecAssignExprContext() to build both.
+ * processing and one for per-output-tuple processing. We cheat a little
+ * by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
@@ -1074,10 +1069,10 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* We also need a long-lived memory context for holding hashtable data
- * structures and transition values. NOTE: the details of what is
- * stored in aggcontext and what is stored in the regular per-query
- * memory context are driven by a simple decision: we want to reset
- * the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
+ * structures and transition values. NOTE: the details of what is stored
+ * in aggcontext and what is stored in the regular per-query memory
+ * context are driven by a simple decision: we want to reset the
+ * aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -1098,10 +1093,10 @@ ExecInitAgg(Agg *node, EState *estate)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no
- * sense under SQL semantics anyway (and it's forbidden by the spec).
- * Because that is true, we don't need to worry about evaluating the
- * aggs in any particular order.
+ * contain other agg calls in their arguments. This would make no sense
+ * under SQL semantics anyway (and it's forbidden by the spec). Because
+ * that is true, we don't need to worry about evaluating the aggs in any
+ * particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
@@ -1135,20 +1130,19 @@ ExecInitAgg(Agg *node, EState *estate)
if (numaggs <= 0)
{
/*
- * This is not an error condition: we might be using the Agg node
- * just to do hash-based grouping. Even in the regular case,
- * constant-expression simplification could optimize away all of
- * the Aggrefs in the targetlist and qual. So keep going, but
- * force local copy of numaggs positive so that palloc()s below
- * don't choke.
+ * This is not an error condition: we might be using the Agg node just
+ * to do hash-based grouping. Even in the regular case,
+ * constant-expression simplification could optimize away all of the
+ * Aggrefs in the targetlist and qual. So keep going, but force local
+ * copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
}
/*
- * If we are grouping, precompute fmgr lookup data for inner loop. We
- * need both equality and hashing functions to do it by hashing, but
- * only equality if not hashing.
+ * If we are grouping, precompute fmgr lookup data for inner loop. We need
+ * both equality and hashing functions to do it by hashing, but only
+ * equality if not hashing.
*/
if (node->numCols > 0)
{
@@ -1166,8 +1160,8 @@ ExecInitAgg(Agg *node, EState *estate)
}
/*
- * Set up aggregate-result storage in the output expr context, and
- * also allocate my private per-agg working storage
+ * Set up aggregate-result storage in the output expr context, and also
+ * allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
@@ -1192,10 +1186,10 @@ ExecInitAgg(Agg *node, EState *estate)
/*
* Perform lookups of aggregate function info, and initialize the
* unchanging fields of the per-agg data. We also detect duplicate
- * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
- * When duplicates are detected, we only make an AggStatePerAgg struct
- * for the first one. The clones are simply pointed at the same
- * result entry by giving them duplicate aggno values.
+ * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0"). When
+ * duplicates are detected, we only make an AggStatePerAgg struct for the
+ * first one. The clones are simply pointed at the same result entry by
+ * giving them duplicate aggno values.
*/
aggno = -1;
foreach(l, aggstate->aggs)
@@ -1243,9 +1237,9 @@ ExecInitAgg(Agg *node, EState *estate)
peraggstate->aggref = aggref;
/*
- * Get actual datatype of the input. We need this because it may
- * be different from the agg's declared input type, when the agg
- * accepts ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
+ * Get actual datatype of the input. We need this because it may be
+ * different from the agg's declared input type, when the agg accepts
+ * ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
*/
inputType = exprType((Node *) aggref->target);
@@ -1270,7 +1264,7 @@ ExecInitAgg(Agg *node, EState *estate)
/* Check that aggregate owner has permission to call component fns */
{
HeapTuple procTuple;
- Oid aggOwner;
+ Oid aggOwner;
procTuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(aggref->aggfnoid),
@@ -1339,8 +1333,8 @@ ExecInitAgg(Agg *node, EState *estate)
&peraggstate->transtypeByVal);
/*
- * initval is potentially null, so don't try to access it as a
- * struct field. Must do it the hard way with SysCacheGetAttr.
+ * initval is potentially null, so don't try to access it as a struct
+ * field. Must do it the hard way with SysCacheGetAttr.
*/
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
@@ -1353,11 +1347,11 @@ ExecInitAgg(Agg *node, EState *estate)
aggtranstype);
/*
- * If the transfn is strict and the initval is NULL, make sure
- * input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value as
- * the initial transValue. This should have been checked at agg
- * definition time, but just in case...
+ * If the transfn is strict and the initval is NULL, make sure input
+ * type and transtype are the same (or at least binary- compatible),
+ * so that it's OK to use the first input value as the initial
+ * transValue. This should have been checked at agg definition time,
+ * but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
@@ -1463,18 +1457,18 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
- * In the hashed case, if we haven't yet built the hash table then
- * we can just return; nothing done yet, so nothing to undo. If
- * subnode's chgParam is not NULL then it will be re-scanned by
- * ExecProcNode, else no reason to re-scan it at all.
+ * In the hashed case, if we haven't yet built the hash table then we
+ * can just return; nothing done yet, so nothing to undo. If subnode's
+ * chgParam is not NULL then it will be re-scanned by ExecProcNode,
+ * else no reason to re-scan it at all.
*/
if (!node->table_filled)
return;
/*
* If we do have the hash table and the subplan does not have any
- * parameter changes, then we can just rescan the existing hash
- * table; no need to build it again.
+ * parameter changes, then we can just rescan the existing hash table;
+ * no need to build it again.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
{
@@ -1516,8 +1510,7 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
else
{
/*
- * Reset the per-group state (in particular, mark transvalues
- * null)
+ * Reset the per-group state (in particular, mark transvalues null)
*/
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index b88eec46a40..fc5c445db0e 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.64 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.65 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,10 +88,9 @@ exec_append_initialize_next(AppendState *appendstate)
if (whichplan < appendstate->as_firstplan)
{
/*
- * if scanning in reverse, we start at the last scan in the list
- * and then proceed back to the first.. in any case we inform
- * ExecAppend that we are at the end of the line by returning
- * FALSE
+ * if scanning in reverse, we start at the last scan in the list and
+ * then proceed back to the first.. in any case we inform ExecAppend
+ * that we are at the end of the line by returning FALSE
*/
appendstate->as_whichplan = appendstate->as_firstplan;
return FALSE;
@@ -99,8 +98,7 @@ exec_append_initialize_next(AppendState *appendstate)
else if (whichplan > appendstate->as_lastplan)
{
/*
- * as above, end the scan if we go beyond the last scan in our
- * list..
+ * as above, end the scan if we go beyond the last scan in our list..
*/
appendstate->as_whichplan = appendstate->as_lastplan;
return FALSE;
@@ -110,8 +108,8 @@ exec_append_initialize_next(AppendState *appendstate)
/*
* initialize the scan
*
- * If we are controlling the target relation, select the proper
- * active ResultRelInfo and junk filter for this target.
+ * If we are controlling the target relation, select the proper active
+ * ResultRelInfo and junk filter for this target.
*/
if (((Append *) appendstate->ps.plan)->isTarget)
{
@@ -168,9 +166,8 @@ ExecInitAppend(Append *node, EState *estate)
appendstate->as_nplans = nplans;
/*
- * Do we want to scan just one subplan? (Special case for
- * EvalPlanQual) XXX pretty dirty way of determining that this case
- * applies ...
+ * Do we want to scan just one subplan? (Special case for EvalPlanQual)
+ * XXX pretty dirty way of determining that this case applies ...
*/
if (node->isTarget && estate->es_evTuple != NULL)
{
@@ -199,8 +196,8 @@ ExecInitAppend(Append *node, EState *estate)
#define APPEND_NSLOTS 1
/*
- * append nodes still have Result slots, which hold pointers to
- * tuples, so we have to initialize them.
+ * append nodes still have Result slots, which hold pointers to tuples, so
+ * we have to initialize them.
*/
ExecInitResultTupleSlot(estate, &appendstate->ps);
@@ -220,10 +217,10 @@ ExecInitAppend(Append *node, EState *estate)
}
/*
- * Initialize tuple type. (Note: in an inherited UPDATE situation,
- * the tuple type computed here corresponds to the parent table, which
- * is really a lie since tuples returned from child subplans will not
- * all look the same.)
+ * Initialize tuple type. (Note: in an inherited UPDATE situation, the
+ * tuple type computed here corresponds to the parent table, which is
+ * really a lie since tuples returned from child subplans will not all
+ * look the same.)
*/
ExecAssignResultTypeFromTL(&appendstate->ps);
appendstate->ps.ps_ProjInfo = NULL;
@@ -275,19 +272,19 @@ ExecAppend(AppendState *node)
if (!TupIsNull(result))
{
/*
- * If the subplan gave us something then return it as-is.
- * We do NOT make use of the result slot that was set up in
- * ExecInitAppend, first because there's no reason to and
- * second because it may have the wrong tuple descriptor in
+ * If the subplan gave us something then return it as-is. We do
+ * NOT make use of the result slot that was set up in
+ * ExecInitAppend, first because there's no reason to and second
+ * because it may have the wrong tuple descriptor in
* inherited-UPDATE cases.
*/
return result;
}
/*
- * Go on to the "next" subplan in the appropriate direction.
- * If no more subplans, return the empty slot set up for us
- * by ExecInitAppend.
+ * Go on to the "next" subplan in the appropriate direction. If no
+ * more subplans, return the empty slot set up for us by
+ * ExecInitAppend.
*/
if (ScanDirectionIsForward(node->ps.state->es_direction))
node->as_whichplan++;
@@ -348,8 +345,8 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
UpdateChangedParamSet(subnode, node->ps.chgParam);
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (subnode->chgParam == NULL)
{
diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c
index 939062d4d6c..a9e63cbfccb 100644
--- a/src/backend/executor/nodeBitmapAnd.c
+++ b/src/backend/executor/nodeBitmapAnd.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.3 2005/08/28 22:47:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -16,7 +16,7 @@
* ExecInitBitmapAnd - initialize the BitmapAnd node
* MultiExecBitmapAnd - retrieve the result bitmap from the node
* ExecEndBitmapAnd - shut down the BitmapAnd node
- * ExecReScanBitmapAnd - rescan the BitmapAnd node
+ * ExecReScanBitmapAnd - rescan the BitmapAnd node
*
* NOTES
* BitmapAnd nodes don't make use of their left and right
@@ -137,7 +137,7 @@ MultiExecBitmapAnd(BitmapAndState *node)
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
- result = subresult; /* first subplan */
+ result = subresult; /* first subplan */
else
{
tbm_intersect(result, subresult);
@@ -145,11 +145,11 @@ MultiExecBitmapAnd(BitmapAndState *node)
}
/*
- * If at any stage we have a completely empty bitmap, we can fall
- * out without evaluating the remaining subplans, since ANDing them
- * can no longer change the result. (Note: the fact that indxpath.c
- * orders the subplans by selectivity should make this case more
- * likely to occur.)
+ * If at any stage we have a completely empty bitmap, we can fall out
+ * without evaluating the remaining subplans, since ANDing them can no
+ * longer change the result. (Note: the fact that indxpath.c orders
+ * the subplans by selectivity should make this case more likely to
+ * occur.)
*/
if (tbm_is_empty(result))
break;
@@ -160,7 +160,7 @@ MultiExecBitmapAnd(BitmapAndState *node)
/* must provide our own instrumentation support */
if (node->ps.instrument)
- InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
+ InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 3c3c1fd96f1..5d92c19ea5e 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.3 2005/10/06 02:29:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,11 +76,11 @@ BitmapHeapNext(BitmapHeapScanState *node)
tbmres = node->tbmres;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer, below.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer, below.
*/
ExecClearTuple(slot);
@@ -105,7 +105,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
ResetExprContext(econtext);
if (!ExecQual(node->bitmapqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
+ ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
@@ -114,8 +114,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
- * If we haven't yet performed the underlying index scan, do it,
- * and prepare the bitmap to be iterated over.
+ * If we haven't yet performed the underlying index scan, do it, and
+ * prepare the bitmap to be iterated over.
*/
if (tbm == NULL)
{
@@ -145,10 +145,10 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
- * Ignore any claimed entries past what we think is the end of
- * the relation. (This is probably not necessary given that we
- * got AccessShareLock before performing any of the indexscans,
- * but let's be safe.)
+ * Ignore any claimed entries past what we think is the end of the
+ * relation. (This is probably not necessary given that we got
+ * AccessShareLock before performing any of the indexscans, but
+ * let's be safe.)
*/
if (tbmres->blockno >= scandesc->rs_nblocks)
{
@@ -157,19 +157,18 @@ BitmapHeapNext(BitmapHeapScanState *node)
}
/*
- * Acquire pin on the current heap page. We'll hold the pin
- * until done looking at the page. We trade in any pin we
- * held before.
+ * Acquire pin on the current heap page. We'll hold the pin until
+ * done looking at the page. We trade in any pin we held before.
*/
scandesc->rs_cbuf = ReleaseAndReadBuffer(scandesc->rs_cbuf,
scandesc->rs_rd,
tbmres->blockno);
/*
- * Determine how many entries we need to look at on this page.
- * If the bitmap is lossy then we need to look at each physical
- * item pointer; otherwise we just look through the offsets
- * listed in tbmres.
+ * Determine how many entries we need to look at on this page. If
+ * the bitmap is lossy then we need to look at each physical item
+ * pointer; otherwise we just look through the offsets listed in
+ * tbmres.
*/
if (tbmres->ntuples >= 0)
{
@@ -180,7 +179,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
else
{
/* lossy case */
- Page dp;
+ Page dp;
LockBuffer(scandesc->rs_cbuf, BUFFER_LOCK_SHARE);
dp = (Page) BufferGetPage(scandesc->rs_cbuf);
@@ -230,8 +229,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
ItemPointerSet(&scandesc->rs_ctup.t_self, tbmres->blockno, targoffset);
/*
- * Fetch the heap tuple and see if it matches the snapshot.
- * We use heap_release_fetch to avoid useless bufmgr traffic.
+ * Fetch the heap tuple and see if it matches the snapshot. We use
+ * heap_release_fetch to avoid useless bufmgr traffic.
*/
if (heap_release_fetch(scandesc->rs_rd,
scandesc->rs_snapshot,
@@ -241,8 +240,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
&scandesc->rs_pgstat_info))
{
/*
- * Set up the result slot to point to this tuple.
- * Note that the slot acquires a pin on the buffer.
+ * Set up the result slot to point to this tuple. Note that the
+ * slot acquires a pin on the buffer.
*/
ExecStoreTuple(&scandesc->rs_ctup,
slot,
@@ -338,8 +337,8 @@ ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt)
node->tbmres = NULL;
/*
- * Always rescan the input immediately, to ensure we can pass down
- * any outer tuple that might be used in index quals.
+ * Always rescan the input immediately, to ensure we can pass down any
+ * outer tuple that might be used in index quals.
*/
ExecReScan(outerPlanState(node), exprCtxt);
}
@@ -391,9 +390,9 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * ExecInitBitmapHeapScan. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * ExecInitBitmapHeapScan. This lock should be held till end of
+ * transaction. (There is a faction that considers this too much locking,
+ * however.)
*/
heap_close(relation, NoLock);
}
@@ -470,9 +469,9 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate)
scanstate->ss.ss_currentRelation = currentRelation;
/*
- * Even though we aren't going to do a conventional seqscan, it is
- * useful to create a HeapScanDesc --- this checks the relation size
- * and sets up statistical infrastructure for us.
+ * Even though we aren't going to do a conventional seqscan, it is useful
+ * to create a HeapScanDesc --- this checks the relation size and sets up
+ * statistical infrastructure for us.
*/
scanstate->ss.ss_currentScanDesc = heap_beginscan(currentRelation,
estate->es_snapshot,
@@ -482,7 +481,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate)
/*
* One problem is that heap_beginscan counts a "sequential scan" start,
* when we actually aren't doing any such thing. Reverse out the added
- * scan count. (Eventually we may want to count bitmap scans separately.)
+ * scan count. (Eventually we may want to count bitmap scans separately.)
*/
pgstat_discount_heap_scan(&scanstate->ss.ss_currentScanDesc->rs_pgstat_info);
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 231c35b9560..49b63170d49 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.9 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,17 +54,16 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
scandesc = node->biss_ScanDesc;
/*
- * If we have runtime keys and they've not already been set up, do it
- * now.
+ * If we have runtime keys and they've not already been set up, do it now.
*/
if (node->biss_RuntimeKeyInfo && !node->biss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
/*
* Prepare the result bitmap. Normally we just create a new one to pass
- * back; however, our parent node is allowed to store a pre-made one
- * into node->biss_result, in which case we just OR our tuple IDs into
- * the existing bitmap. (This saves needing explicit UNION steps.)
+ * back; however, our parent node is allowed to store a pre-made one into
+ * node->biss_result, in which case we just OR our tuple IDs into the
+ * existing bitmap. (This saves needing explicit UNION steps.)
*/
if (node->biss_result)
{
@@ -82,7 +81,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
*/
for (;;)
{
- bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
+ bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
if (ntids > 0)
{
@@ -116,8 +115,7 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt)
ExprContext *econtext;
ExprState **runtimeKeyInfo;
- econtext = node->biss_RuntimeContext; /* context for runtime
- * keys */
+ econtext = node->biss_RuntimeContext; /* context for runtime keys */
runtimeKeyInfo = node->biss_RuntimeKeyInfo;
if (econtext)
@@ -130,16 +128,16 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt)
econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
/*
- * Reset the runtime-key context so we don't leak memory as each
- * outer tuple is scanned. Note this assumes that we will
- * recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each outer
+ * tuple is scanned. Note this assumes that we will recalculate *all*
+ * runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
- * If we are doing runtime key calculations (ie, the index keys depend
- * on data from an outer scan), compute the new key values
+ * If we are doing runtime key calculations (ie, the index keys depend on
+ * data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
@@ -213,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* Miscellaneous initialization
*
- * We do not need a standard exprcontext for this node, though we may
- * decide below to create a runtime-key exprcontext
+ * We do not need a standard exprcontext for this node, though we may decide
+ * below to create a runtime-key exprcontext
*/
/*
@@ -252,10 +250,10 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
indexstate->biss_NumScanKeys = numScanKeys;
/*
- * If we have runtime keys, we need an ExprContext to evaluate them.
- * We could just create a "standard" plan node exprcontext, but to
- * keep the code looking similar to nodeIndexscan.c, it seems better
- * to stick with the approach of using a separate ExprContext.
+ * If we have runtime keys, we need an ExprContext to evaluate them. We
+ * could just create a "standard" plan node exprcontext, but to keep the
+ * code looking similar to nodeIndexscan.c, it seems better to stick with
+ * the approach of using a separate ExprContext.
*/
if (have_runtime_keys)
{
@@ -272,17 +270,17 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* We do not open or lock the base relation here. We assume that an
- * ancestor BitmapHeapScan node is holding AccessShareLock on the
- * heap relation throughout the execution of the plan tree.
+ * ancestor BitmapHeapScan node is holding AccessShareLock on the heap
+ * relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
indexstate->ss.ss_currentScanDesc = NULL;
/*
- * open the index relation and initialize relation and scan
- * descriptors. Note we acquire no locks here; the index machinery
- * does its own locks and unlocks.
+ * open the index relation and initialize relation and scan descriptors.
+ * Note we acquire no locks here; the index machinery does its own locks
+ * and unlocks.
*/
indexstate->biss_RelationDesc = index_open(node->indexid);
indexstate->biss_ScanDesc =
diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c
index 9078855ec33..772b948cc52 100644
--- a/src/backend/executor/nodeBitmapOr.c
+++ b/src/backend/executor/nodeBitmapOr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.2 2005/04/20 15:48:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.3 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,13 +133,13 @@ MultiExecBitmapOr(BitmapOrState *node)
TIDBitmap *subresult;
/*
- * We can special-case BitmapIndexScan children to avoid an
- * explicit tbm_union step for each child: just pass down the
- * current result bitmap and let the child OR directly into it.
+ * We can special-case BitmapIndexScan children to avoid an explicit
+ * tbm_union step for each child: just pass down the current result
+ * bitmap and let the child OR directly into it.
*/
if (IsA(subnode, BitmapIndexScanState))
{
- if (result == NULL) /* first subplan */
+ if (result == NULL) /* first subplan */
{
/* XXX should we use less than work_mem for this? */
result = tbm_create(work_mem * 1024L);
@@ -161,7 +161,7 @@ MultiExecBitmapOr(BitmapOrState *node)
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
- result = subresult; /* first subplan */
+ result = subresult; /* first subplan */
else
{
tbm_union(result, subresult);
@@ -176,7 +176,7 @@ MultiExecBitmapOr(BitmapOrState *node)
/* must provide our own instrumentation support */
if (node->ps.instrument)
- InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
+ InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index 5cd6de45fda..a0178e8fa17 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.34 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.35 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,9 +60,8 @@ FunctionNext(FunctionScanState *node)
tuplestorestate = node->tuplestorestate;
/*
- * If first time through, read all tuples from function and put them
- * in a tuplestore. Subsequent calls just fetch tuples from
- * tuplestore.
+ * If first time through, read all tuples from function and put them in a
+ * tuplestore. Subsequent calls just fetch tuples from tuplestore.
*/
if (tuplestorestate == NULL)
{
@@ -77,10 +76,10 @@ FunctionNext(FunctionScanState *node)
/*
* If function provided a tupdesc, cross-check it. We only really
- * need to do this for functions returning RECORD, but might as
- * well do it always.
+ * need to do this for functions returning RECORD, but might as well
+ * do it always.
*/
- if (funcTupdesc)
+ if (funcTupdesc)
tupledesc_match(node->tupdesc, funcTupdesc);
}
@@ -174,8 +173,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate)
Assert(rte->rtekind == RTE_FUNCTION);
/*
- * Now determine if the function returns a simple or composite type,
- * and build an appropriate tupdesc.
+ * Now determine if the function returns a simple or composite type, and
+ * build an appropriate tupdesc.
*/
functypclass = get_expr_result_type(rte->funcexpr,
&funcrettype,
@@ -213,8 +212,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate)
/*
* For RECORD results, make sure a typmod has been assigned. (The
- * function should do this for itself, but let's cover things in case
- * it doesn't.)
+ * function should do this for itself, but let's cover things in case it
+ * doesn't.)
*/
BlessTupleDesc(tupdesc);
@@ -329,10 +328,10 @@ ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt)
return;
/*
- * Here we have a choice whether to drop the tuplestore (and recompute
- * the function outputs) or just rescan it. This should depend on
- * whether the function expression contains parameters and/or is
- * marked volatile. FIXME soon.
+ * Here we have a choice whether to drop the tuplestore (and recompute the
+ * function outputs) or just rescan it. This should depend on whether the
+ * function expression contains parameters and/or is marked volatile.
+ * FIXME soon.
*/
if (node->ss.ps.chgParam != NULL)
{
@@ -376,7 +375,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function return row and query-specified return row do not match"),
- errdetail("Returned type %s at ordinal position %d, but query expects %s.",
+ errdetail("Returned type %s at ordinal position %d, but query expects %s.",
format_type_be(sattr->atttypid),
i + 1,
format_type_be(dattr->atttypid))));
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index e16a228fa15..91a08add4d9 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.61 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.62 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,8 +61,8 @@ ExecGroup(GroupState *node)
*/
/*
- * If first time through, acquire first input tuple and determine
- * whether to return it or not.
+ * If first time through, acquire first input tuple and determine whether
+ * to return it or not.
*/
if (TupIsNull(firsttupleslot))
{
@@ -76,15 +76,15 @@ ExecGroup(GroupState *node)
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
+
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and fall into scan loop.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and fall into scan loop.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the first input
- * tuple.
+ * Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}
@@ -92,8 +92,8 @@ ExecGroup(GroupState *node)
/*
* This loop iterates once per input tuple group. At the head of the
- * loop, we have finished processing the first tuple of the group and
- * now need to scan over all the other group members.
+ * loop, we have finished processing the first tuple of the group and now
+ * need to scan over all the other group members.
*/
for (;;)
{
@@ -120,22 +120,23 @@ ExecGroup(GroupState *node)
econtext->ecxt_per_tuple_memory))
break;
}
+
/*
- * We have the first tuple of the next input group. See if we
- * want to return it.
+ * We have the first tuple of the next input group. See if we want to
+ * return it.
*/
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
+
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to scan the rest of the group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to scan the rest of the group.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the first input
- * tuple.
+ * Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 5e2be394d86..8c51e785b28 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.95 2005/09/25 19:37:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -100,11 +100,11 @@ MultiExecHash(HashState *node)
InstrStopNodeMulti(node->ps.instrument, hashtable->totalTuples);
/*
- * We do not return the hash table directly because it's not a subtype
- * of Node, and so would violate the MultiExecProcNode API. Instead,
- * our parent Hashjoin node is expected to know how to fish it out
- * of our node state. Ugly but not really worth cleaning up, since
- * Hashjoin knows quite a bit more about Hash besides that.
+ * We do not return the hash table directly because it's not a subtype of
+ * Node, and so would violate the MultiExecProcNode API. Instead, our
+ * parent Hashjoin node is expected to know how to fish it out of our node
+ * state. Ugly but not really worth cleaning up, since Hashjoin knows
+ * quite a bit more about Hash besides that.
*/
return NULL;
}
@@ -161,8 +161,8 @@ ExecInitHash(Hash *node, EState *estate)
outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan(&hashstate->ps);
hashstate->ps.ps_ProjInfo = NULL;
@@ -221,9 +221,9 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
MemoryContext oldcxt;
/*
- * Get information about the size of the relation to be hashed (it's
- * the "outer" subtree of this node, but the inner relation of the
- * hashjoin). Compute the appropriate size of the hash table.
+ * Get information about the size of the relation to be hashed (it's the
+ * "outer" subtree of this node, but the inner relation of the hashjoin).
+ * Compute the appropriate size of the hash table.
*/
outerNode = outerPlan(node);
@@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
/*
* Initialize the hash table control block.
*
- * The hashtable control block is just palloc'd from the executor's
- * per-query memory context.
+ * The hashtable control block is just palloc'd from the executor's per-query
+ * memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;
@@ -273,8 +273,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
}
/*
- * Create temporary memory contexts in which to keep the hashtable
- * working storage. See notes in executor/hashjoin.h.
+ * Create temporary memory contexts in which to keep the hashtable working
+ * storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
@@ -353,9 +353,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
ntuples = 1000.0;
/*
- * Estimate tupsize based on footprint of tuple in hashtable... note
- * this does not allow for any palloc overhead. The manipulations of
- * spaceUsed don't count palloc overhead either.
+ * Estimate tupsize based on footprint of tuple in hashtable... note this
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * don't count palloc overhead either.
*/
tupsize = MAXALIGN(sizeof(HashJoinTupleData)) +
MAXALIGN(sizeof(HeapTupleHeaderData)) +
@@ -375,16 +375,16 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
if (inner_rel_bytes > hash_table_bytes)
{
/* We'll need multiple batches */
- long lbuckets;
- double dbatch;
- int minbatch;
+ long lbuckets;
+ double dbatch;
+ int minbatch;
lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET;
lbuckets = Min(lbuckets, INT_MAX);
nbuckets = (int) lbuckets;
dbatch = ceil(inner_rel_bytes / hash_table_bytes);
- dbatch = Min(dbatch, INT_MAX/2);
+ dbatch = Min(dbatch, INT_MAX / 2);
minbatch = (int) dbatch;
nbatch = 2;
while (nbatch < minbatch)
@@ -393,7 +393,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
else
{
/* We expect the hashtable to fit in memory */
- double dbuckets;
+ double dbuckets;
dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
dbuckets = Min(dbuckets, INT_MAX);
@@ -406,8 +406,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
* We want nbuckets to be prime so as to avoid having bucket and batch
* numbers depend on only some bits of the hash code. Choose the next
* larger prime from the list in hprimes[]. (This also enforces that
- * nbuckets is not very small, by the simple expedient of not putting
- * any very small entries in hprimes[].)
+ * nbuckets is not very small, by the simple expedient of not putting any
+ * very small entries in hprimes[].)
*/
for (i = 0; i < (int) lengthof(hprimes); i++)
{
@@ -475,7 +475,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
return;
/* safety check to avoid overflow */
- if (oldnbatch > INT_MAX/2)
+ if (oldnbatch > INT_MAX / 2)
return;
nbatch = oldnbatch * 2;
@@ -514,8 +514,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->nbatch = nbatch;
/*
- * Scan through the existing hash table entries and dump out any
- * that are no longer of the current batch.
+ * Scan through the existing hash table entries and dump out any that are
+ * no longer of the current batch.
*/
ninmemory = nfreed = 0;
@@ -571,12 +571,12 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
#endif
/*
- * If we dumped out either all or none of the tuples in the table,
- * disable further expansion of nbatch. This situation implies that
- * we have enough tuples of identical hashvalues to overflow spaceAllowed.
- * Increasing nbatch will not fix it since there's no way to subdivide
- * the group any more finely.
- * We have to just gut it out and hope the server has enough RAM.
+ * If we dumped out either all or none of the tuples in the table, disable
+ * further expansion of nbatch. This situation implies that we have
+ * enough tuples of identical hashvalues to overflow spaceAllowed.
+ * Increasing nbatch will not fix it since there's no way to subdivide the
+ * group any more finely. We have to just gut it out and hope the server
+ * has enough RAM.
*/
if (nfreed == 0 || nfreed == ninmemory)
{
@@ -663,8 +663,8 @@ ExecHashGetHashValue(HashJoinTable hashtable,
MemoryContext oldContext;
/*
- * We reset the eval context each time to reclaim any memory leaked in
- * the hashkey expressions.
+ * We reset the eval context each time to reclaim any memory leaked in the
+ * hashkey expressions.
*/
ResetExprContext(econtext);
@@ -727,8 +727,8 @@ ExecHashGetBucketAndBatch(HashJoinTable hashtable,
int *bucketno,
int *batchno)
{
- uint32 nbuckets = (uint32) hashtable->nbuckets;
- uint32 nbatch = (uint32) hashtable->nbatch;
+ uint32 nbuckets = (uint32) hashtable->nbuckets;
+ uint32 nbatch = (uint32) hashtable->nbatch;
if (nbatch > 1)
{
@@ -759,8 +759,8 @@ ExecScanHashBucket(HashJoinState *hjstate,
uint32 hashvalue = hjstate->hj_CurHashValue;
/*
- * hj_CurTuple is NULL to start scanning a new bucket, or the address
- * of the last tuple returned from the current bucket.
+ * hj_CurTuple is NULL to start scanning a new bucket, or the address of
+ * the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
@@ -812,8 +812,8 @@ ExecHashTableReset(HashJoinTable hashtable)
int nbuckets = hashtable->nbuckets;
/*
- * Release all the hash buckets and tuples acquired in the prior pass,
- * and reinitialize the context for a new pass.
+ * Release all the hash buckets and tuples acquired in the prior pass, and
+ * reinitialize the context for a new pass.
*/
MemoryContextReset(hashtable->batchCxt);
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 4b0f9377ba8..9f002dde9cf 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.73 2005/09/25 19:37:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.74 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,8 +24,8 @@
static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode,
- HashJoinState *hjstate,
- uint32 *hashvalue);
+ HashJoinState *hjstate,
+ uint32 *hashvalue);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
uint32 *hashvalue,
@@ -77,9 +77,9 @@ ExecHashJoin(HashJoinState *node)
econtext = node->js.ps.ps_ExprContext;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@@ -93,17 +93,17 @@ ExecHashJoin(HashJoinState *node)
}
/*
- * If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched
- * on the previous try.
+ * If we're doing an IN join, we want to return at most one row per outer
+ * tuple; so we can stop scanning the inner scan if we matched on the
+ * previous try.
*/
if (node->js.jointype == JOIN_IN && node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@@ -114,17 +114,17 @@ ExecHashJoin(HashJoinState *node)
{
/*
* If the outer relation is completely empty, we can quit without
- * building the hash table. However, for an inner join it is only
- * a win to check this when the outer relation's startup cost is less
- * than the projected cost of building the hash table. Otherwise
- * it's best to build the hash table first and see if the inner
- * relation is empty. (When it's an outer join, we should always
- * make this check, since we aren't going to be able to skip the
- * join on the strength of an empty inner relation anyway.)
+ * building the hash table. However, for an inner join it is only a
+ * win to check this when the outer relation's startup cost is less
+ * than the projected cost of building the hash table. Otherwise it's
+ * best to build the hash table first and see if the inner relation is
+ * empty. (When it's an outer join, we should always make this check,
+ * since we aren't going to be able to skip the join on the strength
+ * of an empty inner relation anyway.)
*
- * The only way to make the check is to try to fetch a tuple from
- * the outer plan node. If we succeed, we have to stash it away
- * for later consumption by ExecHashJoinOuterGetTuple.
+ * The only way to make the check is to try to fetch a tuple from the
+ * outer plan node. If we succeed, we have to stash it away for later
+ * consumption by ExecHashJoinOuterGetTuple.
*/
if (outerNode->plan->startup_cost < hashNode->ps.plan->total_cost ||
node->js.jointype == JOIN_LEFT)
@@ -150,8 +150,8 @@ ExecHashJoin(HashJoinState *node)
(void) MultiExecProcNode((PlanState *) hashNode);
/*
- * If the inner relation is completely empty, and we're not doing
- * an outer join, we can quit without scanning the outer relation.
+ * If the inner relation is completely empty, and we're not doing an
+ * outer join, we can quit without scanning the outer relation.
*/
if (hashtable->totalTuples == 0 && node->js.jointype != JOIN_LEFT)
{
@@ -193,8 +193,8 @@ ExecHashJoin(HashJoinState *node)
node->hj_MatchedOuter = false;
/*
- * now we have an outer tuple, find the corresponding bucket
- * for this tuple from the hash table
+ * now we have an outer tuple, find the corresponding bucket for
+ * this tuple from the hash table
*/
node->hj_CurHashValue = hashvalue;
ExecHashGetBucketAndBatch(hashtable, hashvalue,
@@ -202,21 +202,21 @@ ExecHashJoin(HashJoinState *node)
node->hj_CurTuple = NULL;
/*
- * Now we've got an outer tuple and the corresponding hash
- * bucket, but this tuple may not belong to the current batch.
+ * Now we've got an outer tuple and the corresponding hash bucket,
+ * but this tuple may not belong to the current batch.
*/
if (batchno != hashtable->curbatch)
{
/*
- * Need to postpone this outer tuple to a later batch.
- * Save it in the corresponding outer-batch file.
+ * Need to postpone this outer tuple to a later batch. Save it
+ * in the corresponding outer-batch file.
*/
Assert(batchno > hashtable->curbatch);
ExecHashJoinSaveTuple(ExecFetchSlotTuple(outerTupleSlot),
hashvalue,
&hashtable->outerBatchFile[batchno]);
node->hj_NeedNewOuter = true;
- continue; /* loop around for a new outer tuple */
+ continue; /* loop around for a new outer tuple */
}
}
@@ -243,11 +243,11 @@ ExecHashJoin(HashJoinState *node)
/*
* if we pass the qual, then save state for next call and have
- * ExecProject form the projection, store it in the tuple
- * table, and return the slot.
+ * ExecProject form the projection, store it in the tuple table,
+ * and return the slot.
*
- * Only the joinquals determine MatchedOuter status, but all
- * quals must pass to actually return the tuple.
+ * Only the joinquals determine MatchedOuter status, but all quals
+ * must pass to actually return the tuple.
*/
if (joinqual == NIL || ExecQual(joinqual, econtext, false))
{
@@ -268,8 +268,7 @@ ExecHashJoin(HashJoinState *node)
}
/*
- * If we didn't return a tuple, may need to set
- * NeedNewOuter
+ * If we didn't return a tuple, may need to set NeedNewOuter
*/
if (node->js.jointype == JOIN_IN)
{
@@ -281,8 +280,8 @@ ExecHashJoin(HashJoinState *node)
/*
* Now the current outer tuple has run out of matches, so check
- * whether to emit a dummy outer-join tuple. If not, loop around
- * to get a new outer tuple.
+ * whether to emit a dummy outer-join tuple. If not, loop around to
+ * get a new outer tuple.
*/
node->hj_NeedNewOuter = true;
@@ -290,19 +289,17 @@ ExecHashJoin(HashJoinState *node)
node->js.jointype == JOIN_LEFT)
{
/*
- * We are doing an outer join and there were no join matches
- * for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes the
- * non-join quals.
+ * We are doing an outer join and there were no join matches for
+ * this outer tuple. Generate a fake join tuple with nulls for
+ * the inner tuple, and return it if it passes the non-join quals.
*/
econtext->ecxt_innertuple = node->hj_NullInnerTupleSlot;
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and return
- * the slot containing the result tuple using
- * ExecProject().
+ * qualification was satisfied so we project and return the
+ * slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
@@ -392,7 +389,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -400,11 +397,11 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
}
/*
- * now for some voodoo. our temporary tuple slot is actually the
- * result tuple slot of the Hash node (which is our inner plan). we
- * do this because Hash nodes don't return tuples via ExecProcNode()
- * -- instead the hash join node uses ExecScanHashBucket() to get at
- * the contents of the hash table. -cim 6/9/91
+ * now for some voodoo. our temporary tuple slot is actually the result
+ * tuple slot of the Hash node (which is our inner plan). we do this
+ * because Hash nodes don't return tuples via ExecProcNode() -- instead
+ * the hash join node uses ExecScanHashBucket() to get at the contents of
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@@ -434,10 +431,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
hjstate->hj_CurTuple = NULL;
/*
- * Deconstruct the hash clauses into outer and inner argument values,
- * so that we can evaluate those subexpressions separately. Also make
- * a list of the hash operator OIDs, in preparation for looking up the
- * hash functions to use.
+ * Deconstruct the hash clauses into outer and inner argument values, so
+ * that we can evaluate those subexpressions separately. Also make a list
+ * of the hash operator OIDs, in preparation for looking up the hash
+ * functions to use.
*/
lclauses = NIL;
rclauses = NIL;
@@ -536,6 +533,7 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
if (curbatch == 0)
{ /* if it is the first pass */
+
/*
* Check to see if first outer tuple was already fetched by
* ExecHashJoin() and not used yet.
@@ -560,16 +558,16 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
}
/*
- * We have just reached the end of the first pass. Try to switch
- * to a saved batch.
+ * We have just reached the end of the first pass. Try to switch to a
+ * saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
- * Try to read from a temp file. Loop allows us to advance to new
- * batches as needed. NOTE: nbatch could increase inside
- * ExecHashJoinNewBatch, so don't try to optimize this loop.
+ * Try to read from a temp file. Loop allows us to advance to new batches
+ * as needed. NOTE: nbatch could increase inside ExecHashJoinNewBatch, so
+ * don't try to optimize this loop.
*/
while (curbatch < hashtable->nbatch)
{
@@ -623,16 +621,16 @@ start_over:
* sides. We can sometimes skip over batches that are empty on only one
* side, but there are exceptions:
*
- * 1. In a LEFT JOIN, we have to process outer batches even if the
- * inner batch is empty.
+ * 1. In a LEFT JOIN, we have to process outer batches even if the inner
+ * batch is empty.
*
- * 2. If we have increased nbatch since the initial estimate, we have
- * to scan inner batches since they might contain tuples that need to
- * be reassigned to later inner batches.
+ * 2. If we have increased nbatch since the initial estimate, we have to scan
+ * inner batches since they might contain tuples that need to be
+ * reassigned to later inner batches.
*
- * 3. Similarly, if we have increased nbatch since starting the outer
- * scan, we have to rescan outer batches in case they contain tuples
- * that need to be reassigned.
+ * 3. Similarly, if we have increased nbatch since starting the outer scan,
+ * we have to rescan outer batches in case they contain tuples that need
+ * to be reassigned.
*/
curbatch++;
while (curbatch < nbatch &&
@@ -676,7 +674,7 @@ start_over:
if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
while ((slot = ExecHashJoinGetSavedTuple(hjstate,
innerFile,
@@ -684,8 +682,8 @@ start_over:
hjstate->hj_HashTupleSlot)))
{
/*
- * NOTE: some tuples may be sent to future batches. Also,
- * it is possible for hashtable->nbatch to be increased here!
+ * NOTE: some tuples may be sent to future batches. Also, it is
+ * possible for hashtable->nbatch to be increased here!
*/
ExecHashTableInsert(hashtable,
ExecFetchSlotTuple(slot),
@@ -733,7 +731,7 @@ void
ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
BufFile **fileptr)
{
- BufFile *file = *fileptr;
+ BufFile *file = *fileptr;
size_t written;
if (file == NULL)
@@ -764,7 +762,7 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
@@ -809,18 +807,18 @@ void
ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't yet built the hash table then we can just return;
- * nothing done yet, so nothing to undo.
+ * If we haven't yet built the hash table then we can just return; nothing
+ * done yet, so nothing to undo.
*/
if (node->hj_HashTable == NULL)
return;
/*
- * In a multi-batch join, we currently have to do rescans the hard
- * way, primarily because batch temp files may have already been
- * released. But if it's a single-batch join, and there is no
- * parameter change for the inner subnode, then we can just re-use the
- * existing hash table without rebuilding it.
+ * In a multi-batch join, we currently have to do rescans the hard way,
+ * primarily because batch temp files may have already been released. But
+ * if it's a single-batch join, and there is no parameter change for the
+ * inner subnode, then we can just re-use the existing hash table without
+ * rebuilding it.
*/
if (node->hj_HashTable->nbatch == 1 &&
((PlanState *) node)->righttree->chgParam == NULL)
@@ -835,8 +833,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_FirstOuterTupleSlot = NULL;
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->righttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->righttree, exprCtxt);
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 2a10ef39c0d..94ab2223c75 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.103 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,11 +75,11 @@ IndexNext(IndexScanState *node)
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer inside index_getnext.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);
@@ -104,7 +104,7 @@ IndexNext(IndexScanState *node)
ResetExprContext(econtext);
if (!ExecQual(node->indexqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
+ ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
@@ -118,22 +118,21 @@ IndexNext(IndexScanState *node)
if ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
- * Store the scanned tuple in the scan tuple slot of the scan
- * state. Note: we pass 'false' because tuples returned by
- * amgetnext are pointers onto disk pages and must not be
- * pfree()'d.
+ * Store the scanned tuple in the scan tuple slot of the scan state.
+ * Note: we pass 'false' because tuples returned by amgetnext are
+ * pointers onto disk pages and must not be pfree()'d.
*/
- ExecStoreTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- scandesc->xs_cbuf, /* buffer containing tuple */
- false); /* don't pfree */
+ ExecStoreTuple(tuple, /* tuple to store */
+ slot, /* slot to store in */
+ scandesc->xs_cbuf, /* buffer containing tuple */
+ false); /* don't pfree */
return slot;
}
/*
- * if we get here it means the index scan failed so we are at the end
- * of the scan..
+ * if we get here it means the index scan failed so we are at the end of
+ * the scan..
*/
return ExecClearTuple(slot);
}
@@ -146,8 +145,7 @@ TupleTableSlot *
ExecIndexScan(IndexScanState *node)
{
/*
- * If we have runtime keys and they've not already been set up, do it
- * now.
+ * If we have runtime keys and they've not already been set up, do it now.
*/
if (node->iss_RuntimeKeyInfo && !node->iss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
@@ -179,8 +177,7 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
Index scanrelid;
estate = node->ss.ps.state;
- econtext = node->iss_RuntimeContext; /* context for runtime
- * keys */
+ econtext = node->iss_RuntimeContext; /* context for runtime keys */
scanKeys = node->iss_ScanKeys;
runtimeKeyInfo = node->iss_RuntimeKeyInfo;
numScanKeys = node->iss_NumScanKeys;
@@ -203,16 +200,16 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
}
/*
- * Reset the runtime-key context so we don't leak memory as each
- * outer tuple is scanned. Note this assumes that we will
- * recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each outer
+ * tuple is scanned. Note this assumes that we will recalculate *all*
+ * runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
- * If we are doing runtime key calculations (ie, the index keys depend
- * on data from an outer scan), compute the new key values
+ * If we are doing runtime key calculations (ie, the index keys depend on
+ * data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
@@ -251,16 +248,16 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
for (j = 0; j < n_keys; j++)
{
/*
- * If we have a run-time key, then extract the run-time
- * expression and evaluate it with respect to the current
- * outer tuple. We then stick the result into the scan key.
+ * If we have a run-time key, then extract the run-time expression and
+ * evaluate it with respect to the current outer tuple. We then stick
+ * the result into the scan key.
*
- * Note: the result of the eval could be a pass-by-ref value
- * that's stored in the outer scan's tuple, not in
- * econtext->ecxt_per_tuple_memory. We assume that the
- * outer tuple will stay put throughout our scan. If this
- * is wrong, we could copy the result into our context
- * explicitly, but I think that's not necessary...
+ * Note: the result of the eval could be a pass-by-ref value that's
+ * stored in the outer scan's tuple, not in
+ * econtext->ecxt_per_tuple_memory. We assume that the outer tuple
+ * will stay put throughout our scan. If this is wrong, we could copy
+ * the result into our context explicitly, but I think that's not
+ * necessary...
*/
if (run_keys[j] != NULL)
{
@@ -323,9 +320,8 @@ ExecEndIndexScan(IndexScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * ExecInitIndexScan. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * ExecInitIndexScan. This lock should be held till end of transaction.
+ * (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
@@ -392,11 +388,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
* initialize child expressions
*
* Note: we don't initialize all of the indexqual expression, only the
- * sub-parts corresponding to runtime keys (see below). The
- * indexqualorig expression is always initialized even though it will
- * only be used in some uncommon cases --- would be nice to improve
- * that. (Problem is that any SubPlans present in the expression must
- * be found now...)
+ * sub-parts corresponding to runtime keys (see below). The indexqualorig
+ * expression is always initialized even though it will only be used in
+ * some uncommon cases --- would be nice to improve that. (Problem is
+ * that any SubPlans present in the expression must be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
@@ -440,10 +435,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
indexstate->iss_NumScanKeys = numScanKeys;
/*
- * If we have runtime keys, we need an ExprContext to evaluate them.
- * The node's standard context won't do because we want to reset that
- * context for every tuple. So, build another context just like the
- * other one... -tgl 7/11/00
+ * If we have runtime keys, we need an ExprContext to evaluate them. The
+ * node's standard context won't do because we want to reset that context
+ * for every tuple. So, build another context just like the other one...
+ * -tgl 7/11/00
*/
if (have_runtime_keys)
{
@@ -476,10 +471,10 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
ExecAssignScanType(&indexstate->ss, RelationGetDescr(currentRelation), false);
/*
- * open the index relation and initialize relation and scan
- * descriptors. Note we acquire no locks here; the index machinery
- * does its own locks and unlocks. (We rely on having AccessShareLock
- * on the parent table to ensure the index won't go away!)
+ * open the index relation and initialize relation and scan descriptors.
+ * Note we acquire no locks here; the index machinery does its own locks
+ * and unlocks. (We rely on having AccessShareLock on the parent table to
+ * ensure the index won't go away!)
*/
indexstate->iss_RelationDesc = index_open(node->indexid);
indexstate->iss_ScanDesc = index_beginscan(currentRelation,
@@ -543,8 +538,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
(ExprState **) palloc(n_keys * sizeof(ExprState *));
/*
- * for each opclause in the given qual, convert each qual's
- * opclause into a single scan key
+ * for each opclause in the given qual, convert each qual's opclause into
+ * a single scan key
*/
qual_cell = list_head(quals);
strategy_cell = list_head(strategies);
@@ -552,15 +547,15 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
for (j = 0; j < n_keys; j++)
{
- OpExpr *clause; /* one clause of index qual */
- Expr *leftop; /* expr on lhs of operator */
- Expr *rightop; /* expr on rhs ... */
+ OpExpr *clause; /* one clause of index qual */
+ Expr *leftop; /* expr on lhs of operator */
+ Expr *rightop; /* expr on rhs ... */
int flags = 0;
- AttrNumber varattno; /* att number used in scan */
+ AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
- Oid subtype; /* op's strategy subtype */
- RegProcedure opfuncid; /* operator proc id used in scan */
- Datum scanvalue; /* value used in scan (if const) */
+ Oid subtype; /* op's strategy subtype */
+ RegProcedure opfuncid; /* operator proc id used in scan */
+ Datum scanvalue; /* value used in scan (if const) */
/*
* extract clause information from the qualification
@@ -578,18 +573,17 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
opfuncid = clause->opfuncid;
/*
- * Here we figure out the contents of the index qual. The
- * usual case is (var op const) which means we form a scan key
- * for the attribute listed in the var node and use the value
- * of the const as comparison data.
+ * Here we figure out the contents of the index qual. The usual case
+ * is (var op const) which means we form a scan key for the attribute
+ * listed in the var node and use the value of the const as comparison
+ * data.
*
- * If we don't have a const node, it means our scan key is a
- * function of information obtained during the execution of
- * the plan, in which case we need to recalculate the index
- * scan key at run time. Hence, we set have_runtime_keys to
- * true and place the appropriate subexpression in run_keys.
- * The corresponding scan key values are recomputed at run
- * time.
+ * If we don't have a const node, it means our scan key is a function of
+ * information obtained during the execution of the plan, in which
+ * case we need to recalculate the index scan key at run time. Hence,
+ * we set have_runtime_keys to true and place the appropriate
+ * subexpression in run_keys. The corresponding scan key values are
+ * recomputed at run time.
*/
run_keys[j] = NULL;
@@ -622,8 +616,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
if (IsA(rightop, Const))
{
/*
- * if the rightop is a const node then it means it
- * identifies the value to place in our scan key.
+ * if the rightop is a const node then it means it identifies the
+ * value to place in our scan key.
*/
scanvalue = ((Const *) rightop)->constvalue;
if (((Const *) rightop)->constisnull)
@@ -632,9 +626,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
else
{
/*
- * otherwise, the rightop contains an expression evaluable
- * at runtime to figure out the value to place in our scan
- * key.
+ * otherwise, the rightop contains an expression evaluable at
+ * runtime to figure out the value to place in our scan key.
*/
have_runtime_keys = true;
run_keys[j] = ExecInitExpr(rightop, planstate);
@@ -646,11 +639,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
- varattno, /* attribute number to scan */
- strategy, /* op's strategy */
- subtype, /* strategy subtype */
- opfuncid, /* reg proc to use */
- scanvalue); /* constant */
+ varattno, /* attribute number to scan */
+ strategy, /* op's strategy */
+ subtype, /* strategy subtype */
+ opfuncid, /* reg proc to use */
+ scanvalue); /* constant */
}
/* If no runtime keys, get rid of speculatively-allocated array */
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 40e0283e86f..462db0aee9d 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.21 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.22 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,10 +61,9 @@ ExecLimit(LimitState *node)
return NULL;
/*
- * First call for this scan, so compute limit/offset. (We
- * can't do this any earlier, because parameters from upper
- * nodes may not be set until now.) This also sets position =
- * 0.
+ * First call for this scan, so compute limit/offset. (We can't do
+ * this any earlier, because parameters from upper nodes may not
+ * be set until now.) This also sets position = 0.
*/
recompute_limits(node);
@@ -86,8 +85,8 @@ ExecLimit(LimitState *node)
if (TupIsNull(slot))
{
/*
- * The subplan returns too few tuples for us to
- * produce any output at all.
+ * The subplan returns too few tuples for us to produce
+ * any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
@@ -115,11 +114,10 @@ ExecLimit(LimitState *node)
if (ScanDirectionIsForward(direction))
{
/*
- * Forwards scan, so check for stepping off end of window.
- * If we are at the end of the window, return NULL without
- * advancing the subplan or the position variable; but
- * change the state machine state to record having done
- * so.
+ * Forwards scan, so check for stepping off end of window. If
+ * we are at the end of the window, return NULL without
+ * advancing the subplan or the position variable; but change
+ * the state machine state to record having done so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
@@ -143,9 +141,8 @@ ExecLimit(LimitState *node)
else
{
/*
- * Backwards scan, so check for stepping off start of
- * window. As above, change only state-machine status if
- * so.
+ * Backwards scan, so check for stepping off start of window.
+ * As above, change only state-machine status if so.
*/
if (node->position <= node->offset + 1)
{
@@ -169,9 +166,8 @@ ExecLimit(LimitState *node)
return NULL;
/*
- * Backing up from subplan EOF, so re-fetch previous tuple;
- * there should be one! Note previous tuple must be in
- * window.
+ * Backing up from subplan EOF, so re-fetch previous tuple; there
+ * should be one! Note previous tuple must be in window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
@@ -328,8 +324,8 @@ ExecInitLimit(Limit *node, EState *estate)
outerPlanState(limitstate) = ExecInitNode(outerPlan, estate);
/*
- * limit nodes do no projections, so initialize projection info for
- * this node appropriately
+ * limit nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&limitstate->ps);
limitstate->ps.ps_ProjInfo = NULL;
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index fe128595576..750f355b0ee 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.49 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.50 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,8 +68,8 @@ ExecMaterial(MaterialState *node)
}
/*
- * If we are not at the end of the tuplestore, or are going backwards,
- * try to fetch a tuple from tuplestore.
+ * If we are not at the end of the tuplestore, or are going backwards, try
+ * to fetch a tuple from tuplestore.
*/
eof_tuplestore = tuplestore_ateof(tuplestorestate);
@@ -79,9 +79,9 @@ ExecMaterial(MaterialState *node)
{
/*
* When reversing direction at tuplestore EOF, the first
- * getheaptuple call will fetch the last-added tuple; but we
- * want to return the one before that, if possible. So do an
- * extra fetch.
+ * getheaptuple call will fetch the last-added tuple; but we want
+ * to return the one before that, if possible. So do an extra
+ * fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
@@ -106,10 +106,10 @@ ExecMaterial(MaterialState *node)
/*
* If necessary, try to fetch another row from the subplan.
*
- * Note: the eof_underlying state variable exists to short-circuit
- * further subplan calls. It's not optional, unfortunately, because
- * some plan node types are not robust about being called again when
- * they've already returned NULL.
+ * Note: the eof_underlying state variable exists to short-circuit further
+ * subplan calls. It's not optional, unfortunately, because some plan
+ * node types are not robust about being called again when they've already
+ * returned NULL.
*/
if (eof_tuplestore && !node->eof_underlying)
{
@@ -117,8 +117,8 @@ ExecMaterial(MaterialState *node)
TupleTableSlot *outerslot;
/*
- * We can only get here with forward==true, so no need to worry
- * about which direction the subplan will go.
+ * We can only get here with forward==true, so no need to worry about
+ * which direction the subplan will go.
*/
outerNode = outerPlanState(node);
outerslot = ExecProcNode(outerNode);
@@ -132,8 +132,8 @@ ExecMaterial(MaterialState *node)
/*
* Append returned tuple to tuplestore, too. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will
- * move forward over the added tuple. This is what we want.
+ * tuplestore is certainly in EOF state, its read position will move
+ * forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
@@ -192,8 +192,8 @@ ExecInitMaterial(Material *node, EState *estate)
outerPlanState(matstate) = ExecInitNode(outerPlan, estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections.
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&matstate->ss);
@@ -284,9 +284,9 @@ void
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't materialized yet, just return. If outerplan' chgParam
- * is not NULL then it will be re-scanned by ExecProcNode, else - no
- * reason to re-scan it at all.
+ * If we haven't materialized yet, just return. If outerplan' chgParam is
+ * not NULL then it will be re-scanned by ExecProcNode, else - no reason
+ * to re-scan it at all.
*/
if (!node->tuplestorestate)
return;
@@ -294,11 +294,11 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous stored
- * results; we have to re-read the subplan and re-store.
+ * If subnode is to be rescanned then we forget previous stored results;
+ * we have to re-read the subplan and re-store.
*
- * Otherwise we can just rewind and rescan the stored output. The state
- * of the subnode does not change.
+ * Otherwise we can just rewind and rescan the stored output. The state of
+ * the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index fb279e8b68e..0d4eed4c9ba 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.74 2005/05/15 21:19:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@
* matching tuple and so on.
*
* Therefore, when initializing the merge-join node, we look up the
- * associated sort operators. We assume the planner has seen to it
+ * associated sort operators. We assume the planner has seen to it
* that the inputs are correctly sorted by these operators. Rather
* than directly executing the merge join clauses, we evaluate the
* left and right key expressions separately and then compare the
@@ -124,30 +124,33 @@ typedef enum
typedef struct MergeJoinClauseData
{
/* Executable expression trees */
- ExprState *lexpr; /* left-hand (outer) input expression */
- ExprState *rexpr; /* right-hand (inner) input expression */
+ ExprState *lexpr; /* left-hand (outer) input expression */
+ ExprState *rexpr; /* right-hand (inner) input expression */
+
/*
* If we have a current left or right input tuple, the values of the
* expressions are loaded into these fields:
*/
- Datum ldatum; /* current left-hand value */
- Datum rdatum; /* current right-hand value */
- bool lisnull; /* and their isnull flags */
- bool risnull;
+ Datum ldatum; /* current left-hand value */
+ Datum rdatum; /* current right-hand value */
+ bool lisnull; /* and their isnull flags */
+ bool risnull;
+
/*
* Remember whether mergejoin operator is strict (usually it will be).
- * NOTE: if it's not strict, we still assume it cannot return true for
- * one null and one non-null input.
+ * NOTE: if it's not strict, we still assume it cannot return true for one
+ * null and one non-null input.
*/
- bool mergestrict;
+ bool mergestrict;
+
/*
- * The comparison strategy in use, and the lookup info to let us call
- * the needed comparison routines. eqfinfo is the "=" operator itself.
+ * The comparison strategy in use, and the lookup info to let us call the
+ * needed comparison routines. eqfinfo is the "=" operator itself.
* cmpfinfo is either the btree comparator or the "<" operator.
*/
MergeFunctionKind cmpstrategy;
- FmgrInfo eqfinfo;
- FmgrInfo cmpfinfo;
+ FmgrInfo eqfinfo;
+ FmgrInfo cmpfinfo;
} MergeJoinClauseData;
@@ -167,8 +170,8 @@ typedef struct MergeJoinClauseData
*
* The best, most efficient way to compare two expressions is to use a btree
* comparison support routine, since that requires only one function call
- * per comparison. Hence we try to find a btree opclass that matches the
- * mergejoinable operator. If we cannot find one, we'll have to call both
+ * per comparison. Hence we try to find a btree opclass that matches the
+ * mergejoinable operator. If we cannot find one, we'll have to call both
* the "=" and (often) the "<" operator for each comparison.
*/
static MergeJoinClause
@@ -204,8 +207,8 @@ MJExamineQuals(List *qualList, PlanState *parent)
clause->rexpr = ExecInitExpr((Expr *) lsecond(qual->args), parent);
/*
- * Check permission to call the mergejoinable operator.
- * For predictability, we check this even if we end up not using it.
+ * Check permission to call the mergejoinable operator. For
+ * predictability, we check this even if we end up not using it.
*/
aclresult = pg_proc_aclcheck(qual->opfuncid, GetUserId(), ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
@@ -220,7 +223,7 @@ MJExamineQuals(List *qualList, PlanState *parent)
/*
* Lookup the comparison operators that go with the mergejoinable
- * top-level operator. (This will elog if the operator isn't
+ * top-level operator. (This will elog if the operator isn't
* mergejoinable, which would be the planner's mistake.)
*/
op_mergejoin_crossops(qual->opno,
@@ -232,13 +235,12 @@ MJExamineQuals(List *qualList, PlanState *parent)
clause->cmpstrategy = MERGEFUNC_LT;
/*
- * Look for a btree opclass including all three operators.
- * This is much like SelectSortFunction except we insist on
- * matching all the operators provided, and it can be a cross-type
- * opclass.
+ * Look for a btree opclass including all three operators. This is
+ * much like SelectSortFunction except we insist on matching all the
+ * operators provided, and it can be a cross-type opclass.
*
- * XXX for now, insist on forward sort so that NULLs can be counted
- * on to be high.
+ * XXX for now, insist on forward sort so that NULLs can be counted on to
+ * be high.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(qual->opno),
@@ -255,13 +257,13 @@ MJExamineQuals(List *qualList, PlanState *parent)
if (!opclass_is_btree(opcid))
continue;
if (get_op_opclass_strategy(ltop, opcid) == BTLessStrategyNumber &&
- get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
+ get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
{
clause->cmpstrategy = MERGEFUNC_CMP;
ltproc = get_opclass_proc(opcid, aform->amopsubtype,
BTORDER_PROC);
Assert(RegProcedureIsValid(ltproc));
- break; /* done looking */
+ break; /* done looking */
}
}
@@ -325,7 +327,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@@ -379,8 +381,8 @@ MJCompare(MergeJoinState *mergestate)
FunctionCallInfoData fcinfo;
/*
- * Call the comparison functions in short-lived context, in case they
- * leak memory.
+ * Call the comparison functions in short-lived context, in case they leak
+ * memory.
*/
ResetExprContext(econtext);
@@ -394,11 +396,11 @@ MJCompare(MergeJoinState *mergestate)
/*
* Deal with null inputs. We treat NULL as sorting after non-NULL.
*
- * If both inputs are NULL, and the comparison function isn't
- * strict, then we call it and check for a true result (this allows
- * operators that behave like IS NOT DISTINCT to be mergejoinable).
- * If the function is strict or returns false, we temporarily
- * pretend NULL == NULL and contine checking remaining columns.
+ * If both inputs are NULL, and the comparison function isn't strict,
+ * then we call it and check for a true result (this allows operators
+ * that behave like IS NOT DISTINCT to be mergejoinable). If the
+ * function is strict or returns false, we temporarily pretend NULL ==
+ * NULL and contine checking remaining columns.
*/
if (clause->lisnull)
{
@@ -477,7 +479,8 @@ MJCompare(MergeJoinState *mergestate)
break;
}
}
- else /* must be MERGEFUNC_CMP */
+ else
+ /* must be MERGEFUNC_CMP */
{
InitFunctionCallInfoData(fcinfo, &(clause->cmpfinfo), 2,
NULL, NULL);
@@ -512,10 +515,10 @@ MJCompare(MergeJoinState *mergestate)
}
/*
- * If we had any null comparison results or NULL-vs-NULL inputs,
- * we do not want to report that the tuples are equal. Instead,
- * if result is still 0, change it to +1. This will result in
- * advancing the inner side of the join.
+ * If we had any null comparison results or NULL-vs-NULL inputs, we do not
+ * want to report that the tuples are equal. Instead, if result is still
+ * 0, change it to +1. This will result in advancing the inner side of
+ * the join.
*/
if (nulleqnull && result == 0)
result = 1;
@@ -544,8 +547,8 @@ MJFillOuter(MergeJoinState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification succeeded. now form the desired projection tuple
- * and return the slot containing it.
+ * qualification succeeded. now form the desired projection tuple and
+ * return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -583,8 +586,8 @@ MJFillInner(MergeJoinState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification succeeded. now form the desired projection tuple
- * and return the slot containing it.
+ * qualification succeeded. now form the desired projection tuple and
+ * return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -696,9 +699,9 @@ ExecMergeJoin(MergeJoinState *node)
doFillInner = node->mj_FillInner;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@@ -714,8 +717,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@@ -733,10 +736,10 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* EXEC_MJ_INITIALIZE_OUTER means that this is the first time
- * ExecMergeJoin() has been called and so we have to fetch
- * the first matchable tuple for both outer and inner subplans.
- * We do the outer side in INITIALIZE_OUTER state, then
- * advance to INITIALIZE_INNER state for the inner subplan.
+ * ExecMergeJoin() has been called and so we have to fetch the
+ * first matchable tuple for both outer and inner subplans. We
+ * do the outer side in INITIALIZE_OUTER state, then advance
+ * to INITIALIZE_INNER state for the inner subplan.
*/
case EXEC_MJ_INITIALIZE_OUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE_OUTER\n");
@@ -749,9 +752,9 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner)
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples. We set MatchedInner = true to
- * force the ENDOUTER state to advance inner.
+ * Need to emit right-join tuples for remaining inner
+ * tuples. We set MatchedInner = true to force the
+ * ENDOUTER state to advance inner.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
node->mj_MatchedInner = true;
@@ -797,11 +800,10 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillOuter)
{
/*
- * Need to emit left-join tuples for all outer
- * tuples, including the one we just fetched. We
- * set MatchedOuter = false to force the ENDINNER
- * state to emit first tuple before advancing
- * outer.
+ * Need to emit left-join tuples for all outer tuples,
+ * including the one we just fetched. We set
+ * MatchedOuter = false to force the ENDINNER state to
+ * emit first tuple before advancing outer.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
node->mj_MatchedOuter = false;
@@ -840,9 +842,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
- * EXEC_MJ_JOINTUPLES means we have two tuples which
- * satisfied the merge clause so we join them and then
- * proceed to get the next inner tuple (EXEC_MJ_NEXTINNER).
+ * EXEC_MJ_JOINTUPLES means we have two tuples which satisfied
+ * the merge clause so we join them and then proceed to get
+ * the next inner tuple (EXEC_MJ_NEXTINNER).
*/
case EXEC_MJ_JOINTUPLES:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n");
@@ -855,18 +857,18 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_JoinState = EXEC_MJ_NEXTINNER;
/*
- * Check the extra qual conditions to see if we actually
- * want to return this join tuple. If not, can proceed
- * with merge. We must distinguish the additional
- * joinquals (which must pass to consider the tuples
- * "matched" for outer-join logic) from the otherquals
- * (which must pass before we actually return the tuple).
+ * Check the extra qual conditions to see if we actually want
+ * to return this join tuple. If not, can proceed with merge.
+ * We must distinguish the additional joinquals (which must
+ * pass to consider the tuples "matched" for outer-join logic)
+ * from the otherquals (which must pass before we actually
+ * return the tuple).
*
* We don't bother with a ResetExprContext here, on the
- * assumption that we just did one while checking the
- * merge qual. One per tuple should be sufficient. We
- * do have to set up the econtext links to the tuples
- * for ExecQual to use.
+ * assumption that we just did one while checking the merge
+ * qual. One per tuple should be sufficient. We do have to
+ * set up the econtext links to the tuples for ExecQual to
+ * use.
*/
outerTupleSlot = node->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
@@ -896,8 +898,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* qualification succeeded. now form the desired
- * projection tuple and return the slot containing
- * it.
+ * projection tuple and return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -918,9 +919,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
- * EXEC_MJ_NEXTINNER means advance the inner scan to the
- * next tuple. If the tuple is not nil, we then proceed to
- * test it against the join qualification.
+ * EXEC_MJ_NEXTINNER means advance the inner scan to the next
+ * tuple. If the tuple is not nil, we then proceed to test it
+ * against the join qualification.
*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this inner tuple.
@@ -932,8 +933,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -945,12 +945,12 @@ ExecMergeJoin(MergeJoinState *node)
}
/*
- * now we get the next inner tuple, if any. If there's
- * none, advance to next outer tuple (which may be able
- * to join to previously marked tuples).
+ * now we get the next inner tuple, if any. If there's none,
+ * advance to next outer tuple (which may be able to join to
+ * previously marked tuples).
*
- * If we find one but it cannot join to anything, stay
- * in NEXTINNER state to fetch the next one.
+ * If we find one but it cannot join to anything, stay in
+ * NEXTINNER state to fetch the next one.
*/
innerTupleSlot = ExecProcNode(innerPlan);
node->mj_InnerTupleSlot = innerTupleSlot;
@@ -969,8 +969,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* Test the new inner tuple to see if it matches outer.
*
- * If they do match, then we join them and move on to the
- * next inner tuple (EXEC_MJ_JOINTUPLES).
+ * If they do match, then we join them and move on to the next
+ * inner tuple (EXEC_MJ_JOINTUPLES).
*
* If they do not match then advance to next outer tuple.
*/
@@ -1013,8 +1013,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -1034,8 +1033,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedOuter = false;
/*
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
+ * if the outer tuple is null then we are done with the join,
+ * unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@@ -1044,8 +1043,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples.
+ * Need to emit right-join tuples for remaining inner
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
@@ -1118,26 +1117,25 @@ ExecMergeJoin(MergeJoinState *node)
if (compareResult == 0)
{
/*
- * the merge clause matched so now we restore the
- * inner scan position to the first mark, and go join
- * that tuple (and any following ones) to the new outer.
+ * the merge clause matched so now we restore the inner
+ * scan position to the first mark, and go join that tuple
+ * (and any following ones) to the new outer.
*
- * NOTE: we do not need to worry about the MatchedInner
- * state for the rescanned inner tuples. We know all
- * of them will match this new outer tuple and
- * therefore won't be emitted as fill tuples. This
- * works *only* because we require the extra joinquals
- * to be nil when doing a right or full join ---
- * otherwise some of the rescanned tuples might fail
- * the extra joinquals.
+ * NOTE: we do not need to worry about the MatchedInner state
+ * for the rescanned inner tuples. We know all of them
+ * will match this new outer tuple and therefore won't be
+ * emitted as fill tuples. This works *only* because we
+ * require the extra joinquals to be nil when doing a
+ * right or full join --- otherwise some of the rescanned
+ * tuples might fail the extra joinquals.
*/
ExecRestrPos(innerPlan);
/*
* ExecRestrPos probably should give us back a new Slot,
* but since it doesn't, use the marked slot. (The
- * previously returned mj_InnerTupleSlot cannot be
- * assumed to hold the required tuple.)
+ * previously returned mj_InnerTupleSlot cannot be assumed
+ * to hold the required tuple.)
*/
node->mj_InnerTupleSlot = innerTupleSlot;
/* we need not do MJEvalInnerValues again */
@@ -1159,7 +1157,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, we are done.
* ----------------
*/
@@ -1222,8 +1220,8 @@ ExecMergeJoin(MergeJoinState *node)
/*
* before we advance, make sure the current tuples do not
- * satisfy the mergeclauses. If they do, then we update
- * the marked tuple position and go join them.
+ * satisfy the mergeclauses. If they do, then we update the
+ * marked tuple position and go join them.
*/
compareResult = MJCompare(node);
MJ_DEBUG_COMPARE(compareResult);
@@ -1238,7 +1236,8 @@ ExecMergeJoin(MergeJoinState *node)
}
else if (compareResult < 0)
node->mj_JoinState = EXEC_MJ_SKIPOUTER_ADVANCE;
- else /* compareResult > 0 */
+ else
+ /* compareResult > 0 */
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
break;
@@ -1253,8 +1252,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -1274,8 +1272,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedOuter = false;
/*
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
+ * if the outer tuple is null then we are done with the join,
+ * unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@@ -1284,8 +1282,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples.
+ * Need to emit right-join tuples for remaining inner
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
@@ -1317,8 +1315,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -1338,8 +1335,8 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_MatchedInner = false;
/*
- * if the inner tuple is null then we are done with the
- * join, unless we have outer tuples we need to null-fill.
+ * if the inner tuple is null then we are done with the join,
+ * unless we have outer tuples we need to null-fill.
*/
if (TupIsNull(innerTupleSlot))
{
@@ -1348,8 +1345,8 @@ ExecMergeJoin(MergeJoinState *node)
if (doFillOuter && !TupIsNull(outerTupleSlot))
{
/*
- * Need to emit left-join tuples for remaining
- * outer tuples.
+ * Need to emit left-join tuples for remaining outer
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
break;
@@ -1371,9 +1368,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
- * EXEC_MJ_ENDOUTER means we have run out of outer tuples,
- * but are doing a right/full join and therefore must
- * null-fill any remaing unmatched inner tuples.
+ * EXEC_MJ_ENDOUTER means we have run out of outer tuples, but
+ * are doing a right/full join and therefore must null-fill
+ * any remaing unmatched inner tuples.
*/
case EXEC_MJ_ENDOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n");
@@ -1384,8 +1381,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -1414,9 +1410,9 @@ ExecMergeJoin(MergeJoinState *node)
break;
/*
- * EXEC_MJ_ENDINNER means we have run out of inner tuples,
- * but are doing a left/full join and therefore must null-
- * fill any remaing unmatched outer tuples.
+ * EXEC_MJ_ENDINNER means we have run out of inner tuples, but
+ * are doing a left/full join and therefore must null- fill
+ * any remaing unmatched outer tuples.
*/
case EXEC_MJ_ENDINNER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDINNER\n");
@@ -1427,8 +1423,7 @@ ExecMergeJoin(MergeJoinState *node)
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
@@ -1493,10 +1488,9 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
ExecAssignExprContext(estate, &mergestate->js.ps);
/*
- * we need two additional econtexts in which we can compute the
- * join expressions from the left and right input tuples. The
- * node's regular econtext won't do because it gets reset too
- * often.
+ * we need two additional econtexts in which we can compute the join
+ * expressions from the left and right input tuples. The node's regular
+ * econtext won't do because it gets reset too often.
*/
mergestate->mj_OuterEContext = CreateExprContext(estate);
mergestate->mj_InnerEContext = CreateExprContext(estate);
@@ -1546,18 +1540,18 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
mergestate->mj_FillInner = false;
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_FillOuter = false;
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
/*
- * Can't handle right or full join with non-nil extra
- * joinclauses. This should have been caught by planner.
+ * Can't handle right or full join with non-nil extra joinclauses.
+ * This should have been caught by planner.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
@@ -1569,14 +1563,13 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
/*
- * Can't handle right or full join with non-nil extra
- * joinclauses.
+ * Can't handle right or full join with non-nil extra joinclauses.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
@@ -1675,8 +1668,8 @@ ExecReScanMergeJoin(MergeJoinState *node, ExprContext *exprCtxt)
node->mj_InnerTupleSlot = NULL;
/*
- * if chgParam of subnodes is not null then plans will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnodes is not null then plans will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 8b48ceefd19..a497e9ac337 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.38 2004/12/31 21:59:45 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,9 +85,9 @@ ExecNestLoop(NestLoopState *node)
econtext->ecxt_outertuple = outerTupleSlot;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
@@ -102,9 +102,9 @@ ExecNestLoop(NestLoopState *node)
}
/*
- * If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched
- * on the previous try.
+ * If we're doing an IN join, we want to return at most one row per outer
+ * tuple; so we can stop scanning the inner scan if we matched on the
+ * previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
@@ -112,8 +112,8 @@ ExecNestLoop(NestLoopState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
@@ -135,8 +135,7 @@ ExecNestLoop(NestLoopState *node)
outerTupleSlot = ExecProcNode(outerPlan);
/*
- * if there are no more outer tuples, then the join is
- * complete..
+ * if there are no more outer tuples, then the join is complete..
*/
if (TupIsNull(outerTupleSlot))
{
@@ -157,8 +156,8 @@ ExecNestLoop(NestLoopState *node)
/*
* The scan key of the inner plan might depend on the current
- * outer tuple (e.g. in index scans), that's why we pass our
- * expr context.
+ * outer tuple (e.g. in index scans), that's why we pass our expr
+ * context.
*/
ExecReScan(innerPlan, econtext);
}
@@ -181,10 +180,10 @@ ExecNestLoop(NestLoopState *node)
node->js.jointype == JOIN_LEFT)
{
/*
- * We are doing an outer join and there were no join
- * matches for this outer tuple. Generate a fake join
- * tuple with nulls for the inner tuple, and return it if
- * it passes the non-join quals.
+ * We are doing an outer join and there were no join matches
+ * for this outer tuple. Generate a fake join tuple with
+ * nulls for the inner tuple, and return it if it passes the
+ * non-join quals.
*/
econtext->ecxt_innertuple = node->nl_NullInnerTupleSlot;
@@ -193,8 +192,8 @@ ExecNestLoop(NestLoopState *node)
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and
- * return the slot containing the result tuple using
+ * qualification was satisfied so we project and return
+ * the slot containing the result tuple using
* ExecProject().
*/
TupleTableSlot *result;
@@ -220,12 +219,12 @@ ExecNestLoop(NestLoopState *node)
}
/*
- * at this point we have a new pair of inner and outer tuples so
- * we test the inner and outer tuples to see if they satisfy the
- * node's qualification.
+ * at this point we have a new pair of inner and outer tuples so we
+ * test the inner and outer tuples to see if they satisfy the node's
+ * qualification.
*
- * Only the joinquals determine MatchedOuter status, but all quals
- * must pass to actually return the tuple.
+ * Only the joinquals determine MatchedOuter status, but all quals must
+ * pass to actually return the tuple.
*/
ENL1_printf("testing qualification");
@@ -236,9 +235,8 @@ ExecNestLoop(NestLoopState *node)
if (otherqual == NIL || ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and return
- * the slot containing the result tuple using
- * ExecProject().
+ * qualification was satisfied so we project and return the
+ * slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -330,7 +328,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(nlstate)));
+ ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -408,10 +406,9 @@ ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt)
/*
* If outerPlan->chgParam is not null then plan will be automatically
- * re-scanned by first ExecProcNode. innerPlan is re-scanned for each
- * new outer tuple and MUST NOT be re-scanned from here or you'll get
- * troubles from inner index scans when outer Vars are used as
- * run-time keys...
+ * re-scanned by first ExecProcNode. innerPlan is re-scanned for each new
+ * outer tuple and MUST NOT be re-scanned from here or you'll get troubles
+ * from inner index scans when outer Vars are used as run-time keys...
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan, exprCtxt);
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 7c77dc07121..013c4e99794 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -38,7 +38,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.31 2005/04/24 15:32:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.32 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,9 +92,9 @@ ExecResult(ResultState *node)
}
/*
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous scan
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
@@ -107,16 +107,16 @@ ExecResult(ResultState *node)
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
/*
* if rs_done is true then it means that we were asked to return a
* constant tuple and we already did the last time ExecResult() was
- * called, OR that we failed the constant qual check. Either way, now
- * we are through.
+ * called, OR that we failed the constant qual check. Either way, now we
+ * are through.
*/
while (!node->rs_done)
{
@@ -125,8 +125,7 @@ ExecResult(ResultState *node)
if (outerPlan != NULL)
{
/*
- * retrieve tuples from the outer plan until there are no
- * more.
+ * retrieve tuples from the outer plan until there are no more.
*/
outerTupleSlot = ExecProcNode(outerPlan);
@@ -136,8 +135,7 @@ ExecResult(ResultState *node)
node->ps.ps_OuterTupleSlot = outerTupleSlot;
/*
- * XXX gross hack. use outer tuple as scan tuple for
- * projection
+ * XXX gross hack. use outer tuple as scan tuple for projection
*/
econtext->ecxt_outertuple = outerTupleSlot;
econtext->ecxt_scantuple = outerTupleSlot;
@@ -145,16 +143,16 @@ ExecResult(ResultState *node)
else
{
/*
- * if we don't have an outer plan, then we are just generating
- * the results from a constant target list. Do it only once.
+ * if we don't have an outer plan, then we are just generating the
+ * results from a constant target list. Do it only once.
*/
node->rs_done = true;
}
/*
- * form the result tuple using ExecProject(), and return it ---
- * unless the projection produces an empty set, in which case we
- * must loop back to see if there are more outerPlan tuples.
+ * form the result tuple using ExecProject(), and return it --- unless
+ * the projection produces an empty set, in which case we must loop
+ * back to see if there are more outerPlan tuples.
*/
resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index fab526f399c..91e0c81e036 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.53 2005/05/15 21:19:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.54 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,11 +62,11 @@ SeqNext(SeqScanState *node)
slot = node->ss_ScanTupleSlot;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer inside heap_getnext.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);
@@ -87,8 +87,8 @@ SeqNext(SeqScanState *node)
/*
* Note that unlike IndexScan, SeqScan never use keys in
- * heap_beginscan (and this is very bad) - so, here we do not
- * check are keys ok or not.
+ * heap_beginscan (and this is very bad) - so, here we do not check
+ * are keys ok or not.
*/
/* Flag for the next call that no more tuples */
@@ -102,20 +102,19 @@ SeqNext(SeqScanState *node)
tuple = heap_getnext(scandesc, direction);
/*
- * save the tuple and the buffer returned to us by the access methods
- * in our scan tuple slot and return the slot. Note: we pass 'false'
- * because tuples returned by heap_getnext() are pointers onto disk
- * pages and were not created with palloc() and so should not be
- * pfree()'d. Note also that ExecStoreTuple will increment the
- * refcount of the buffer; the refcount will not be dropped until the
- * tuple table slot is cleared.
+ * save the tuple and the buffer returned to us by the access methods in
+ * our scan tuple slot and return the slot. Note: we pass 'false' because
+ * tuples returned by heap_getnext() are pointers onto disk pages and were
+ * not created with palloc() and so should not be pfree()'d. Note also
+ * that ExecStoreTuple will increment the refcount of the buffer; the
+ * refcount will not be dropped until the tuple table slot is cleared.
*/
if (tuple)
- ExecStoreTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- scandesc->rs_cbuf, /* buffer associated with
- * this tuple */
- false); /* don't pfree this pointer */
+ ExecStoreTuple(tuple, /* tuple to store */
+ slot, /* slot to store in */
+ scandesc->rs_cbuf, /* buffer associated with this
+ * tuple */
+ false); /* don't pfree this pointer */
return slot;
}
@@ -157,8 +156,8 @@ InitScanRelation(SeqScanState *node, EState *estate)
HeapScanDesc currentScanDesc;
/*
- * get the relation object id from the relid'th entry in the range
- * table, open that relation and initialize the scan state.
+ * get the relation object id from the relid'th entry in the range table,
+ * open that relation and initialize the scan state.
*
* We acquire AccessShareLock for the duration of the scan.
*/
@@ -191,8 +190,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate)
SeqScanState *scanstate;
/*
- * Once upon a time it was possible to have an outerPlan of a SeqScan,
- * but not any more.
+ * Once upon a time it was possible to have an outerPlan of a SeqScan, but
+ * not any more.
*/
Assert(outerPlan(node) == NULL);
Assert(innerPlan(node) == NULL);
@@ -291,9 +290,8 @@ ExecEndSeqScan(SeqScanState *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * InitScanRelation. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * InitScanRelation. This lock should be held till end of transaction.
+ * (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
@@ -359,10 +357,10 @@ ExecSeqRestrPos(SeqScanState *node)
HeapScanDesc scan = node->ss_currentScanDesc;
/*
- * Clear any reference to the previously returned tuple. This is
- * needed because the slot is simply pointing at scan->rs_cbuf, which
- * heap_restrpos will change; we'd have an internally inconsistent
- * slot if we didn't do this.
+ * Clear any reference to the previously returned tuple. This is needed
+ * because the slot is simply pointing at scan->rs_cbuf, which
+ * heap_restrpos will change; we'd have an internally inconsistent slot if
+ * we didn't do this.
*/
ExecClearTuple(node->ss_ScanTupleSlot);
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 6daadfd0b8c..a5ca58354c6 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.17 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.18 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,8 +58,8 @@ ExecSetOp(SetOpState *node)
resultTupleSlot = node->ps.ps_ResultTupleSlot;
/*
- * If the previously-returned tuple needs to be returned more than
- * once, keep returning it.
+ * If the previously-returned tuple needs to be returned more than once,
+ * keep returning it.
*/
if (node->numOutput > 0)
{
@@ -71,9 +71,9 @@ ExecSetOp(SetOpState *node)
ExecClearTuple(resultTupleSlot);
/*
- * Absorb groups of duplicate tuples, counting them, and saving the
- * first of each group as a possible return value. At the end of each
- * group, decide whether to return anything.
+ * Absorb groups of duplicate tuples, counting them, and saving the first
+ * of each group as a possible return value. At the end of each group,
+ * decide whether to return anything.
*
* We assume that the tuples arrive in sorted order so we can detect
* duplicates easily.
@@ -177,8 +177,8 @@ ExecSetOp(SetOpState *node)
else
{
/*
- * Current tuple is member of same group as resultTuple. Count
- * it in the appropriate counter.
+ * Current tuple is member of same group as resultTuple. Count it
+ * in the appropriate counter.
*/
int flag;
bool isNull;
@@ -232,8 +232,8 @@ ExecInitSetOp(SetOp *node, EState *estate)
* Miscellaneous initialization
*
* SetOp nodes have no ExprContext initialization because they never call
- * ExecQual or ExecProject. But they do need a per-tuple memory
- * context anyway for calling execTuplesMatch.
+ * ExecQual or ExecProject. But they do need a per-tuple memory context
+ * anyway for calling execTuplesMatch.
*/
setopstate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -255,8 +255,8 @@ ExecInitSetOp(SetOp *node, EState *estate)
outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate);
/*
- * setop nodes do no projections, so initialize projection info for
- * this node appropriately
+ * setop nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&setopstate->ps);
setopstate->ps.ps_ProjInfo = NULL;
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index ef025374149..d3e4fb5e0b3 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.50 2005/03/16 21:38:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.51 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,9 +56,8 @@ ExecSort(SortState *node)
tuplesortstate = (Tuplesortstate *) node->tuplesortstate;
/*
- * If first time through, read all tuples from outer plan and pass
- * them to tuplesort.c. Subsequent calls just fetch tuples from
- * tuplesort.
+ * If first time through, read all tuples from outer plan and pass them to
+ * tuplesort.c. Subsequent calls just fetch tuples from tuplesort.
*/
if (!node->sort_Done)
@@ -71,8 +70,8 @@ ExecSort(SortState *node)
"sorting subplan");
/*
- * Want to scan subplan in the forward direction while creating
- * the sorted data.
+ * Want to scan subplan in the forward direction while creating the
+ * sorted data.
*/
estate->es_direction = ForwardScanDirection;
@@ -191,8 +190,8 @@ ExecInitSort(Sort *node, EState *estate)
outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections.
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&sortstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&sortstate->ss);
@@ -286,9 +285,9 @@ void
ExecReScanSort(SortState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't sorted yet, just return. If outerplan' chgParam is
- * not NULL then it will be re-scanned by ExecProcNode, else - no
- * reason to re-scan it at all.
+ * If we haven't sorted yet, just return. If outerplan' chgParam is not
+ * NULL then it will be re-scanned by ExecProcNode, else - no reason to
+ * re-scan it at all.
*/
if (!node->sort_Done)
return;
@@ -296,8 +295,8 @@ ExecReScanSort(SortState *node, ExprContext *exprCtxt)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous sort results;
- * we have to re-read the subplan and re-sort.
+ * If subnode is to be rescanned then we forget previous sort results; we
+ * have to re-read the subplan and re-sort.
*
* Otherwise we can just rewind and rescan the sorted output.
*/
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 5bed87aea9b..0e7b6df7225 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.69 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,15 +86,15 @@ ExecHashSubPlan(SubPlanState *node,
elog(ERROR, "hashed subplan with direct correlation not supported");
/*
- * If first time through or we need to rescan the subplan, build the
- * hash table.
+ * If first time through or we need to rescan the subplan, build the hash
+ * table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
/*
- * The result for an empty subplan is always FALSE; no need to
- * evaluate lefthand side.
+ * The result for an empty subplan is always FALSE; no need to evaluate
+ * lefthand side.
*/
*isNull = false;
if (!node->havehashrows && !node->havenullrows)
@@ -108,34 +108,32 @@ ExecHashSubPlan(SubPlanState *node,
slot = ExecProject(node->projLeft, NULL);
/*
- * Note: because we are typically called in a per-tuple context, we
- * have to explicitly clear the projected tuple before returning.
- * Otherwise, we'll have a double-free situation: the per-tuple
- * context will probably be reset before we're called again, and then
- * the tuple slot will think it still needs to free the tuple.
+ * Note: because we are typically called in a per-tuple context, we have
+ * to explicitly clear the projected tuple before returning. Otherwise,
+ * we'll have a double-free situation: the per-tuple context will probably
+ * be reset before we're called again, and then the tuple slot will think
+ * it still needs to free the tuple.
*/
/*
- * Since the hashtable routines will use innerecontext's per-tuple
- * memory as working memory, be sure to reset it for each tuple.
+ * Since the hashtable routines will use innerecontext's per-tuple memory
+ * as working memory, be sure to reset it for each tuple.
*/
ResetExprContext(innerecontext);
/*
- * If the LHS is all non-null, probe for an exact match in the main
- * hash table. If we find one, the result is TRUE. Otherwise, scan
- * the partly-null table to see if there are any rows that aren't
- * provably unequal to the LHS; if so, the result is UNKNOWN. (We
- * skip that part if we don't care about UNKNOWN.) Otherwise, the
- * result is FALSE.
+ * If the LHS is all non-null, probe for an exact match in the main hash
+ * table. If we find one, the result is TRUE. Otherwise, scan the
+ * partly-null table to see if there are any rows that aren't provably
+ * unequal to the LHS; if so, the result is UNKNOWN. (We skip that part
+ * if we don't care about UNKNOWN.) Otherwise, the result is FALSE.
*
- * Note: the reason we can avoid a full scan of the main hash table is
- * that the combining operators are assumed never to yield NULL when
- * both inputs are non-null. If they were to do so, we might need to
- * produce UNKNOWN instead of FALSE because of an UNKNOWN result in
- * comparing the LHS to some main-table entry --- which is a
- * comparison we will not even make, unless there's a chance match of
- * hash keys.
+ * Note: the reason we can avoid a full scan of the main hash table is that
+ * the combining operators are assumed never to yield NULL when both
+ * inputs are non-null. If they were to do so, we might need to produce
+ * UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the
+ * LHS to some main-table entry --- which is a comparison we will not even
+ * make, unless there's a chance match of hash keys.
*/
if (slotNoNulls(slot))
{
@@ -157,14 +155,14 @@ ExecHashSubPlan(SubPlanState *node,
}
/*
- * When the LHS is partly or wholly NULL, we can never return TRUE. If
- * we don't care about UNKNOWN, just return FALSE. Otherwise, if the
- * LHS is wholly NULL, immediately return UNKNOWN. (Since the
- * combining operators are strict, the result could only be FALSE if
- * the sub-select were empty, but we already handled that case.)
- * Otherwise, we must scan both the main and partly-null tables to see
- * if there are any rows that aren't provably unequal to the LHS; if
- * so, the result is UNKNOWN. Otherwise, the result is FALSE.
+ * When the LHS is partly or wholly NULL, we can never return TRUE. If we
+ * don't care about UNKNOWN, just return FALSE. Otherwise, if the LHS is
+ * wholly NULL, immediately return UNKNOWN. (Since the combining
+ * operators are strict, the result could only be FALSE if the sub-select
+ * were empty, but we already handled that case.) Otherwise, we must scan
+ * both the main and partly-null tables to see if there are any rows that
+ * aren't provably unequal to the LHS; if so, the result is UNKNOWN.
+ * Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
@@ -217,9 +215,9 @@ ExecScanSubPlan(SubPlanState *node,
ArrayBuildState *astate = NULL;
/*
- * We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for manipulating its
- * chgParam, calling ExecProcNode on it, etc.
+ * We are probably in a short-lived expression-evaluation context. Switch
+ * to the child plan's per-query context for manipulating its chgParam,
+ * calling ExecProcNode on it, etc.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@@ -245,24 +243,23 @@ ExecScanSubPlan(SubPlanState *node,
ExecReScan(planstate, NULL);
/*
- * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
- * result is boolean as are the results of the combining operators. We
- * combine results within a tuple (if there are multiple columns)
- * using OR semantics if "useOr" is true, AND semantics if not. We
- * then combine results across tuples (if the subplan produces more
- * than one) using OR semantics for ANY_SUBLINK or AND semantics for
- * ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
- * the subplan.) NULL results from the combining operators are handled
- * according to the usual SQL semantics for OR and AND. The result
- * for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
- * NULL for MULTIEXPR_SUBLINK.
+ * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
+ * is boolean as are the results of the combining operators. We combine
+ * results within a tuple (if there are multiple columns) using OR
+ * semantics if "useOr" is true, AND semantics if not. We then combine
+ * results across tuples (if the subplan produces more than one) using OR
+ * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
+ * (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
+ * NULL results from the combining operators are handled according to the
+ * usual SQL semantics for OR and AND. The result for no input tuples is
+ * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
+ * MULTIEXPR_SUBLINK.
*
- * For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. For ARRAY_SUBLINK we allow the
- * subplan to produce more than one tuple. In either case, if zero
- * tuples are produced, we return NULL. Assuming we get a tuple, we
- * just use its first column (there can be only one non-junk column in
- * this case).
+ * For EXPR_SUBLINK we require the subplan to produce no more than one tuple,
+ * else an error is raised. For ARRAY_SUBLINK we allow the subplan to
+ * produce more than one tuple. In either case, if zero tuples are
+ * produced, we return NULL. Assuming we get a tuple, we just use its
+ * first column (there can be only one non-junk column in this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
@@ -294,12 +291,12 @@ ExecScanSubPlan(SubPlanState *node,
found = true;
/*
- * We need to copy the subplan's tuple in case the result is
- * of pass-by-ref type --- our return value will point into
- * this copied tuple! Can't use the subplan's instance of the
- * tuple since it won't still be valid after next
- * ExecProcNode() call. node->curTuple keeps track of the
- * copied tuple for eventual freeing.
+ * We need to copy the subplan's tuple in case the result is of
+ * pass-by-ref type --- our return value will point into this
+ * copied tuple! Can't use the subplan's instance of the tuple
+ * since it won't still be valid after next ExecProcNode() call.
+ * node->curTuple keeps track of the copied tuple for eventual
+ * freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)
@@ -350,8 +347,7 @@ ExecScanSubPlan(SubPlanState *node,
bool expnull;
/*
- * Load up the Param representing this column of the
- * sub-select.
+ * Load up the Param representing this column of the sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
@@ -436,8 +432,8 @@ ExecScanSubPlan(SubPlanState *node,
{
/*
* deal with empty subplan result. result/isNull were previously
- * initialized correctly for all sublink types except EXPR, ARRAY,
- * and MULTIEXPR; for those, return NULL.
+ * initialized correctly for all sublink types except EXPR, ARRAY, and
+ * MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
@@ -478,19 +474,19 @@ buildSubPlanHash(SubPlanState *node)
Assert(!subplan->useOr);
/*
- * If we already had any hash tables, destroy 'em; then create empty
- * hash table(s).
+ * If we already had any hash tables, destroy 'em; then create empty hash
+ * table(s).
*
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
- * NULL) results of the IN operation, then we have to store subplan
- * output rows that are partly or wholly NULL. We store such rows in
- * a separate hash table that we expect will be much smaller than the
- * main table. (We can use hashing to eliminate partly-null rows that
- * are not distinct. We keep them separate to minimize the cost of
- * the inevitable full-table searches; see findPartialMatch.)
+ * NULL) results of the IN operation, then we have to store subplan output
+ * rows that are partly or wholly NULL. We store such rows in a separate
+ * hash table that we expect will be much smaller than the main table.
+ * (We can use hashing to eliminate partly-null rows that are not
+ * distinct. We keep them separate to minimize the cost of the inevitable
+ * full-table searches; see findPartialMatch.)
*
- * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
- * need to store subplan output rows that contain NULL.
+ * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need
+ * to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@@ -532,9 +528,8 @@ buildSubPlanHash(SubPlanState *node)
}
/*
- * We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for calling
- * ExecProcNode.
+ * We are probably in a short-lived expression-evaluation context. Switch
+ * to the child plan's per-query context for calling ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@@ -544,9 +539,8 @@ buildSubPlanHash(SubPlanState *node)
ExecReScan(planstate, NULL);
/*
- * Scan the subplan and load the hash table(s). Note that when there
- * are duplicate rows coming out of the sub-select, only one copy is
- * stored.
+ * Scan the subplan and load the hash table(s). Note that when there are
+ * duplicate rows coming out of the sub-select, only one copy is stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
@@ -557,8 +551,8 @@ buildSubPlanHash(SubPlanState *node)
bool isnew;
/*
- * Load up the Params representing the raw sub-select outputs,
- * then form the projection tuple to store in the hashtable.
+ * Load up the Params representing the raw sub-select outputs, then
+ * form the projection tuple to store in the hashtable.
*/
foreach(plst, subplan->paramIds)
{
@@ -588,18 +582,18 @@ buildSubPlanHash(SubPlanState *node)
}
/*
- * Reset innerecontext after each inner tuple to free any memory
- * used in hash computation or comparison routines.
+ * Reset innerecontext after each inner tuple to free any memory used
+ * in hash computation or comparison routines.
*/
ResetExprContext(innerecontext);
}
/*
- * Since the projected tuples are in the sub-query's context and not
- * the main context, we'd better clear the tuple slot before there's
- * any chance of a reset of the sub-query's context. Else we will
- * have the potential for a double free attempt. (XXX possibly
- * no longer needed, but can't hurt.)
+ * Since the projected tuples are in the sub-query's context and not the
+ * main context, we'd better clear the tuple slot before there's any
+ * chance of a reset of the sub-query's context. Else we will have the
+ * potential for a double free attempt. (XXX possibly no longer needed,
+ * but can't hurt.)
*/
ExecClearTuple(node->projRight->pi_slot);
@@ -710,10 +704,10 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
/*
* create an EState for the subplan
*
- * The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access
- * were done differently, the subquery could share our EState, which
- * would eliminate some thrashing about in this module...
+ * The subquery needs its own EState because it has its own rangetable. It
+ * shares our Param ID space, however. XXX if rangetable access were done
+ * differently, the subquery could share our EState, which would eliminate
+ * some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
@@ -739,13 +733,12 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
MemoryContextSwitchTo(oldcontext);
/*
- * If this plan is un-correlated or undirect correlated one and want
- * to set params for parent plan then mark parameters as needing
- * evaluation.
+ * If this plan is un-correlated or undirect correlated one and want to
+ * set params for parent plan then mark parameters as needing evaluation.
*
* Note that in the case of un-correlated subqueries we don't care about
- * setting parent->chgParam here: indices take care about it, for
- * others - it doesn't matter...
+ * setting parent->chgParam here: indices take care about it, for others -
+ * it doesn't matter...
*/
if (subplan->setParam != NIL)
{
@@ -761,8 +754,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
}
/*
- * If we are going to hash the subquery output, initialize relevant
- * stuff. (We don't create the hashtable until needed, though.)
+ * If we are going to hash the subquery output, initialize relevant stuff.
+ * (We don't create the hashtable until needed, though.)
*/
if (subplan->useHashTable)
{
@@ -794,18 +787,17 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
/*
* We use ExecProject to evaluate the lefthand and righthand
- * expression lists and form tuples. (You might think that we
- * could use the sub-select's output tuples directly, but that is
- * not the case if we had to insert any run-time coercions of the
- * sub-select's output datatypes; anyway this avoids storing any
- * resjunk columns that might be in the sub-select's output.) Run
- * through the combining expressions to build tlists for the
- * lefthand and righthand sides. We need both the ExprState list
- * (for ExecProject) and the underlying parse Exprs (for
- * ExecTypeFromTL).
+ * expression lists and form tuples. (You might think that we could
+ * use the sub-select's output tuples directly, but that is not the
+ * case if we had to insert any run-time coercions of the sub-select's
+ * output datatypes; anyway this avoids storing any resjunk columns
+ * that might be in the sub-select's output.) Run through the
+ * combining expressions to build tlists for the lefthand and
+ * righthand sides. We need both the ExprState list (for ExecProject)
+ * and the underlying parse Exprs (for ExecTypeFromTL).
*
- * We also extract the combining operators themselves to initialize
- * the equality and hashing functions for the hash tables.
+ * We also extract the combining operators themselves to initialize the
+ * equality and hashing functions for the hash tables.
*/
lefttlist = righttlist = NIL;
leftptlist = rightptlist = NIL;
@@ -869,21 +861,21 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
}
/*
- * Create a tupletable to hold these tuples. (Note: we never
- * bother to free the tupletable explicitly; that's okay because
- * it will never store raw disk tuples that might have associated
- * buffer pins. The only resource involved is memory, which will
- * be cleaned up by freeing the query context.)
+ * Create a tupletable to hold these tuples. (Note: we never bother
+ * to free the tupletable explicitly; that's okay because it will
+ * never store raw disk tuples that might have associated buffer pins.
+ * The only resource involved is memory, which will be cleaned up by
+ * freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);
/*
- * Construct tupdescs, slots and projection nodes for left and
- * right sides. The lefthand expressions will be evaluated in the
- * parent plan node's exprcontext, which we don't have access to
- * here. Fortunately we can just pass NULL for now and fill it in
- * later (hack alert!). The righthand expressions will be
- * evaluated in our own innerecontext.
+ * Construct tupdescs, slots and projection nodes for left and right
+ * sides. The lefthand expressions will be evaluated in the parent
+ * plan node's exprcontext, which we don't have access to here.
+ * Fortunately we can just pass NULL for now and fill it in later
+ * (hack alert!). The righthand expressions will be evaluated in our
+ * own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
slot = ExecAllocTableSlot(tupTable);
@@ -983,11 +975,10 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
found = true;
/*
- * We need to copy the subplan's tuple into our own context, in
- * case any of the params are pass-by-ref type --- the pointers
- * stored in the param structs will point at this copied tuple!
- * node->curTuple keeps track of the copied tuple for eventual
- * freeing.
+ * We need to copy the subplan's tuple into our own context, in case
+ * any of the params are pass-by-ref type --- the pointers stored in
+ * the param structs will point at this copied tuple! node->curTuple
+ * keeps track of the copied tuple for eventual freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 90e59f90f4d..9b1bd251435 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.26 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.27 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,13 +62,13 @@ SubqueryNext(SubqueryScanState *node)
direction = estate->es_direction;
/*
- * We need not support EvalPlanQual here, since we are not scanning a
- * real relation.
+ * We need not support EvalPlanQual here, since we are not scanning a real
+ * relation.
*/
/*
- * Get the next tuple from the sub-query. We have to be careful to
- * run it in its appropriate memory context.
+ * Get the next tuple from the sub-query. We have to be careful to run it
+ * in its appropriate memory context.
*/
node->sss_SubEState->es_direction = direction;
@@ -170,11 +170,10 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate)
ExecCheckRTPerms(rte->subquery->rtable);
/*
- * The subquery needs its own EState because it has its own
- * rangetable. It shares our Param ID space, however. XXX if
- * rangetable access were done differently, the subquery could share
- * our EState, which would eliminate some thrashing about in this
- * module...
+ * The subquery needs its own EState because it has its own rangetable. It
+ * shares our Param ID space, however. XXX if rangetable access were done
+ * differently, the subquery could share our EState, which would eliminate
+ * some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
@@ -246,7 +245,7 @@ ExecEndSubqueryScan(SubqueryScanState *node)
* clean out the upper tuple table
*/
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
+ node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
/*
* close down subquery
@@ -278,9 +277,8 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because
- * the subplan has its own memory context in which its chgParam state
- * lives.
+ * changed-parameter signaling myself. This is just as well, because the
+ * subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 4cc1e4df148..c8708f58311 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.42 2005/09/22 15:09:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.43 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -126,8 +126,8 @@ TidNext(TidScanState *node)
return slot; /* return empty slot */
/*
- * XXX shouldn't we check here to make sure tuple matches TID
- * list? In runtime-key case this is not certain, is it?
+ * XXX shouldn't we check here to make sure tuple matches TID list? In
+ * runtime-key case this is not certain, is it?
*/
ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
@@ -150,9 +150,9 @@ TidNext(TidScanState *node)
tuple = &(node->tss_htup);
/*
- * ok, now that we have what we need, fetch an tid tuple. if scanning
- * this tid succeeded then return the appropriate heap tuple.. else
- * return NULL.
+ * ok, now that we have what we need, fetch an tid tuple. if scanning this
+ * tid succeeded then return the appropriate heap tuple.. else return
+ * NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
@@ -184,10 +184,10 @@ TidNext(TidScanState *node)
/*
* store the scanned tuple in the scan tuple slot of the scan
- * state. Eventually we will only do this and not return a
- * tuple. Note: we pass 'false' because tuples returned by
- * amgetnext are pointers onto disk pages and were not created
- * with palloc() and so should not be pfree()'d.
+ * state. Eventually we will only do this and not return a tuple.
+ * Note: we pass 'false' because tuples returned by amgetnext are
+ * pointers onto disk pages and were not created with palloc() and
+ * so should not be pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
@@ -196,8 +196,7 @@ TidNext(TidScanState *node)
/*
* At this point we have an extra pin on the buffer, because
- * ExecStoreTuple incremented the pin count. Drop our local
- * pin.
+ * ExecStoreTuple incremented the pin count. Drop our local pin.
*/
ReleaseBuffer(buffer);
@@ -229,8 +228,8 @@ TidNext(TidScanState *node)
}
/*
- * if we get here it means the tid scan failed so we are at the end of
- * the scan..
+ * if we get here it means the tid scan failed so we are at the end of the
+ * scan..
*/
return ExecClearTuple(slot);
}
@@ -420,8 +419,8 @@ ExecInitTidScan(TidScan *node, EState *estate)
tidstate->tss_TidPtr = -1;
/*
- * get the range table and direction information from the execution
- * state (these are needed to open the relations).
+ * get the range table and direction information from the execution state
+ * (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index b00a572e14c..ab3879d7cc6 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.47 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,10 +56,10 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
- * We return the first tuple from each group of duplicates (or the last
- * tuple of each group, when moving backwards). At either end of the
- * subplan, clear the result slot so that we correctly return the
- * first/last tuple when reversing direction.
+ * We return the first tuple from each group of duplicates (or the last tuple
+ * of each group, when moving backwards). At either end of the subplan,
+ * clear the result slot so that we correctly return the first/last tuple
+ * when reversing direction.
*/
for (;;)
{
@@ -81,9 +81,9 @@ ExecUnique(UniqueState *node)
break;
/*
- * Else test if the new tuple and the previously returned tuple
- * match. If so then we loop back and fetch another new tuple
- * from the subplan.
+ * Else test if the new tuple and the previously returned tuple match.
+ * If so then we loop back and fetch another new tuple from the
+ * subplan.
*/
if (!execTuplesMatch(slot, resultTupleSlot,
plannode->numCols, plannode->uniqColIdx,
@@ -93,10 +93,10 @@ ExecUnique(UniqueState *node)
}
/*
- * We have a new tuple different from the previous saved tuple (if
- * any). Save it and return it. We must copy it because the source
- * subplan won't guarantee that this source tuple is still accessible
- * after fetching the next source tuple.
+ * We have a new tuple different from the previous saved tuple (if any).
+ * Save it and return it. We must copy it because the source subplan
+ * won't guarantee that this source tuple is still accessible after
+ * fetching the next source tuple.
*/
return ExecCopySlot(resultTupleSlot, slot);
}
@@ -123,9 +123,9 @@ ExecInitUnique(Unique *node, EState *estate)
/*
* Miscellaneous initialization
*
- * Unique nodes have no ExprContext initialization because they never
- * call ExecQual or ExecProject. But they do need a per-tuple memory
- * context anyway for calling execTuplesMatch.
+ * Unique nodes have no ExprContext initialization because they never call
+ * ExecQual or ExecProject. But they do need a per-tuple memory context
+ * anyway for calling execTuplesMatch.
*/
uniquestate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -147,8 +147,8 @@ ExecInitUnique(Unique *node, EState *estate)
outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate);
/*
- * unique nodes do no projections, so initialize projection info for
- * this node appropriately
+ * unique nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&uniquestate->ps);
uniquestate->ps.ps_ProjInfo = NULL;
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index ff1b8932ea1..c4aef41a8d0 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.142 2005/10/01 18:43:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.143 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,11 +38,11 @@ static int _SPI_curid = -1;
static void _SPI_prepare_plan(const char *src, _SPI_plan *plan);
static int _SPI_execute_plan(_SPI_plan *plan,
- Datum *Values, const char *Nulls,
- Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, long tcount);
+ Datum *Values, const char *Nulls,
+ Snapshot snapshot, Snapshot crosscheck_snapshot,
+ bool read_only, long tcount);
-static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
static void _SPI_error_callback(void *arg);
@@ -66,8 +66,8 @@ SPI_connect(void)
int newdepth;
/*
- * When procedure called by Executor _SPI_curid expected to be equal
- * to _SPI_connected
+ * When procedure called by Executor _SPI_curid expected to be equal to
+ * _SPI_connected
*/
if (_SPI_curid != _SPI_connected)
return SPI_ERROR_CONNECT;
@@ -106,28 +106,28 @@ SPI_connect(void)
_SPI_current->processed = 0;
_SPI_current->lastoid = InvalidOid;
_SPI_current->tuptable = NULL;
- _SPI_current->procCxt = NULL; /* in case we fail to create 'em */
+ _SPI_current->procCxt = NULL; /* in case we fail to create 'em */
_SPI_current->execCxt = NULL;
_SPI_current->connectSubid = GetCurrentSubTransactionId();
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context, but
- * we may not be inside a portal (consider deferred-trigger
- * execution). Perhaps CurTransactionContext would do? For now it
- * doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
+ * XXX it would be better to use PortalContext as the parent context, but we
+ * may not be inside a portal (consider deferred-trigger execution).
+ * Perhaps CurTransactionContext would do? For now it doesn't matter
+ * because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Proc",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Exec",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
/* ... and switch to procedure's context */
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
@@ -161,9 +161,9 @@ SPI_finish(void)
SPI_tuptable = NULL;
/*
- * After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are
- * closing connection to SPI and returning to upper Executor and so
- * _SPI_connected must be equal to _SPI_curid.
+ * After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are closing
+ * connection to SPI and returning to upper Executor and so _SPI_connected
+ * must be equal to _SPI_curid.
*/
_SPI_connected--;
_SPI_curid--;
@@ -182,9 +182,9 @@ void
AtEOXact_SPI(bool isCommit)
{
/*
- * Note that memory contexts belonging to SPI stack entries will be
- * freed automatically, so we can ignore them here. We just need to
- * restore our static variables to initial state.
+ * Note that memory contexts belonging to SPI stack entries will be freed
+ * automatically, so we can ignore them here. We just need to restore our
+ * static variables to initial state.
*/
if (isCommit && _SPI_connected != -1)
ereport(WARNING,
@@ -236,8 +236,8 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
/*
* Pop the stack entry and reset global variables. Unlike
- * SPI_finish(), we don't risk switching to memory contexts that
- * might be already gone.
+ * SPI_finish(), we don't risk switching to memory contexts that might
+ * be already gone.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
@@ -560,8 +560,8 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
mtuple = heap_formtuple(rel->rd_att, v, n);
/*
- * copy the identification info of the old tuple: t_ctid, t_self,
- * and OID (if any)
+ * copy the identification info of the old tuple: t_ctid, t_self, and
+ * OID (if any)
*/
mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
mtuple->t_self = tuple->t_self;
@@ -658,8 +658,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
getTypeOutputInfo(typoid, &foutoid, &typisvarlena);
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid memory
+ * leakage inside the type's output routine.
*/
if (typisvarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
@@ -755,7 +755,7 @@ SPI_getrelname(Relation rel)
char *
SPI_getnspname(Relation rel)
{
- return get_namespace_name(RelationGetNamespace(rel));
+ return get_namespace_name(RelationGetNamespace(rel));
}
void *
@@ -939,8 +939,8 @@ SPI_cursor_open(const char *name, void *plan,
portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
/*
- * Set up the snapshot to use. (PortalStart will do CopySnapshot,
- * so we skip that here.)
+ * Set up the snapshot to use. (PortalStart will do CopySnapshot, so we
+ * skip that here.)
*/
if (read_only)
snapshot = ActiveSnapshot;
@@ -1214,7 +1214,7 @@ spi_printtup(TupleTableSlot *slot, DestReceiver *self)
tuptable->free = 256;
tuptable->alloced += tuptable->free;
tuptable->vals = (HeapTuple *) repalloc(tuptable->vals,
- tuptable->alloced * sizeof(HeapTuple));
+ tuptable->alloced * sizeof(HeapTuple));
}
tuptable->vals[tuptable->alloced - tuptable->free] =
@@ -1247,9 +1247,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan)
int nargs = plan->nargs;
/*
- * Increment CommandCounter to see changes made by now. We must do
- * this to be sure of seeing any schema changes made by a just-preceding
- * SPI command. (But we don't bother advancing the snapshot, since the
+ * Increment CommandCounter to see changes made by now. We must do this
+ * to be sure of seeing any schema changes made by a just-preceding SPI
+ * command. (But we don't bother advancing the snapshot, since the
* planner generally operates under SnapshotNow rules anyway.)
*/
CommandCounterIncrement();
@@ -1270,9 +1270,9 @@ _SPI_prepare_plan(const char *src, _SPI_plan *plan)
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
- * We save the querytrees from each raw parsetree as a separate
- * sublist. This allows _SPI_execute_plan() to know where the
- * boundaries between original queries fall.
+ * We save the querytrees from each raw parsetree as a separate sublist.
+ * This allows _SPI_execute_plan() to know where the boundaries between
+ * original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
@@ -1316,7 +1316,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
volatile int res = 0;
volatile uint32 my_processed = 0;
volatile Oid my_lastoid = InvalidOid;
- SPITupleTable * volatile my_tuptable = NULL;
+ SPITupleTable *volatile my_tuptable = NULL;
Snapshot saveActiveSnapshot;
/* Be sure to restore ActiveSnapshot on error exit */
@@ -1407,9 +1407,10 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
if (read_only && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateQueryTag(queryTree))));
+ /* translator: %s is a SQL statement name */
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateQueryTag(queryTree))));
+
/*
* If not read-only mode, advance the command counter before
* each command.
@@ -1462,6 +1463,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
}
FreeSnapshot(ActiveSnapshot);
ActiveSnapshot = NULL;
+
/*
* The last canSetTag query sets the auxiliary values returned
* to the caller. Be careful to free any tuptables not
@@ -1520,10 +1522,10 @@ _SPI_pquery(QueryDesc *queryDesc, long tcount)
{
case CMD_SELECT:
res = SPI_OK_SELECT;
- if (queryDesc->parsetree->into) /* select into table? */
+ if (queryDesc->parsetree->into) /* select into table? */
{
res = SPI_OK_SELINTO;
- queryDesc->dest = None_Receiver; /* don't output results */
+ queryDesc->dest = None_Receiver; /* don't output results */
}
else if (queryDesc->dest->mydest != SPI)
{
@@ -1589,8 +1591,8 @@ _SPI_error_callback(void *arg)
int syntaxerrposition;
/*
- * If there is a syntax error position, convert to internal syntax
- * error; otherwise treat the query as an item of context stack
+ * If there is a syntax error position, convert to internal syntax error;
+ * otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
@@ -1635,13 +1637,12 @@ _SPI_cursor_operation(Portal portal, bool forward, long count,
dest);
/*
- * Think not to combine this store with the preceding function call.
- * If the portal contains calls to functions that use SPI, then
- * SPI_stack is likely to move around while the portal runs. When
- * control returns, _SPI_current will point to the correct stack
- * entry... but the pointer may be different than it was beforehand.
- * So we must be sure to re-fetch the pointer after the function call
- * completes.
+ * Think not to combine this store with the preceding function call. If
+ * the portal contains calls to functions that use SPI, then SPI_stack is
+ * likely to move around while the portal runs. When control returns,
+ * _SPI_current will point to the correct stack entry... but the pointer
+ * may be different than it was beforehand. So we must be sure to re-fetch
+ * the pointer after the function call completes.
*/
_SPI_current->processed = nfetched;
@@ -1738,12 +1739,13 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+ /* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
- * Create a memory context for the plan. We don't expect the plan to
- * be very large, so use smaller-than-default alloc parameters.
+ * Create a memory context for the plan. We don't expect the plan to be
+ * very large, so use smaller-than-default alloc parameters.
*/
plancxt = AllocSetContextCreate(parentcxt,
"SPI Plan",
diff --git a/src/backend/lib/dllist.c b/src/backend/lib/dllist.c
index 59708bee78c..95802a31eec 100644
--- a/src/backend/lib/dllist.c
+++ b/src/backend/lib/dllist.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.31 2005/01/18 22:59:32 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.32 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,7 +148,7 @@ DLAddHead(Dllist *l, Dlelem *e)
e->dle_prev = NULL;
l->dll_head = e;
- if (l->dll_tail == NULL) /* if this is first element added */
+ if (l->dll_tail == NULL) /* if this is first element added */
l->dll_tail = e;
}
@@ -163,7 +163,7 @@ DLAddTail(Dllist *l, Dlelem *e)
e->dle_next = NULL;
l->dll_tail = e;
- if (l->dll_head == NULL) /* if this is first element added */
+ if (l->dll_head == NULL) /* if this is first element added */
l->dll_head = e;
}
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 49d509e57b7..53e4913b4a8 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.41 2004/12/31 21:59:48 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.42 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,8 +106,8 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
Assert(str != NULL);
/*
- * If there's hardly any space, don't bother trying, just fail to make
- * the caller enlarge the buffer first.
+ * If there's hardly any space, don't bother trying, just fail to make the
+ * caller enlarge the buffer first.
*/
avail = str->maxlen - str->len - 1;
if (avail < 16)
@@ -115,8 +115,8 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
/*
* Assert check here is to catch buggy vsnprintf that overruns the
- * specified buffer length. Solaris 7 in 64-bit mode is an example of
- * a platform with such a bug.
+ * specified buffer length. Solaris 7 in 64-bit mode is an example of a
+ * platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen - 1] = '\0';
@@ -127,9 +127,9 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
Assert(str->data[str->maxlen - 1] == '\0');
/*
- * Note: some versions of vsnprintf return the number of chars
- * actually stored, but at least one returns -1 on failure. Be
- * conservative about believing whether the print worked.
+ * Note: some versions of vsnprintf return the number of chars actually
+ * stored, but at least one returns -1 on failure. Be conservative about
+ * believing whether the print worked.
*/
if (nprinted >= 0 && nprinted < avail - 1)
{
@@ -193,8 +193,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
str->len += datalen;
/*
- * Keep a trailing null in place, even though it's probably useless
- * for binary data...
+ * Keep a trailing null in place, even though it's probably useless for
+ * binary data...
*/
str->data[str->len] = '\0';
}
@@ -222,9 +222,9 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against ridiculous "needed" values, which can occur if we're
- * fed bogus data. Without this, we can get an overflow or infinite
- * loop in the following.
+ * Guard against ridiculous "needed" values, which can occur if we're fed
+ * bogus data. Without this, we can get an overflow or infinite loop in
+ * the following.
*/
if (needed < 0 ||
((Size) needed) >= (MaxAllocSize - (Size) str->len))
@@ -239,19 +239,18 @@ enlargeStringInfo(StringInfo str, int needed)
return; /* got enough space already */
/*
- * We don't want to allocate just a little more space with each
- * append; for efficiency, double the buffer size each time it
- * overflows. Actually, we might need to more than double it if
- * 'needed' is big...
+ * We don't want to allocate just a little more space with each append;
+ * for efficiency, double the buffer size each time it overflows.
+ * Actually, we might need to more than double it if 'needed' is big...
*/
newlen = 2 * str->maxlen;
while (needed > newlen)
newlen = 2 * newlen;
/*
- * Clamp to MaxAllocSize in case we went past it. Note we are
- * assuming here that MaxAllocSize <= INT_MAX/2, else the above loop
- * could overflow. We will still have newlen >= needed.
+ * Clamp to MaxAllocSize in case we went past it. Note we are assuming
+ * here that MaxAllocSize <= INT_MAX/2, else the above loop could
+ * overflow. We will still have newlen >= needed.
*/
if (newlen > (int) MaxAllocSize)
newlen = (int) MaxAllocSize;
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 3bdddf86da4..240a02f0e93 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.129 2005/10/13 22:55:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.130 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ static char *recv_password_packet(Port *port);
static int recv_and_check_password_packet(Port *port);
char *pg_krb_server_keyfile;
-char *pg_krb_srvnam;
+char *pg_krb_srvnam;
bool pg_krb_caseins_users;
char *pg_krb_server_hostname = NULL;
@@ -65,8 +65,8 @@ static struct pam_conv pam_passw_conv = {
};
static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */
-static Port *pam_port_cludge; /* Workaround for passing "Port *port"
- * into pam_passwd_conv_proc */
+static Port *pam_port_cludge; /* Workaround for passing "Port *port" into
+ * pam_passwd_conv_proc */
#endif /* USE_PAM */
#ifdef KRB5
@@ -119,7 +119,7 @@ static int
pg_krb5_init(void)
{
krb5_error_code retval;
- char *khostname;
+ char *khostname;
if (pg_krb5_initialised)
return STATUS_OK;
@@ -147,8 +147,8 @@ pg_krb5_init(void)
}
/*
- * If no hostname was specified, pg_krb_server_hostname is already
- * NULL. If it's set to blank, force it to NULL.
+ * If no hostname was specified, pg_krb_server_hostname is already NULL.
+ * If it's set to blank, force it to NULL.
*/
khostname = pg_krb_server_hostname;
if (khostname && khostname[0] == '\0')
@@ -163,9 +163,9 @@ pg_krb5_init(void)
{
ereport(LOG,
(errmsg("Kerberos sname_to_principal(\"%s\", \"%s\") returned error %d",
- khostname ? khostname : "localhost", pg_krb_srvnam, retval)));
+ khostname ? khostname : "localhost", pg_krb_srvnam, retval)));
com_err("postgres", retval,
- "while getting server principal for server \"%s\" for service \"%s\"",
+ "while getting server principal for server \"%s\" for service \"%s\"",
khostname ? khostname : "localhost", pg_krb_srvnam);
krb5_kt_close(pg_krb5_context, pg_krb5_keytab);
krb5_free_context(pg_krb5_context);
@@ -260,7 +260,6 @@ pg_krb5_recvauth(Port *port)
return ret;
}
-
#else
static int
@@ -293,13 +292,13 @@ auth_failed(Port *port, int status)
/*
* If we failed due to EOF from client, just quit; there's no point in
- * trying to send a message to the client, and not much point in
- * logging the failure in the postmaster log. (Logging the failure
- * might be desirable, were it not for the fact that libpq closes the
- * connection unceremoniously if challenged for a password when it
- * hasn't got one to send. We'll get a useless log entry for every
- * psql connection under password auth, even if it's perfectly
- * successful, if we log STATUS_EOF events.)
+ * trying to send a message to the client, and not much point in logging
+ * the failure in the postmaster log. (Logging the failure might be
+ * desirable, were it not for the fact that libpq closes the connection
+ * unceremoniously if challenged for a password when it hasn't got one to
+ * send. We'll get a useless log entry for every psql connection under
+ * password auth, even if it's perfectly successful, if we log STATUS_EOF
+ * events.)
*/
if (status == STATUS_EOF)
proc_exit(0);
@@ -351,9 +350,9 @@ ClientAuthentication(Port *port)
/*
* Get the authentication method to use for this frontend/database
- * combination. Note: a failure return indicates a problem with the
- * hba config file, not with the request. hba.c should have dropped
- * an error message into the postmaster logfile if it failed.
+ * combination. Note: a failure return indicates a problem with the hba
+ * config file, not with the request. hba.c should have dropped an error
+ * message into the postmaster logfile if it failed.
*/
if (hba_getauthmethod(port) != STATUS_OK)
ereport(FATAL,
@@ -368,11 +367,11 @@ ClientAuthentication(Port *port)
/*
* This could have come from an explicit "reject" entry in
* pg_hba.conf, but more likely it means there was no matching
- * entry. Take pity on the poor user and issue a helpful
- * error message. NOTE: this is not a security breach,
- * because all the info reported here is known at the frontend
- * and must be assumed known to bad guys. We're merely helping
- * out the less clueful good guys.
+ * entry. Take pity on the poor user and issue a helpful error
+ * message. NOTE: this is not a security breach, because all the
+ * info reported here is known at the frontend and must be assumed
+ * known to bad guys. We're merely helping out the less clueful
+ * good guys.
*/
{
char hostinfo[NI_MAXHOST];
@@ -384,14 +383,14 @@ ClientAuthentication(Port *port)
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name, port->database_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name, port->database_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name, port->database_name)));
#endif
break;
@@ -425,7 +424,7 @@ ClientAuthentication(Port *port)
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- errmsg("could not enable credential reception: %m")));
+ errmsg("could not enable credential reception: %m")));
#endif
sendAuthRequest(port, AUTH_REQ_SCM_CREDS);
@@ -488,8 +487,8 @@ sendAuthRequest(Port *port, AuthRequest areq)
pq_endmessage(&buf);
/*
- * Flush message so client will see it, except for AUTH_REQ_OK, which
- * need not be sent until we are ready for queries.
+ * Flush message so client will see it, except for AUTH_REQ_OK, which need
+ * not be sent until we are ready for queries.
*/
if (areq != AUTH_REQ_OK)
pq_flush();
@@ -526,15 +525,15 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message ** msg,
if (!appdata_ptr)
{
/*
- * Workaround for Solaris 2.6 where the PAM library is broken and
- * does not pass appdata_ptr to the conversation routine
+ * Workaround for Solaris 2.6 where the PAM library is broken and does
+ * not pass appdata_ptr to the conversation routine
*/
appdata_ptr = pam_passwd;
}
/*
- * Password wasn't passed to PAM the first time around - let's go ask
- * the client to send a password, which we then stuff into PAM.
+ * Password wasn't passed to PAM the first time around - let's go ask the
+ * client to send a password, which we then stuff into PAM.
*/
if (strlen(appdata_ptr) == 0)
{
@@ -695,15 +694,15 @@ recv_password_packet(Port *port)
{
/*
* If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec
- * and in fact commonly done by psql, so complaining just
- * clutters the log.
+ * don't make a log entry. This is legal per protocol spec and in
+ * fact commonly done by psql, so complaining just clutters the
+ * log.
*/
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("expected password response, got message type %d",
- mtype)));
+ errmsg("expected password response, got message type %d",
+ mtype)));
return NULL; /* EOF or bad message type */
}
}
@@ -723,8 +722,8 @@ recv_password_packet(Port *port)
}
/*
- * Apply sanity check: password packet length should agree with length
- * of contained string. Note it is safe to use strlen here because
+ * Apply sanity check: password packet length should agree with length of
+ * contained string. Note it is safe to use strlen here because
* StringInfo is guaranteed to have an appended '\0'.
*/
if (strlen(buf.data) + 1 != buf.len)
@@ -738,8 +737,8 @@ recv_password_packet(Port *port)
/*
* Return the received string. Note we do not attempt to do any
- * character-set conversion on it; since we don't yet know the
- * client's encoding, there wouldn't be much point.
+ * character-set conversion on it; since we don't yet know the client's
+ * encoding, there wouldn't be much point.
*/
return buf.data;
}
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 016884e425f..139f8946dd8 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.78 2005/06/13 02:26:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.79 2005/10/15 02:49:17 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -74,7 +74,7 @@ static MemoryContext fscxt = NULL;
ALLOCSET_DEFAULT_INITSIZE, \
ALLOCSET_DEFAULT_MAXSIZE); \
} while (0)
-
+
static int newLOfd(LargeObjectDesc *lobjCookie);
static void deleteLOfd(int fd);
@@ -198,8 +198,8 @@ lo_write(int fd, char *buf, int len)
if ((cookies[fd]->flags & IFS_WRLOCK) == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("large object descriptor %d was not opened for writing",
- fd)));
+ errmsg("large object descriptor %d was not opened for writing",
+ fd)));
Assert(fscxt != NULL);
currentContext = MemoryContextSwitchTo(fscxt);
@@ -289,9 +289,8 @@ lo_tell(PG_FUNCTION_ARGS)
}
/*
- * We assume we do not need to switch contexts for inv_tell. That is
- * true for now, but is probably more than this module ought to
- * assume...
+ * We assume we do not need to switch contexts for inv_tell. That is true
+ * for now, but is probably more than this module ought to assume...
*/
PG_RETURN_INT32(inv_tell(cookies[fd]));
}
@@ -322,9 +321,9 @@ lo_unlink(PG_FUNCTION_ARGS)
}
/*
- * inv_drop does not need a context switch, indeed it doesn't touch
- * any LO-specific data structures at all. (Again, that's probably
- * more than this module ought to be assuming.)
+ * inv_drop does not need a context switch, indeed it doesn't touch any
+ * LO-specific data structures at all. (Again, that's probably more than
+ * this module ought to be assuming.)
*/
PG_RETURN_INT32(inv_drop(lobjId));
}
@@ -388,13 +387,13 @@ lo_import(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_import()"),
+ errmsg("must be superuser to use server-side lo_import()"),
errhint("Anyone can use the client-side lo_import() provided by libpq.")));
#endif
/*
- * We don't actually need to switch into fscxt, but create it anyway
- * to ensure that AtEOXact_LargeObject knows there is state to clean up
+ * We don't actually need to switch into fscxt, but create it anyway to
+ * ensure that AtEOXact_LargeObject knows there is state to clean up
*/
CreateFSContext();
@@ -462,13 +461,13 @@ lo_export(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_export()"),
+ errmsg("must be superuser to use server-side lo_export()"),
errhint("Anyone can use the client-side lo_export() provided by libpq.")));
#endif
/*
- * We don't actually need to switch into fscxt, but create it anyway
- * to ensure that AtEOXact_LargeObject knows there is state to clean up
+ * We don't actually need to switch into fscxt, but create it anyway to
+ * ensure that AtEOXact_LargeObject knows there is state to clean up
*/
CreateFSContext();
@@ -480,9 +479,9 @@ lo_export(PG_FUNCTION_ARGS)
/*
* open the file to be written to
*
- * Note: we reduce backend's normal 077 umask to the slightly friendlier
- * 022. This code used to drop it all the way to 0, but creating
- * world-writable export files doesn't seem wise.
+ * Note: we reduce backend's normal 077 umask to the slightly friendlier 022.
+ * This code used to drop it all the way to 0, but creating world-writable
+ * export files doesn't seem wise.
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
@@ -533,8 +532,8 @@ AtEOXact_LargeObject(bool isCommit)
currentContext = MemoryContextSwitchTo(fscxt);
/*
- * Close LO fds and clear cookies array so that LO fds are no longer
- * good. On abort we skip the close step.
+ * Close LO fds and clear cookies array so that LO fds are no longer good.
+ * On abort we skip the close step.
*/
for (i = 0; i < cookies_size; i++)
{
@@ -587,8 +586,8 @@ AtEOSubXact_LargeObject(bool isCommit, SubTransactionId mySubid,
else
{
/*
- * Make sure we do not call inv_close twice if it errors
- * out for some reason. Better a leak than a crash.
+ * Make sure we do not call inv_close twice if it errors out
+ * for some reason. Better a leak than a crash.
*/
deleteLOfd(i);
inv_close(lo);
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 01dc4f1af0d..a2404ebd38a 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.58 2005/07/04 04:51:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.59 2005/10/15 02:49:17 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -103,7 +103,7 @@
#define ROOT_CERT_FILE "root.crt"
#define SERVER_CERT_FILE "server.crt"
-#define SERVER_PRIVATE_KEY_FILE "server.key"
+#define SERVER_PRIVATE_KEY_FILE "server.key"
static DH *load_dh_file(int keylength);
static DH *load_dh_buffer(const char *, size_t);
@@ -276,8 +276,8 @@ rloop:
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE);
#endif
goto rloop;
case SSL_ERROR_SYSCALL:
@@ -353,7 +353,7 @@ secure_write(Port *port, void *ptr, size_t len)
if (port->ssl->state != SSL_ST_OK)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("SSL failed to send renegotiation request")));
+ errmsg("SSL failed to send renegotiation request")));
port->ssl->state |= SSL_ST_ACCEPT;
SSL_do_handshake(port->ssl);
if (port->ssl->state != SSL_ST_OK)
@@ -375,8 +375,8 @@ wloop:
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE);
#endif
goto wloop;
case SSL_ERROR_SYSCALL:
@@ -439,12 +439,12 @@ wloop:
static bool my_bio_initialized = false;
static BIO_METHOD my_bio_methods;
-static int (*std_sock_read) (BIO *h, char *buf, int size);
+static int (*std_sock_read) (BIO *h, char *buf, int size);
static int
my_sock_read(BIO *h, char *buf, int size)
{
- int res;
+ int res;
prepare_for_client_read();
@@ -472,21 +472,21 @@ my_BIO_s_socket(void)
static int
my_SSL_set_fd(SSL *s, int fd)
{
- int ret=0;
- BIO *bio=NULL;
+ int ret = 0;
+ BIO *bio = NULL;
- bio=BIO_new(my_BIO_s_socket());
+ bio = BIO_new(my_BIO_s_socket());
if (bio == NULL)
{
- SSLerr(SSL_F_SSL_SET_FD,ERR_R_BUF_LIB);
+ SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB);
goto err;
}
- BIO_set_fd(bio,fd,BIO_NOCLOSE);
- SSL_set_bio(s,bio,bio);
- ret=1;
+ BIO_set_fd(bio, fd, BIO_NOCLOSE);
+ SSL_set_bio(s, bio, bio);
+ ret = 1;
err:
- return(ret);
+ return (ret);
}
/*
@@ -539,7 +539,7 @@ load_dh_file(int keylength)
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
- "DH error (%s): neither suitable generator or safe prime",
+ "DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
@@ -640,8 +640,8 @@ tmp_dh_cb(SSL *s, int is_export, int keylength)
if (r == NULL || 8 * DH_size(r) < keylength)
{
ereport(DEBUG2,
- (errmsg_internal("DH: generating parameters (%d bits)....",
- keylength)));
+ (errmsg_internal("DH: generating parameters (%d bits)....",
+ keylength)));
r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL);
}
@@ -735,30 +735,30 @@ initialize_SSL(void)
SSL_FILETYPE_PEM))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not load server certificate file \"%s\": %s",
- SERVER_CERT_FILE, SSLerrmessage())));
+ errmsg("could not load server certificate file \"%s\": %s",
+ SERVER_CERT_FILE, SSLerrmessage())));
if (stat(SERVER_PRIVATE_KEY_FILE, &buf) == -1)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not access private key file \"%s\": %m",
- SERVER_PRIVATE_KEY_FILE)));
+ errmsg("could not access private key file \"%s\": %m",
+ SERVER_PRIVATE_KEY_FILE)));
/*
* Require no public access to key file.
*
- * XXX temporarily suppress check when on Windows, because there may
- * not be proper support for Unix-y file permissions. Need to
- * think of a reasonable check to apply on Windows. (See also the
- * data directory permission check in postmaster.c)
+ * XXX temporarily suppress check when on Windows, because there may not
+ * be proper support for Unix-y file permissions. Need to think of a
+ * reasonable check to apply on Windows. (See also the data directory
+ * permission check in postmaster.c)
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
buf.st_uid != geteuid())
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsafe permissions on private key file \"%s\"",
- SERVER_PRIVATE_KEY_FILE),
+ errmsg("unsafe permissions on private key file \"%s\"",
+ SERVER_PRIVATE_KEY_FILE),
errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\".")));
#endif
@@ -861,8 +861,8 @@ aloop:
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE|FD_ACCEPT : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE | FD_ACCEPT : FD_WRITE | FD_CLOSE);
#endif
goto aloop;
case SSL_ERROR_SYSCALL:
@@ -873,7 +873,7 @@ aloop:
else
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("could not accept SSL connection: EOF detected")));
+ errmsg("could not accept SSL connection: EOF detected")));
break;
case SSL_ERROR_SSL:
ereport(COMMERROR,
@@ -884,7 +884,7 @@ aloop:
case SSL_ERROR_ZERO_RETURN:
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("could not accept SSL connection: EOF detected")));
+ errmsg("could not accept SSL connection: EOF detected")));
break;
default:
ereport(COMMERROR,
@@ -912,7 +912,7 @@ aloop:
port->peer_dn, sizeof(port->peer_dn));
port->peer_dn[sizeof(port->peer_dn) - 1] = '\0';
X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer),
- NID_commonName, port->peer_cn, sizeof(port->peer_cn));
+ NID_commonName, port->peer_cn, sizeof(port->peer_cn));
port->peer_cn[sizeof(port->peer_cn) - 1] = '\0';
}
ereport(DEBUG2,
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index 4e91b1a36f0..b0a17aea53b 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.65 2005/08/15 02:40:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.66 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,9 +148,9 @@ md5_crypt_verify(const Port *port, const char *role, char *client_pass)
TimestampTz vuntil;
vuntil = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
- CStringGetDatum(valuntil),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
+ CStringGetDatum(valuntil),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1)));
if (vuntil < GetCurrentTimestamp())
retval = STATUS_ERROR;
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index f565442ac68..734a4568d2a 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.147 2005/08/11 21:11:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.148 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,9 +80,9 @@ static List **role_sorted = NULL; /* sorted role list, for bsearch() */
static int role_length;
static void tokenize_file(const char *filename, FILE *file,
- List **lines, List **line_nums);
+ List **lines, List **line_nums);
static char *tokenize_inc_file(const char *outer_filename,
- const char *inc_filename);
+ const char *inc_filename);
/*
* isblank() exists in the ISO C99 spec, but it's not very portable yet,
@@ -136,8 +136,8 @@ next_token(FILE *fp, char *buf, int bufsz)
}
/*
- * Build a token in buf of next characters up to EOF, EOL, unquoted
- * comma, or unquoted whitespace.
+ * Build a token in buf of next characters up to EOF, EOL, unquoted comma,
+ * or unquoted whitespace.
*/
while (c != EOF && c != '\n' &&
(!pg_isblank(c) || in_quote == true))
@@ -158,8 +158,8 @@ next_token(FILE *fp, char *buf, int bufsz)
*buf = '\0';
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("authentication file token too long, skipping: \"%s\"",
- start_buf)));
+ errmsg("authentication file token too long, skipping: \"%s\"",
+ start_buf)));
/* Discard remainder of line */
while ((c = getc(fp)) != EOF && c != '\n')
;
@@ -189,8 +189,8 @@ next_token(FILE *fp, char *buf, int bufsz)
}
/*
- * Put back the char right after the token (critical in case it is
- * EOL, since we need to detect end-of-line at next call).
+ * Put back the char right after the token (critical in case it is EOL,
+ * since we need to detect end-of-line at next call).
*/
if (c != EOF)
ungetc(c, fp);
@@ -370,8 +370,8 @@ tokenize_inc_file(const char *outer_filename,
foreach(token, token_list)
{
- int oldlen = strlen(comma_str);
- int needed;
+ int oldlen = strlen(comma_str);
+ int needed;
needed = oldlen + strlen(lfirst(token)) + 1;
if (oldlen > 0)
@@ -460,7 +460,7 @@ role_bsearch_cmp(const void *role, const void *list)
/*
* Lookup a role name in the pg_auth file
*/
-List **
+List **
get_role_line(const char *role)
{
/* On some versions of Solaris, bsearch of zero items dumps core */
@@ -495,8 +495,8 @@ is_member(const char *user, const char *role)
return true;
/*
- * skip over the role name, password, valuntil, examine all the
- * membership entries
+ * skip over the role name, password, valuntil, examine all the membership
+ * entries
*/
if (list_length(*line) < 4)
return false;
@@ -761,9 +761,9 @@ parse_hba(List *line, int line_num, hbaPort *port,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s",
- token, HbaFileName, line_num,
- gai_strerror(ret))));
+ errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s",
+ token, HbaFileName, line_num,
+ gai_strerror(ret))));
if (cidr_slash)
*cidr_slash = '/';
if (gai_result)
@@ -796,9 +796,9 @@ parse_hba(List *line, int line_num, hbaPort *port,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s",
- token, HbaFileName, line_num,
- gai_strerror(ret))));
+ errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s",
+ token, HbaFileName, line_num,
+ gai_strerror(ret))));
if (gai_result)
freeaddrinfo_all(hints.ai_family, gai_result);
goto hba_other_error;
@@ -820,9 +820,9 @@ parse_hba(List *line, int line_num, hbaPort *port,
if (addr.ss_family != port->raddr.addr.ss_family)
{
/*
- * Wrong address family. We allow only one case: if the file
- * has IPv4 and the port is IPv6, promote the file address to
- * IPv6 and try to match that way.
+ * Wrong address family. We allow only one case: if the file has
+ * IPv4 and the port is IPv6, promote the file address to IPv6 and
+ * try to match that way.
*/
#ifdef HAVE_IPV6
if (addr.ss_family == AF_INET &&
@@ -869,14 +869,14 @@ hba_syntax:
if (line_item)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"",
- HbaFileName, line_num,
- (char *) lfirst(line_item))));
+ errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"",
+ HbaFileName, line_num,
+ (char *) lfirst(line_item))));
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing field in file \"%s\" at end of line %d",
- HbaFileName, line_num)));
+ errmsg("missing field in file \"%s\" at end of line %d",
+ HbaFileName, line_num)));
/* Come here if suitable message already logged */
hba_other_error:
@@ -928,7 +928,7 @@ load_role(void)
/* Discard any old data */
if (role_lines || role_line_nums)
free_lines(&role_lines, &role_line_nums);
- if (role_sorted)
+ if (role_sorted)
pfree(role_sorted);
role_sorted = NULL;
role_length = 0;
@@ -957,8 +957,8 @@ load_role(void)
role_length = list_length(role_lines);
if (role_length)
{
- int i = 0;
- ListCell *line;
+ int i = 0;
+ ListCell *line;
/* We assume the flat file was written already-sorted */
role_sorted = palloc(role_length * sizeof(List *));
@@ -1124,7 +1124,7 @@ check_ident_usermap(const char *usermap_name,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("cannot use Ident authentication without usermap field")));
+ errmsg("cannot use Ident authentication without usermap field")));
found_entry = false;
}
else if (strcmp(usermap_name, "sameuser\n") == 0 ||
@@ -1191,12 +1191,10 @@ static bool
interpret_ident_response(const char *ident_response,
char *ident_user)
{
- const char *cursor = ident_response; /* Cursor into
- * *ident_response */
+ const char *cursor = ident_response; /* Cursor into *ident_response */
/*
- * Ident's response, in the telnet tradition, should end in crlf
- * (\r\n).
+ * Ident's response, in the telnet tradition, should end in crlf (\r\n).
*/
if (strlen(ident_response) < 2)
return false;
@@ -1230,9 +1228,8 @@ interpret_ident_response(const char *ident_response,
else
{
/*
- * It's a USERID response. Good. "cursor" should be
- * pointing to the colon that precedes the operating
- * system type.
+ * It's a USERID response. Good. "cursor" should be pointing
+ * to the colon that precedes the operating system type.
*/
if (*cursor != ':')
return false;
@@ -1280,10 +1277,9 @@ ident_inet(const SockAddr remote_addr,
const SockAddr local_addr,
char *ident_user)
{
- int sock_fd, /* File descriptor for socket on which we
- * talk to Ident */
- rc; /* Return code from a locally called
- * function */
+ int sock_fd, /* File descriptor for socket on which we talk
+ * to Ident */
+ rc; /* Return code from a locally called function */
bool ident_return;
char remote_addr_s[NI_MAXHOST];
char remote_port[NI_MAXSERV];
@@ -1297,8 +1293,8 @@ ident_inet(const SockAddr remote_addr,
hints;
/*
- * Might look a little weird to first convert it to text and then back
- * to sockaddr, but it's protocol independent.
+ * Might look a little weird to first convert it to text and then back to
+ * sockaddr, but it's protocol independent.
*/
getnameinfo_all(&remote_addr.addr, remote_addr.salen,
remote_addr_s, sizeof(remote_addr_s),
@@ -1348,16 +1344,15 @@ ident_inet(const SockAddr remote_addr,
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for Ident connection: %m")));
+ errmsg("could not create socket for Ident connection: %m")));
ident_return = false;
goto ident_inet_done;
}
/*
- * Bind to the address which the client originally contacted,
- * otherwise the ident server won't be able to match up the right
- * connection. This is necessary if the PostgreSQL server is running
- * on an IP alias.
+ * Bind to the address which the client originally contacted, otherwise
+ * the ident server won't be able to match up the right connection. This
+ * is necessary if the PostgreSQL server is running on an IP alias.
*/
rc = bind(sock_fd, la->ai_addr, la->ai_addrlen);
if (rc != 0)
@@ -1421,8 +1416,8 @@ ident_inet(const SockAddr remote_addr,
ident_return = interpret_ident_response(ident_response, ident_user);
if (!ident_return)
ereport(LOG,
- (errmsg("invalidly formatted response from Ident server: \"%s\"",
- ident_response)));
+ (errmsg("invalidly formatted response from Ident server: \"%s\"",
+ ident_response)));
ident_inet_done:
if (sock_fd >= 0)
@@ -1473,7 +1468,6 @@ ident_unix(int sock, char *ident_user)
StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#elif defined(SO_PEERCRED)
/* Linux style: use getsockopt(SO_PEERCRED) */
struct ucred peercred;
@@ -1504,7 +1498,6 @@ ident_unix(int sock, char *ident_user)
StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS))
struct msghdr msg;
@@ -1543,8 +1536,8 @@ ident_unix(int sock, char *ident_user)
/*
* The one character which is received here is not meaningful; its
- * purposes is only to make sure that recvmsg() blocks long enough for
- * the other side to send its credentials.
+ * purposes is only to make sure that recvmsg() blocks long enough for the
+ * other side to send its credentials.
*/
iov.iov_base = &buf;
iov.iov_len = 1;
@@ -1574,7 +1567,6 @@ ident_unix(int sock, char *ident_user)
StrNCpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#else
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index 3c7fcd69127..f73d38795a6 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.28 2005/02/23 22:46:17 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.29 2005/10/15 02:49:18 momjian Exp $
*/
@@ -329,8 +329,8 @@ EncryptMD5(const char *passwd, const char *salt, size_t salt_len,
bool ret;
/*
- * Place salt at the end because it may be known by users trying to
- * crack the MD5 output.
+ * Place salt at the end because it may be known by users trying to crack
+ * the MD5 output.
*/
strcpy(crypt_buf, passwd);
memcpy(crypt_buf + passwd_len, salt, salt_len);
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index d0904bfc7df..ccb4bcf2b51 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.180 2005/09/24 17:53:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.181 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -107,12 +107,10 @@ static char sock_path[MAXPGPATH];
#define PQ_BUFFER_SIZE 8192
static char PqSendBuffer[PQ_BUFFER_SIZE];
-static int PqSendPointer; /* Next index to store a byte in
- * PqSendBuffer */
+static int PqSendPointer; /* Next index to store a byte in PqSendBuffer */
static char PqRecvBuffer[PQ_BUFFER_SIZE];
-static int PqRecvPointer; /* Next index to read a byte from
- * PqRecvBuffer */
+static int PqRecvPointer; /* Next index to read a byte from PqRecvBuffer */
static int PqRecvLength; /* End of data available in PqRecvBuffer */
/*
@@ -126,6 +124,7 @@ static bool DoingCopyOut;
static void pq_close(int code, Datum arg);
static int internal_putbytes(const char *s, size_t len);
static int internal_flush(void);
+
#ifdef HAVE_UNIX_SOCKETS
static int Lock_AF_UNIX(unsigned short portNumber, char *unixSocketName);
static int Setup_AF_UNIX(void);
@@ -178,11 +177,11 @@ pq_close(int code, Datum arg)
secure_close(MyProcPort);
/*
- * Formerly we did an explicit close() here, but it seems better
- * to leave the socket open until the process dies. This allows
- * clients to perform a "synchronous close" if they care --- wait
- * till the transport layer reports connection closure, and you
- * can be sure the backend has exited.
+ * Formerly we did an explicit close() here, but it seems better to
+ * leave the socket open until the process dies. This allows clients
+ * to perform a "synchronous close" if they care --- wait till the
+ * transport layer reports connection closure, and you can be sure the
+ * backend has exited.
*
* We do set sock to -1 to prevent any further I/O, though.
*/
@@ -272,8 +271,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
hostName, service, gai_strerror(ret))));
else
ereport(LOG,
- (errmsg("could not translate service \"%s\" to address: %s",
- service, gai_strerror(ret))));
+ (errmsg("could not translate service \"%s\" to address: %s",
+ service, gai_strerror(ret))));
if (addrs)
freeaddrinfo_all(hint.ai_family, addrs);
return STATUS_ERROR;
@@ -284,8 +283,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family))
{
/*
- * Only set up a unix domain socket when they really asked for
- * it. The service/port is different in that case.
+ * Only set up a unix domain socket when they really asked for it.
+ * The service/port is different in that case.
*/
continue;
}
@@ -368,9 +367,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
/*
* Note: This might fail on some OS's, like Linux older than
- * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and
- * map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all
- * ipv4 connections.
+ * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
+ * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
if (err < 0)
@@ -381,12 +380,12 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
errmsg("could not bind %s socket: %m",
familyDesc),
(IS_AF_UNIX(addr->ai_family)) ?
- errhint("Is another postmaster already running on port %d?"
- " If not, remove socket file \"%s\" and retry.",
- (int) portNumber, sock_path) :
- errhint("Is another postmaster already running on port %d?"
- " If not, wait a few seconds and retry.",
- (int) portNumber)));
+ errhint("Is another postmaster already running on port %d?"
+ " If not, remove socket file \"%s\" and retry.",
+ (int) portNumber, sock_path) :
+ errhint("Is another postmaster already running on port %d?"
+ " If not, wait a few seconds and retry.",
+ (int) portNumber)));
closesocket(fd);
continue;
}
@@ -403,10 +402,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif
/*
- * Select appropriate accept-queue length limit. PG_SOMAXCONN is
- * only intended to provide a clamp on the request on platforms
- * where an overly large request provokes a kernel error (are
- * there any?).
+ * Select appropriate accept-queue length limit. PG_SOMAXCONN is only
+ * intended to provide a clamp on the request on platforms where an
+ * overly large request provokes a kernel error (are there any?).
*/
maxconn = MaxBackends * 2;
if (maxconn > PG_SOMAXCONN)
@@ -472,8 +470,8 @@ Setup_AF_UNIX(void)
/*
* Fix socket ownership/permission if requested. Note we must do this
- * before we listen() to avoid a window where unwanted connections
- * could get accepted.
+ * before we listen() to avoid a window where unwanted connections could
+ * get accepted.
*/
Assert(Unix_socket_group);
if (Unix_socket_group[0] != '\0')
@@ -596,11 +594,11 @@ StreamConnection(int server_fd, Port *port)
}
/*
- * Also apply the current keepalive parameters. If we fail to set
- * a parameter, don't error out, because these aren't universally
+ * Also apply the current keepalive parameters. If we fail to set a
+ * parameter, don't error out, because these aren't universally
* supported. (Note: you might think we need to reset the GUC
- * variables to 0 in such a case, but it's not necessary because
- * the show hooks for these variables report the truth anyway.)
+ * variables to 0 in such a case, but it's not necessary because the
+ * show hooks for these variables report the truth anyway.)
*/
(void) pq_setkeepalivesidle(tcp_keepalives_idle, port);
(void) pq_setkeepalivesinterval(tcp_keepalives_interval, port);
@@ -642,9 +640,9 @@ TouchSocketFile(void)
if (sock_path[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative. If
- * we have neither, there's no way to affect the mod or access
- * time of the socket :-(
+ * utime() is POSIX standard, utimes() is a common alternative. If we
+ * have neither, there's no way to affect the mod or access time of
+ * the socket :-(
*
* In either path, we ignore errors; there's no point in complaining.
*/
@@ -705,10 +703,9 @@ pq_recvbuf(void)
continue; /* Ok if interrupted */
/*
- * Careful: an ereport() that tries to write to the client
- * would cause recursion to here, leading to stack overflow
- * and core dump! This message must go *only* to the
- * postmaster log.
+ * Careful: an ereport() that tries to write to the client would
+ * cause recursion to here, leading to stack overflow and core
+ * dump! This message must go *only* to the postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
@@ -718,8 +715,8 @@ pq_recvbuf(void)
if (r == 0)
{
/*
- * EOF detected. We used to write a log message here, but
- * it's better to expect the ultimate caller to do that.
+ * EOF detected. We used to write a log message here, but it's
+ * better to expect the ultimate caller to do that.
*/
return EOF;
}
@@ -925,7 +922,7 @@ pq_getmessage(StringInfo s, int maxlen)
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message. If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
@@ -1044,14 +1041,13 @@ internal_flush(void)
continue; /* Ok if we were interrupted */
/*
- * Careful: an ereport() that tries to write to the client
- * would cause recursion to here, leading to stack overflow
- * and core dump! This message must go *only* to the
- * postmaster log.
+ * Careful: an ereport() that tries to write to the client would
+ * cause recursion to here, leading to stack overflow and core
+ * dump! This message must go *only* to the postmaster log.
*
* If a client disconnects while we're in the midst of output, we
- * might write quite a bit of data before we get to a safe
- * query abort point. So, suppress duplicate log messages.
+ * might write quite a bit of data before we get to a safe query
+ * abort point. So, suppress duplicate log messages.
*/
if (errno != last_reported_send_errno)
{
@@ -1187,14 +1183,14 @@ pq_getkeepalivesidle(Port *port)
if (port->default_keepalives_idle == 0)
{
- socklen_t size = sizeof(port->default_keepalives_idle);
+ socklen_t size = sizeof(port->default_keepalives_idle);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPIDLE,
(char *) &port->default_keepalives_idle,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPIDLE) failed: %m");
- port->default_keepalives_idle = -1; /* don't know */
+ port->default_keepalives_idle = -1; /* don't know */
}
}
@@ -1219,7 +1215,7 @@ pq_setkeepalivesidle(int idle, Port *port)
if (pq_getkeepalivesidle(port) < 0)
{
if (idle == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
@@ -1259,14 +1255,14 @@ pq_getkeepalivesinterval(Port *port)
if (port->default_keepalives_interval == 0)
{
- socklen_t size = sizeof(port->default_keepalives_interval);
+ socklen_t size = sizeof(port->default_keepalives_interval);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPINTVL,
(char *) &port->default_keepalives_interval,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPINTVL) failed: %m");
- port->default_keepalives_interval = -1; /* don't know */
+ port->default_keepalives_interval = -1; /* don't know */
}
}
@@ -1291,7 +1287,7 @@ pq_setkeepalivesinterval(int interval, Port *port)
if (pq_getkeepalivesinterval(port) < 0)
{
if (interval == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
@@ -1331,14 +1327,14 @@ pq_getkeepalivescount(Port *port)
if (port->default_keepalives_count == 0)
{
- socklen_t size = sizeof(port->default_keepalives_count);
+ socklen_t size = sizeof(port->default_keepalives_count);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPCNT,
(char *) &port->default_keepalives_count,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPCNT) failed: %m");
- port->default_keepalives_count = -1; /* don't know */
+ port->default_keepalives_count = -1; /* don't know */
}
}
@@ -1363,7 +1359,7 @@ pq_setkeepalivescount(int count, Port *port)
if (pq_getkeepalivescount(port) < 0)
{
if (count == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 284427b832a..46e75c5e049 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.39 2005/09/24 17:53:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.40 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,9 +93,8 @@ pq_beginmessage(StringInfo buf, char msgtype)
/*
* We stash the message type into the buffer's cursor field, expecting
- * that the pq_sendXXX routines won't touch it. We could
- * alternatively make it the first byte of the buffer contents, but
- * this seems easier.
+ * that the pq_sendXXX routines won't touch it. We could alternatively
+ * make it the first byte of the buffer contents, but this seems easier.
*/
buf->cursor = msgtype;
}
@@ -664,8 +663,8 @@ pq_getmsgstring(StringInfo msg)
str = &msg->data[msg->cursor];
/*
- * It's safe to use strlen() here because a StringInfo is guaranteed
- * to have a trailing null byte. But check we found a null inside the
+ * It's safe to use strlen() here because a StringInfo is guaranteed to
+ * have a trailing null byte. But check we found a null inside the
* message.
*/
slen = strlen(str);
diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index 6bc3535e83a..abf13e33fc9 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.40 2005/02/14 23:02:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.41 2005/10/15 02:49:18 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
@@ -50,7 +50,6 @@
sigset_t UnBlockSig,
BlockSig,
AuthBlockSig;
-
#else
int UnBlockSig,
BlockSig,
@@ -83,9 +82,9 @@ pqinitmask(void)
sigfillset(&AuthBlockSig);
/*
- * Unmark those signals that should never be blocked. Some of these
- * signal names don't exist on all platforms. Most do, but might as
- * well ifdef them all for consistency...
+ * Unmark those signals that should never be blocked. Some of these signal
+ * names don't exist on all platforms. Most do, but might as well ifdef
+ * them all for consistency...
*/
#ifdef SIGTRAP
sigdelset(&BlockSig, SIGTRAP);
@@ -135,7 +134,7 @@ pqinitmask(void)
UnBlockSig = 0;
BlockSig = sigmask(SIGQUIT) |
sigmask(SIGTERM) | sigmask(SIGALRM) |
- /* common signals between two */
+ /* common signals between two */
sigmask(SIGHUP) |
sigmask(SIGINT) | sigmask(SIGUSR1) |
sigmask(SIGUSR2) | sigmask(SIGCHLD) |
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index ed1895d839c..ea1a3bef254 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.95 2005/10/13 15:37:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.96 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,15 +56,15 @@ main(int argc, char *argv[])
char *pw_name_persist;
/*
- * Place platform-specific startup hacks here. This is the right
- * place to put code that must be executed early in launch of either a
- * postmaster, a standalone backend, or a standalone bootstrap run.
- * Note that this code will NOT be executed when a backend or
- * sub-bootstrap run is forked by the postmaster.
+ * Place platform-specific startup hacks here. This is the right place to
+ * put code that must be executed early in launch of either a postmaster,
+ * a standalone backend, or a standalone bootstrap run. Note that this
+ * code will NOT be executed when a backend or sub-bootstrap run is forked
+ * by the postmaster.
*
- * XXX The need for code here is proof that the platform in question is
- * too brain-dead to provide a standard C execution environment
- * without help. Avoid adding more here, if you can.
+ * XXX The need for code here is proof that the platform in question is too
+ * brain-dead to provide a standard C execution environment without help.
+ * Avoid adding more here, if you can.
*/
#if defined(__alpha) /* no __alpha__ ? */
@@ -78,12 +78,11 @@ main(int argc, char *argv[])
#endif
/*
- * On some platforms, unaligned memory accesses result in a kernel
- * trap; the default kernel behavior is to emulate the memory
- * access, but this results in a significant performance
- * penalty. We ought to fix PG not to make such unaligned memory
- * accesses, so this code disables the kernel emulation: unaligned
- * accesses will result in SIGBUS instead.
+ * On some platforms, unaligned memory accesses result in a kernel trap;
+ * the default kernel behavior is to emulate the memory access, but this
+ * results in a significant performance penalty. We ought to fix PG not to
+ * make such unaligned memory accesses, so this code disables the kernel
+ * emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
@@ -125,31 +124,30 @@ main(int argc, char *argv[])
#endif
/*
- * Not-quite-so-platform-specific startup environment checks. Still
- * best to minimize these.
+ * Not-quite-so-platform-specific startup environment checks. Still best
+ * to minimize these.
*/
/*
- * Remember the physical location of the initially given argv[] array
- * for possible use by ps display. On some platforms, the argv[]
- * storage must be overwritten in order to set the process title for
- * ps. In such cases save_ps_display_args makes and returns a new copy
- * of the argv[] array.
+ * Remember the physical location of the initially given argv[] array for
+ * possible use by ps display. On some platforms, the argv[] storage must
+ * be overwritten in order to set the process title for ps. In such cases
+ * save_ps_display_args makes and returns a new copy of the argv[] array.
*
- * save_ps_display_args may also move the environment strings to make
- * extra room. Therefore this should be done as early as possible
- * during startup, to avoid entanglements with code that might save a
- * getenv() result pointer.
+ * save_ps_display_args may also move the environment strings to make extra
+ * room. Therefore this should be done as early as possible during
+ * startup, to avoid entanglements with code that might save a getenv()
+ * result pointer.
*/
argv = save_ps_display_args(argc, argv);
/*
* Set up locale information from environment. Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
- * already-initialized database. We set them here so that they will
- * be available to fill pg_control during initdb. LC_MESSAGES will
- * get set later during GUC option processing, but we set it here to
- * allow startup error messages to be localized.
+ * already-initialized database. We set them here so that they will be
+ * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * later during GUC option processing, but we set it here to allow startup
+ * error messages to be localized.
*/
set_pglocale_pgservice(argv[0], "postgres");
@@ -157,11 +155,10 @@ main(int argc, char *argv[])
#ifdef WIN32
/*
- * Windows uses codepages rather than the environment, so we work
- * around that by querying the environment explicitly first for
- * LC_COLLATE and LC_CTYPE. We have to do this because initdb passes
- * those values in the environment. If there is nothing there we fall
- * back on the codepage.
+ * Windows uses codepages rather than the environment, so we work around
+ * that by querying the environment explicitly first for LC_COLLATE and
+ * LC_CTYPE. We have to do this because initdb passes those values in the
+ * environment. If there is nothing there we fall back on the codepage.
*/
if ((env_locale = getenv("LC_COLLATE")) != NULL)
@@ -183,17 +180,16 @@ main(int argc, char *argv[])
#endif
/*
- * We keep these set to "C" always, except transiently in pg_locale.c;
- * see that file for explanations.
+ * We keep these set to "C" always, except transiently in pg_locale.c; see
+ * that file for explanations.
*/
setlocale(LC_MONETARY, "C");
setlocale(LC_NUMERIC, "C");
setlocale(LC_TIME, "C");
/*
- * Skip permission checks if we're just trying to do --help or
- * --version; otherwise root will get unhelpful failure messages from
- * initdb.
+ * Skip permission checks if we're just trying to do --help or --version;
+ * otherwise root will get unhelpful failure messages from initdb.
*/
if (!(argc > 1
&& (strcmp(argv[1], "--help") == 0 ||
@@ -215,19 +211,19 @@ main(int argc, char *argv[])
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !__BEOS__ */
/*
- * Also make sure that real and effective uids are the same.
- * Executing Postgres as a setuid program from a root shell is a
- * security hole, since on many platforms a nefarious subroutine
- * could setuid back to root if real uid is root. (Since nobody
- * actually uses Postgres as a setuid program, trying to actively
- * fix this situation seems more trouble than it's worth; we'll
- * just expend the effort to check for it.)
+ * Also make sure that real and effective uids are the same. Executing
+ * Postgres as a setuid program from a root shell is a security hole,
+ * since on many platforms a nefarious subroutine could setuid back to
+ * root if real uid is root. (Since nobody actually uses Postgres as
+ * a setuid program, trying to actively fix this situation seems more
+ * trouble than it's worth; we'll just expend the effort to check for
+ * it.)
*/
if (getuid() != geteuid())
{
@@ -242,7 +238,7 @@ main(int argc, char *argv[])
"permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromises. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !WIN32 */
@@ -250,9 +246,9 @@ main(int argc, char *argv[])
/*
* Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain,
- * SubPostmasterMain, or BootstrapMain depending on the program name
- * (and possibly first argument) we were called with. The lack of
- * consistency here is historical.
+ * SubPostmasterMain, or BootstrapMain depending on the program name (and
+ * possibly first argument) we were called with. The lack of consistency
+ * here is historical.
*/
if (strcmp(get_progname(argv[0]), "postmaster") == 0)
{
@@ -262,8 +258,8 @@ main(int argc, char *argv[])
/*
* If the first argument begins with "-fork", then invoke
- * SubPostmasterMain. This is used for forking postmaster child
- * processes on systems where we can't simply fork.
+ * SubPostmasterMain. This is used for forking postmaster child processes
+ * on systems where we can't simply fork.
*/
#ifdef EXEC_BACKEND
if (argc > 1 && strncmp(argv[1], "-fork", 5) == 0)
@@ -271,11 +267,12 @@ main(int argc, char *argv[])
#endif
#ifdef WIN32
+
/*
* Start our win32 signal implementation
*
- * SubPostmasterMain() will do this for itself, but the remaining
- * modes need it here
+ * SubPostmasterMain() will do this for itself, but the remaining modes need
+ * it here
*/
pgwin32_signal_initialize();
#endif
@@ -295,9 +292,8 @@ main(int argc, char *argv[])
exit(GucInfoMain());
/*
- * Otherwise we're a standalone backend. Invoke PostgresMain,
- * specifying current userid as the "authenticated" Postgres user
- * name.
+ * Otherwise we're a standalone backend. Invoke PostgresMain, specifying
+ * current userid as the "authenticated" Postgres user name.
*/
#ifndef WIN32
pw = getpwuid(geteuid());
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index d74ba6189ed..916833df0dc 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -14,7 +14,7 @@
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.9 2005/06/15 16:24:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.10 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -769,7 +769,7 @@ bms_first_member(Bitmapset *a)
*
* Note: we must ensure that any two bitmapsets that are bms_equal() will
* hash to the same value; in practice this means that trailing all-zero
- * words cannot affect the result. The circular-shift-and-XOR hash method
+ * words cannot affect the result. The circular-shift-and-XOR hash method
* used here has this property, so long as we work from back to front.
*
* Note: you might wonder why we bother with the circular shift; at first
@@ -779,7 +779,7 @@ bms_first_member(Bitmapset *a)
* multiword bitmapsets is "a JOIN b JOIN c JOIN d ...", which gives rise
* to rangetables in which base tables and JOIN nodes alternate; so
* bitmapsets of base table RT indexes tend to use only odd-numbered or only
- * even-numbered bits. A straight longitudinal XOR would preserve this
+ * even-numbered bits. A straight longitudinal XOR would preserve this
* property, leading to a much smaller set of possible outputs than if
* we include a shift.
*/
@@ -791,7 +791,7 @@ bms_hash_value(const Bitmapset *a)
if (a == NULL || a->nwords <= 0)
return 0; /* All empty sets hash to 0 */
- for (wordnum = a->nwords; --wordnum > 0; )
+ for (wordnum = a->nwords; --wordnum > 0;)
{
result ^= a->words[wordnum];
if (result & ((bitmapword) 1 << (BITS_PER_BITMAPWORD - 1)))
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 9c21c2f977a..4a90b10b277 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.315 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.316 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -154,7 +154,7 @@ _copyAppend(Append *from)
static BitmapAnd *
_copyBitmapAnd(BitmapAnd *from)
{
- BitmapAnd *newnode = makeNode(BitmapAnd);
+ BitmapAnd *newnode = makeNode(BitmapAnd);
/*
* copy node superclass fields
@@ -175,7 +175,7 @@ _copyBitmapAnd(BitmapAnd *from)
static BitmapOr *
_copyBitmapOr(BitmapOr *from)
{
- BitmapOr *newnode = makeNode(BitmapOr);
+ BitmapOr *newnode = makeNode(BitmapOr);
/*
* copy node superclass fields
@@ -269,7 +269,7 @@ _copyIndexScan(IndexScan *from)
static BitmapIndexScan *
_copyBitmapIndexScan(BitmapIndexScan *from)
{
- BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
+ BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
/*
* copy node superclass fields
@@ -294,7 +294,7 @@ _copyBitmapIndexScan(BitmapIndexScan *from)
static BitmapHeapScan *
_copyBitmapHeapScan(BitmapHeapScan *from)
{
- BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
+ BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
/*
* copy node superclass fields
@@ -1262,8 +1262,7 @@ _copyRestrictInfo(RestrictInfo *from)
COPY_SCALAR_FIELD(right_sortop);
/*
- * Do not copy pathkeys, since they'd not be canonical in a copied
- * query
+ * Do not copy pathkeys, since they'd not be canonical in a copied query
*/
newnode->left_pathkey = NIL;
newnode->right_pathkey = NIL;
@@ -1791,7 +1790,7 @@ _copyFuncWithArgs(FuncWithArgs *from)
static GrantRoleStmt *
_copyGrantRoleStmt(GrantRoleStmt *from)
{
- GrantRoleStmt *newnode = makeNode(GrantRoleStmt);
+ GrantRoleStmt *newnode = makeNode(GrantRoleStmt);
COPY_NODE_FIELD(granted_roles);
COPY_NODE_FIELD(grantee_roles);
@@ -2906,8 +2905,8 @@ copyObject(void *from)
break;
/*
- * Lists of integers and OIDs don't need to be deep-copied, so
- * we perform a shallow copy via list_copy()
+ * Lists of integers and OIDs don't need to be deep-copied, so we
+ * perform a shallow copy via list_copy()
*/
case T_IntList:
case T_OidList:
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 326eb9c62aa..9baa79dd935 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.252 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.253 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -135,8 +135,7 @@ _equalConst(Const *a, Const *b)
/*
* We treat all NULL constants of the same type as equal. Someday this
- * might need to change? But datumIsEqual doesn't work on nulls,
- * so...
+ * might need to change? But datumIsEqual doesn't work on nulls, so...
*/
if (a->constisnull)
return true;
@@ -202,8 +201,8 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b)
COMPARE_SCALAR_FIELD(funcretset);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->funcformat != b->funcformat &&
a->funcformat != COERCE_DONTCARE &&
@@ -222,9 +221,9 @@ _equalOpExpr(OpExpr *a, OpExpr *b)
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -245,9 +244,9 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -268,9 +267,9 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -354,8 +353,8 @@ _equalRelabelType(RelabelType *a, RelabelType *b)
COMPARE_SCALAR_FIELD(resulttypmod);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->relabelformat != b->relabelformat &&
a->relabelformat != COERCE_DONTCARE &&
@@ -372,8 +371,8 @@ _equalConvertRowtypeExpr(ConvertRowtypeExpr *a, ConvertRowtypeExpr *b)
COMPARE_SCALAR_FIELD(resulttype);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->convertformat != b->convertformat &&
a->convertformat != COERCE_DONTCARE &&
@@ -430,8 +429,8 @@ _equalRowExpr(RowExpr *a, RowExpr *b)
COMPARE_SCALAR_FIELD(row_typeid);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->row_format != b->row_format &&
a->row_format != COERCE_DONTCARE &&
@@ -467,9 +466,9 @@ _equalNullIfExpr(NullIfExpr *a, NullIfExpr *b)
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -509,8 +508,8 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
COMPARE_SCALAR_FIELD(resulttypmod);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->coercionformat != b->coercionformat &&
a->coercionformat != COERCE_DONTCARE &&
@@ -606,8 +605,8 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
COMPARE_BITMAPSET_FIELD(required_relids);
/*
- * We ignore all the remaining fields, since they may not be set yet,
- * and should be derivable from the clause anyway.
+ * We ignore all the remaining fields, since they may not be set yet, and
+ * should be derivable from the clause anyway.
*/
return true;
@@ -1717,15 +1716,15 @@ _equalList(List *a, List *b)
ListCell *item_b;
/*
- * Try to reject by simple scalar checks before grovelling through all
- * the list elements...
+ * Try to reject by simple scalar checks before grovelling through all the
+ * list elements...
*/
COMPARE_SCALAR_FIELD(type);
COMPARE_SCALAR_FIELD(length);
/*
- * We place the switch outside the loop for the sake of efficiency;
- * this may not be worth doing...
+ * We place the switch outside the loop for the sake of efficiency; this
+ * may not be worth doing...
*/
switch (a->type)
{
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index 80043834b63..c775770f70f 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.65 2005/07/28 20:26:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.66 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,6 @@ check_list_invariants(List *list)
Assert(list->head->next == list->tail);
Assert(list->tail->next == NULL);
}
-
#else
#define check_list_invariants(l)
#endif /* USE_ASSERT_CHECKING */
@@ -532,9 +531,9 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev)
Assert(prev != NULL ? lnext(prev) == cell : list_head(list) == cell);
/*
- * If we're about to delete the last node from the list, free the
- * whole list instead and return NIL, which is the only valid
- * representation of a zero-length list.
+ * If we're about to delete the last node from the list, free the whole
+ * list instead and return NIL, which is the only valid representation of
+ * a zero-length list.
*/
if (list->length == 1)
{
@@ -543,9 +542,8 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev)
}
/*
- * Otherwise, adjust the necessary list links, deallocate the
- * particular node we have just removed, and return the list we were
- * given.
+ * Otherwise, adjust the necessary list links, deallocate the particular
+ * node we have just removed, and return the list we were given.
*/
list->length--;
@@ -951,7 +949,7 @@ list_append_unique_oid(List *list, Oid datum)
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied. Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
@@ -1110,8 +1108,8 @@ list_copy(List *oldlist)
newlist->length = oldlist->length;
/*
- * Copy over the data in the first cell; new_list() has already
- * allocated the head cell itself
+ * Copy over the data in the first cell; new_list() has already allocated
+ * the head cell itself
*/
newlist->head->data = oldlist->head->data;
@@ -1163,8 +1161,8 @@ list_copy_tail(List *oldlist, int nskip)
oldlist_cur = oldlist_cur->next;
/*
- * Copy over the data in the first remaining cell; new_list() has
- * already allocated the head cell itself
+ * Copy over the data in the first remaining cell; new_list() has already
+ * allocated the head cell itself
*/
newlist->head->data = oldlist_cur->data;
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index e1e6c3da836..28202af9ee5 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.47 2005/04/06 16:34:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.48 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,11 +73,10 @@ makeVar(Index varno,
var->varlevelsup = varlevelsup;
/*
- * Since few if any routines ever create Var nodes with
- * varnoold/varoattno different from varno/varattno, we don't provide
- * separate arguments for them, but just initialize them to the given
- * varno/varattno. This reduces code clutter and chance of error for
- * most callers.
+ * Since few if any routines ever create Var nodes with varnoold/varoattno
+ * different from varno/varattno, we don't provide separate arguments for
+ * them, but just initialize them to the given varno/varattno. This
+ * reduces code clutter and chance of error for most callers.
*/
var->varnoold = varno;
var->varoattno = varattno;
@@ -102,8 +101,8 @@ makeTargetEntry(Expr *expr,
tle->resname = resname;
/*
- * We always set these fields to 0. If the caller wants to change them
- * he must do so explicitly. Few callers do that, so omitting these
+ * We always set these fields to 0. If the caller wants to change them he
+ * must do so explicitly. Few callers do that, so omitting these
* arguments reduces the chance of error.
*/
tle->ressortgroupref = 0;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index d6d12363883..19306b3e53d 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.260 2005/08/27 22:13:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.261 2005/10/15 02:49:18 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
@@ -113,9 +113,9 @@ _outToken(StringInfo str, char *s)
}
/*
- * Look for characters or patterns that are treated specially by
- * read.c (either in pg_strtok() or in nodeRead()), and therefore need
- * a protective backslash.
+ * Look for characters or patterns that are treated specially by read.c
+ * (either in pg_strtok() or in nodeRead()), and therefore need a
+ * protective backslash.
*/
/* These characters only need to be quoted at the start of the string */
if (*s == '<' ||
@@ -151,8 +151,8 @@ _outList(StringInfo str, List *node)
{
/*
* For the sake of backward compatibility, we emit a slightly
- * different whitespace format for lists of nodes vs. other types
- * of lists. XXX: is this necessary?
+ * different whitespace format for lists of nodes vs. other types of
+ * lists. XXX: is this necessary?
*/
if (IsA(node, List))
{
@@ -1444,9 +1444,9 @@ _outQuery(StringInfo str, Query *node)
/*
* Hack to work around missing outfuncs routines for a lot of the
* utility-statement node types. (The only one we actually *need* for
- * rules support is NotifyStmt.) Someday we ought to support 'em all,
- * but for the meantime do this to avoid getting lots of warnings when
- * running with debug_print_parse on.
+ * rules support is NotifyStmt.) Someday we ought to support 'em all, but
+ * for the meantime do this to avoid getting lots of warnings when running
+ * with debug_print_parse on.
*/
if (node->utilityStmt)
{
@@ -1616,8 +1616,8 @@ _outValue(StringInfo str, Value *value)
case T_Float:
/*
- * We assume the value is a valid numeric literal and so does
- * not need quoting.
+ * We assume the value is a valid numeric literal and so does not
+ * need quoting.
*/
appendStringInfoString(str, value->val.str);
break;
@@ -2099,9 +2099,8 @@ _outNode(StringInfo str, void *obj)
default:
/*
- * This should be an ERROR, but it's too useful to be able
- * to dump structures that _outNode only understands part
- * of.
+ * This should be an ERROR, but it's too useful to be able to
+ * dump structures that _outNode only understands part of.
*/
elog(WARNING, "could not dump unrecognized node type: %d",
(int) nodeTag(obj));
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index 9d6511cf508..2f70355b328 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.76 2005/05/01 18:56:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -603,7 +603,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label)
if (IsA(p, BitmapAnd))
{
ListCell *l;
- BitmapAnd *bitmapandplan = (BitmapAnd *) p;
+ BitmapAnd *bitmapandplan = (BitmapAnd *) p;
foreach(l, bitmapandplan->bitmapplans)
{
@@ -616,7 +616,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label)
if (IsA(p, BitmapOr))
{
ListCell *l;
- BitmapOr *bitmaporplan = (BitmapOr *) p;
+ BitmapOr *bitmaporplan = (BitmapOr *) p;
foreach(l, bitmaporplan->bitmapplans)
{
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index df2165863d6..09175074d51 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.46 2004/12/31 21:59:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.47 2005/10/15 02:49:19 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -41,10 +41,10 @@ stringToNode(char *str)
void *retval;
/*
- * We save and restore the pre-existing state of pg_strtok. This makes
- * the world safe for re-entrant invocation of stringToNode, without
- * incurring a lot of notational overhead by having to pass the
- * next-character pointer around through all the readfuncs.c code.
+ * We save and restore the pre-existing state of pg_strtok. This makes the
+ * world safe for re-entrant invocation of stringToNode, without incurring
+ * a lot of notational overhead by having to pass the next-character
+ * pointer around through all the readfuncs.c code.
*/
save_strtok = pg_strtok_ptr;
@@ -211,13 +211,13 @@ nodeTokenType(char *token, int length)
if (*numptr == '+' || *numptr == '-')
numptr++, numlen--;
if ((numlen > 0 && isdigit((unsigned char) *numptr)) ||
- (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
+ (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
{
/*
* Yes. Figure out whether it is integral or float; this requires
- * both a syntax check and a range check. strtol() can do both for
- * us. We know the token will end at a character that strtol will
- * stop at, so we do not need to modify the string.
+ * both a syntax check and a range check. strtol() can do both for us.
+ * We know the token will end at a character that strtol will stop at,
+ * so we do not need to modify the string.
*/
long val;
char *endptr;
@@ -386,8 +386,7 @@ nodeRead(char *token, int tok_len)
case T_Integer:
/*
- * we know that the token terminates on a char atol will stop
- * at
+ * we know that the token terminates on a char atol will stop at
*/
result = (Node *) makeInteger(atol(token));
break;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index ff49ee21f2e..46c99834461 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.181 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.182 2005/10/15 02:49:19 momjian Exp $
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
@@ -389,12 +389,12 @@ _readOpExpr(void)
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -417,12 +417,12 @@ _readDistinctExpr(void)
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -445,12 +445,12 @@ _readScalarArrayOpExpr(void)
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -686,12 +686,12 @@ _readNullIfExpr(void)
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index a3b5c7d6d07..bcfc7d0920c 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -23,7 +23,7 @@
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.7 2005/09/02 19:02:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.8 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size. We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
@@ -52,10 +52,10 @@
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures. (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
@@ -69,7 +69,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */
#define BITNUM(x) ((x) % BITS_PER_BITMAPWORD)
/* number of active words for an exact page: */
-#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1)
+#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1)
/* number of active words for a lossy chunk: */
#define WORDS_PER_CHUNK ((PAGES_PER_CHUNK - 1) / BITS_PER_BITMAPWORD + 1)
@@ -85,7 +85,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */
*/
typedef struct PagetableEntry
{
- BlockNumber blockno; /* page number (hashtable key) */
+ BlockNumber blockno; /* page number (hashtable key) */
bool ischunk; /* T = lossy storage, F = exact */
bitmapword words[Max(WORDS_PER_PAGE, WORDS_PER_CHUNK)];
} PagetableEntry;
@@ -136,9 +136,9 @@ struct TIDBitmap
/* Local function prototypes */
static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage);
static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage,
- const TIDBitmap *b);
+ const TIDBitmap *b);
static const PagetableEntry *tbm_find_pageentry(const TIDBitmap *tbm,
- BlockNumber pageno);
+ BlockNumber pageno);
static PagetableEntry *tbm_get_pageentry(TIDBitmap *tbm, BlockNumber pageno);
static bool tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno);
static void tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno);
@@ -160,8 +160,8 @@ tbm_create(long maxbytes)
long nbuckets;
/*
- * Create the TIDBitmap struct, with enough trailing space to serve
- * the needs of the TBMIterateResult sub-struct.
+ * Create the TIDBitmap struct, with enough trailing space to serve the
+ * needs of the TBMIterateResult sub-struct.
*/
tbm = (TIDBitmap *) palloc(sizeof(TIDBitmap) +
MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber));
@@ -173,17 +173,17 @@ tbm_create(long maxbytes)
tbm->status = TBM_EMPTY;
/*
- * Estimate number of hashtable entries we can have within maxbytes.
- * This estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT))
- * plus a pointer per hash entry, which is crude but good enough for
- * our purpose. Also count an extra Pointer per entry for the arrays
- * created during iteration readout.
+ * Estimate number of hashtable entries we can have within maxbytes. This
+ * estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) plus a
+ * pointer per hash entry, which is crude but good enough for our purpose.
+ * Also count an extra Pointer per entry for the arrays created during
+ * iteration readout.
*/
nbuckets = maxbytes /
(MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(PagetableEntry))
+ sizeof(Pointer) + sizeof(Pointer));
- nbuckets = Min(nbuckets, INT_MAX-1); /* safety limit */
- nbuckets = Max(nbuckets, 16); /* sanity limit */
+ nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */
+ nbuckets = Max(nbuckets, 16); /* sanity limit */
tbm->maxentries = (int) nbuckets;
return tbm;
@@ -319,7 +319,7 @@ static void
tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage)
{
PagetableEntry *apage;
- int wordnum;
+ int wordnum;
if (bpage->ischunk)
{
@@ -330,7 +330,7 @@ tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage)
if (w != 0)
{
- BlockNumber pg;
+ BlockNumber pg;
pg = bpage->blockno + (wordnum * BITS_PER_BITMAPWORD);
while (w != 0)
@@ -428,12 +428,12 @@ static bool
tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
{
const PagetableEntry *bpage;
- int wordnum;
+ int wordnum;
if (apage->ischunk)
{
/* Scan each bit in chunk, try to clear */
- bool candelete = true;
+ bool candelete = true;
for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++)
{
@@ -442,8 +442,8 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
if (w != 0)
{
bitmapword neww = w;
- BlockNumber pg;
- int bitnum;
+ BlockNumber pg;
+ int bitnum;
pg = apage->blockno + (wordnum * BITS_PER_BITMAPWORD);
bitnum = 0;
@@ -472,19 +472,19 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
else if (tbm_page_is_lossy(b, apage->blockno))
{
/*
- * When the page is lossy in b, we have to mark it lossy in a too.
- * We know that no bits need be set in bitmap a, but we do not know
- * which ones should be cleared, and we have no API for "at most
- * these tuples need be checked". (Perhaps it's worth adding that?)
+ * When the page is lossy in b, we have to mark it lossy in a too. We
+ * know that no bits need be set in bitmap a, but we do not know which
+ * ones should be cleared, and we have no API for "at most these
+ * tuples need be checked". (Perhaps it's worth adding that?)
*/
tbm_mark_page_lossy(a, apage->blockno);
/*
- * Note: tbm_mark_page_lossy will have removed apage from a, and
- * may have inserted a new lossy chunk instead. We can continue the
- * same seq_search scan at the caller level, because it does not
- * matter whether we visit such a new chunk or not: it will have
- * only the bit for apage->blockno set, which is correct.
+ * Note: tbm_mark_page_lossy will have removed apage from a, and may
+ * have inserted a new lossy chunk instead. We can continue the same
+ * seq_search scan at the caller level, because it does not matter
+ * whether we visit such a new chunk or not: it will have only the bit
+ * for apage->blockno set, which is correct.
*
* We must return false here since apage was already deleted.
*/
@@ -492,7 +492,7 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
}
else
{
- bool candelete = true;
+ bool candelete = true;
bpage = tbm_find_pageentry(b, apage->blockno);
if (bpage != NULL)
@@ -535,17 +535,20 @@ tbm_begin_iterate(TIDBitmap *tbm)
int nchunks;
tbm->iterating = true;
+
/*
* Reset iteration pointers.
*/
tbm->spageptr = 0;
tbm->schunkptr = 0;
tbm->schunkbit = 0;
+
/*
* Nothing else to do if no entries, nor if we don't have a hashtable.
*/
if (tbm->nentries == 0 || tbm->status != TBM_HASH)
return;
+
/*
* Create and fill the sorted page lists if we didn't already.
*/
@@ -591,6 +594,7 @@ tbm_iterate(TIDBitmap *tbm)
TBMIterateResult *output = &(tbm->output);
Assert(tbm->iterating);
+
/*
* If lossy chunk pages remain, make sure we've advanced schunkptr/
* schunkbit to the next set bit.
@@ -598,12 +602,12 @@ tbm_iterate(TIDBitmap *tbm)
while (tbm->schunkptr < tbm->nchunks)
{
PagetableEntry *chunk = tbm->schunks[tbm->schunkptr];
- int schunkbit = tbm->schunkbit;
+ int schunkbit = tbm->schunkbit;
while (schunkbit < PAGES_PER_CHUNK)
{
- int wordnum = WORDNUM(schunkbit);
- int bitnum = BITNUM(schunkbit);
+ int wordnum = WORDNUM(schunkbit);
+ int bitnum = BITNUM(schunkbit);
if ((chunk->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0)
break;
@@ -618,6 +622,7 @@ tbm_iterate(TIDBitmap *tbm)
tbm->schunkptr++;
tbm->schunkbit = 0;
}
+
/*
* If both chunk and per-page data remain, must output the numerically
* earlier page.
@@ -717,7 +722,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno)
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
@@ -785,8 +790,8 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
HASH_FIND, NULL);
if (page != NULL && page->ischunk)
{
- int wordnum = WORDNUM(bitno);
- int bitnum = BITNUM(bitno);
+ int wordnum = WORDNUM(bitno);
+ int bitnum = BITNUM(bitno);
if ((page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0)
return true;
@@ -797,7 +802,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
@@ -818,9 +823,8 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno)
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is
- * its own chunk header, however, we skip this and handle the case
- * below.
+ * Remove any extant non-lossy entry for the page. If the page is its own
+ * chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
{
@@ -879,10 +883,9 @@ tbm_lossify(TIDBitmap *tbm)
/*
* XXX Really stupid implementation: this just lossifies pages in
- * essentially random order. We should be paying some attention
- * to the number of bits set in each page, instead. Also it might
- * be a good idea to lossify more than the minimum number of pages
- * during each call.
+ * essentially random order. We should be paying some attention to the
+ * number of bits set in each page, instead. Also it might be a good idea
+ * to lossify more than the minimum number of pages during each call.
*/
Assert(!tbm->iterating);
Assert(tbm->status == TBM_HASH);
@@ -892,9 +895,10 @@ tbm_lossify(TIDBitmap *tbm)
{
if (page->ischunk)
continue; /* already a chunk header */
+
/*
- * If the page would become a chunk header, we won't save anything
- * by converting it to lossy, so skip it.
+ * If the page would become a chunk header, we won't save anything by
+ * converting it to lossy, so skip it.
*/
if ((page->blockno % PAGES_PER_CHUNK) == 0)
continue;
@@ -906,9 +910,9 @@ tbm_lossify(TIDBitmap *tbm)
return; /* we have done enough */
/*
- * Note: tbm_mark_page_lossy may have inserted a lossy chunk into
- * the hashtable. We can continue the same seq_search scan since
- * we do not care whether we visit lossy chunks or not.
+ * Note: tbm_mark_page_lossy may have inserted a lossy chunk into the
+ * hashtable. We can continue the same seq_search scan since we do
+ * not care whether we visit lossy chunks or not.
*/
}
}
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
index 05d7602fefe..9c7a3425858 100644
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -3,7 +3,7 @@
* geqo_erx.c
* edge recombination crossover [ER]
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.19 2003/11/29 22:39:49 pgsql Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,8 +55,8 @@ alloc_edge_table(int num_gene)
Edge *edge_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge));
@@ -94,8 +94,7 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
int i,
index1,
index2;
- int edge_total; /* total number of unique edges in two
- * genes */
+ int edge_total; /* total number of unique edges in two genes */
/* at first clear the edge table's old data */
for (i = 1; i <= num_gene; i++)
@@ -111,15 +110,15 @@ gimme_edge_table(Gene *tour1, Gene *tour2, int num_gene, Edge *edge_table)
for (index1 = 0; index1 < num_gene; index1++)
{
/*
- * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this
- * operaton maps n back to 1
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+ * maps n back to 1
*/
index2 = (index1 + 1) % num_gene;
/*
- * edges are bidirectional, i.e. 1->2 is same as 2->1 call
- * gimme_edge twice per edge
+ * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge
+ * twice per edge
*/
edge_total += gimme_edge(tour1[index1], tour1[index2], edge_table);
@@ -320,10 +319,10 @@ gimme_gene(Edge edge, Edge *edge_table)
*/
/*
- * The test for minimum_count can probably be removed at some
- * point but comments should probably indicate exactly why it is
- * guaranteed that the test will always succeed the first time
- * around. If it can fail then the code is in error
+ * The test for minimum_count can probably be removed at some point
+ * but comments should probably indicate exactly why it is guaranteed
+ * that the test will always succeed the first time around. If it can
+ * fail then the code is in error
*/
@@ -379,8 +378,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
/*
- * how many edges remain? how many gene with four total (initial)
- * edges remain?
+ * how many edges remain? how many gene with four total (initial) edges
+ * remain?
*/
for (i = 1; i <= num_gene; i++)
@@ -395,8 +394,8 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
}
/*
- * random decision of the gene with remaining edges and whose
- * total_edges == 4
+ * random decision of the gene with remaining edges and whose total_edges
+ * == 4
*/
if (four_count != 0)
@@ -444,15 +443,15 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
}
/*
- * edge table seems to be empty; this happens sometimes on the last
- * point due to the fact that the first point is removed from the
- * table even though only one of its edges has been determined
+ * edge table seems to be empty; this happens sometimes on the last point
+ * due to the fact that the first point is removed from the table even
+ * though only one of its edges has been determined
*/
else
- { /* occurs only at the last point in the
- * tour; simply look for the point which
- * is not yet used */
+ { /* occurs only at the last point in the tour;
+ * simply look for the point which is not yet
+ * used */
for (i = 1; i <= num_gene; i++)
if (edge_table[i].unused_edges >= 0)
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index d1bb3059fc0..0a2dee08dc8 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.76 2005/06/09 04:18:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,15 +52,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
struct HTAB *savehash;
/*
- * Because gimme_tree considers both left- and right-sided trees,
- * there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered. To avoid
- * redundant cost calculations, we simply reject tours where tour[0] >
- * tour[1], assigning them an artificially bad fitness.
+ * Because gimme_tree considers both left- and right-sided trees, there is
+ * no difference between a tour (a,b,c,d,...) and a tour (b,a,c,d,...) ---
+ * the same join orders will be considered. To avoid redundant cost
+ * calculations, we simply reject tours where tour[0] > tour[1], assigning
+ * them an artificially bad fitness.
*
* init_tour() is aware of this rule and so we should never reject a tour
- * during the initial filling of the pool. It seems difficult to
- * persuade the recombination logic never to break the rule, however.
+ * during the initial filling of the pool. It seems difficult to persuade
+ * the recombination logic never to break the rule, however.
*/
if (num_gene >= 2 && tour[0] > tour[1])
return DBL_MAX;
@@ -69,10 +69,10 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* Create a private memory context that will hold all temp storage
* allocated inside gimme_tree().
*
- * Since geqo_eval() will be called many times, we can't afford to let
- * all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so
- * that it will be freed even if we abort via ereport(ERROR).
+ * Since geqo_eval() will be called many times, we can't afford to let all
+ * that memory go unreclaimed until end of statement. Note we make the
+ * temp context a child of the planner's normal context, so that it will
+ * be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
@@ -84,15 +84,15 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
/*
* gimme_tree will add entries to root->join_rel_list, which may or may
* not already contain some entries. The newly added entries will be
- * recycled by the MemoryContextDelete below, so we must ensure that
- * the list is restored to its former state before exiting. We can
- * do this by truncating the list to its original length. NOTE this
- * assumes that any added entries are appended at the end!
+ * recycled by the MemoryContextDelete below, so we must ensure that the
+ * list is restored to its former state before exiting. We can do this by
+ * truncating the list to its original length. NOTE this assumes that any
+ * added entries are appended at the end!
*
- * We also must take care not to mess up the outer join_rel_hash,
- * if there is one. We can do this by just temporarily setting the
- * link to NULL. (If we are dealing with enough join rels, which we
- * very likely are, a new hash table will get built and used locally.)
+ * We also must take care not to mess up the outer join_rel_hash, if there is
+ * one. We can do this by just temporarily setting the link to NULL. (If
+ * we are dealing with enough join rels, which we very likely are, a new
+ * hash table will get built and used locally.)
*/
savelength = list_length(evaldata->root->join_rel_list);
savehash = evaldata->root->join_rel_hash;
@@ -170,23 +170,22 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* Push each relation onto the stack in the specified order. After
* pushing each relation, see whether the top two stack entries are
* joinable according to the desirable_join() heuristics. If so, join
- * them into one stack entry, and try again to combine with the next
- * stack entry down (if any). When the stack top is no longer
- * joinable, continue to the next input relation. After we have
- * pushed the last input relation, the heuristics are disabled and we
- * force joining all the remaining stack entries.
+ * them into one stack entry, and try again to combine with the next stack
+ * entry down (if any). When the stack top is no longer joinable,
+ * continue to the next input relation. After we have pushed the last
+ * input relation, the heuristics are disabled and we force joining all
+ * the remaining stack entries.
*
* If desirable_join() always returns true, this produces a straight
- * left-to-right join just like the old code. Otherwise we may
- * produce a bushy plan or a left/right-sided plan that really
- * corresponds to some tour other than the one given. To the extent
- * that the heuristics are helpful, however, this will be a better
- * plan than the raw tour.
+ * left-to-right join just like the old code. Otherwise we may produce a
+ * bushy plan or a left/right-sided plan that really corresponds to some
+ * tour other than the one given. To the extent that the heuristics are
+ * helpful, however, this will be a better plan than the raw tour.
*
- * Also, when a join attempt fails (because of IN-clause constraints), we
- * may be able to recover and produce a workable plan, where the old
- * code just had to give up. This case acts the same as a false
- * result from desirable_join().
+ * Also, when a join attempt fails (because of IN-clause constraints), we may
+ * be able to recover and produce a workable plan, where the old code just
+ * had to give up. This case acts the same as a false result from
+ * desirable_join().
*/
for (rel_count = 0; rel_count < num_gene; rel_count++)
{
@@ -199,8 +198,8 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
stack_depth++;
/*
- * While it's feasible, pop the top two stack entries and replace
- * with their join.
+ * While it's feasible, pop the top two stack entries and replace with
+ * their join.
*/
while (stack_depth >= 2)
{
@@ -208,20 +207,18 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
RelOptInfo *inner_rel = stack[stack_depth - 1];
/*
- * Don't pop if heuristics say not to join now. However, once
- * we have exhausted the input, the heuristics can't prevent
- * popping.
+ * Don't pop if heuristics say not to join now. However, once we
+ * have exhausted the input, the heuristics can't prevent popping.
*/
if (rel_count < num_gene - 1 &&
!desirable_join(evaldata->root, outer_rel, inner_rel))
break;
/*
- * Construct a RelOptInfo representing the join of these two
- * input relations. These are always inner joins. Note that
- * we expect the joinrel not to exist in root->join_rel_list
- * yet, and so the paths constructed for it will only include
- * the ones we want.
+ * Construct a RelOptInfo representing the join of these two input
+ * relations. These are always inner joins. Note that we expect
+ * the joinrel not to exist in root->join_rel_list yet, and so the
+ * paths constructed for it will only include the ones we want.
*/
joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
JOIN_INNER);
@@ -266,9 +263,9 @@ desirable_join(PlannerInfo *root,
return true;
/*
- * Join if the rels are members of the same IN sub-select. This is
- * needed to improve the odds that we will find a valid solution in a
- * case where an IN sub-select has a clauseless join.
+ * Join if the rels are members of the same IN sub-select. This is needed
+ * to improve the odds that we will find a valid solution in a case where
+ * an IN sub-select has a clauseless join.
*/
foreach(l, root->in_info_list)
{
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index c027f4370c3..d7618c5d67d 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.50 2005/06/08 23:02:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.51 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,10 +106,9 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
random_init_pool(pool, &evaldata);
/* sort the pool according to cheapest path as fitness */
- sort_pool(pool); /* we have to do it only one time, since
- * all kids replace the worst individuals
- * in future (-> geqo_pool.c:spread_chromo
- * ) */
+ sort_pool(pool); /* we have to do it only one time, since all
+ * kids replace the worst individuals in
+ * future (-> geqo_pool.c:spread_chromo ) */
#ifdef GEQO_DEBUG
elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f",
diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c
index 5afdcd7b8f5..ff5bd07e6ad 100644
--- a/src/backend/optimizer/geqo/geqo_misc.c
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.42 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.43 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,10 +41,10 @@ avg_pool(Pool *pool)
elog(ERROR, "pool_size is zero");
/*
- * Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
- * loses a little in speed and accuracy, but this routine is only used
- * for debug printouts, so we don't care that much.
+ * Since the pool may contain multiple occurrences of DBL_MAX, divide by
+ * pool->size before summing, not after, to avoid overflow. This loses a
+ * little in speed and accuracy, but this routine is only used for debug
+ * printouts, so we don't care that much.
*/
for (i = 0; i < pool->size; i++)
cumulative += pool->data[i].worth / pool->size;
diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c
index f6881c0f5ff..83927facae5 100644
--- a/src/backend/optimizer/geqo/geqo_pool.c
+++ b/src/backend/optimizer/geqo/geqo_pool.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.26 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,13 +96,12 @@ random_init_pool(Pool *pool, GeqoEvalData *evaldata)
int bad = 0;
/*
- * We immediately discard any invalid individuals (those that
- * geqo_eval returns DBL_MAX for), thereby not wasting pool space on
- * them.
+ * We immediately discard any invalid individuals (those that geqo_eval
+ * returns DBL_MAX for), thereby not wasting pool space on them.
*
- * If we fail to make any valid individuals after 10000 tries, give up;
- * this probably means something is broken, and we shouldn't just let
- * ourselves get stuck in an infinite loop.
+ * If we fail to make any valid individuals after 10000 tries, give up; this
+ * probably means something is broken, and we shouldn't just let ourselves
+ * get stuck in an infinite loop.
*/
i = 0;
while (i < pool->size)
@@ -223,8 +222,8 @@ spread_chromo(Chromosome *chromo, Pool *pool)
/*
- * these 2 cases move the search indices since a new location has
- * not yet been found.
+ * these 2 cases move the search indices since a new location has not
+ * yet been found.
*/
else if (chromo->worth < pool->data[mid].worth)
@@ -242,8 +241,7 @@ spread_chromo(Chromosome *chromo, Pool *pool)
/* now we have index for chromo */
/*
- * move every gene from index on down one position to make room for
- * chromo
+ * move every gene from index on down one position to make room for chromo
*/
/*
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
index d2ebee17653..c73e5b2a79e 100644
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -3,7 +3,7 @@
* geqo_recombination.c
* misc recombination procedures
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.15 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,8 +62,8 @@ init_tour(Gene *tour, int num_gene)
}
/*
- * Since geqo_eval() will reject tours where tour[0] > tour[1], we may
- * as well switch the two to make it a valid tour.
+ * Since geqo_eval() will reject tours where tour[0] > tour[1], we may as
+ * well switch the two to make it a valid tour.
*/
if (num_gene >= 2 && tour[0] > tour[1])
{
@@ -86,8 +86,8 @@ alloc_city_table(int num_gene)
City *city_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
city_table = (City *) palloc((num_gene + 1) * sizeof(City));
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
index 92b735cb282..32a3e83ae03 100644
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.19 2005/06/14 14:21:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,13 +86,14 @@ linear(int pool_size, double bias) /* bias is y-intercept of linear
/*
* If geqo_rand() returns exactly 1.0 then we will get exactly max from
- * this equation, whereas we need 0 <= index < max. Also it seems possible
- * that roundoff error might deliver values slightly outside the range;
- * in particular avoid passing a value slightly less than 0 to sqrt().
- * If we get a bad value just try again.
+ * this equation, whereas we need 0 <= index < max. Also it seems
+ * possible that roundoff error might deliver values slightly outside the
+ * range; in particular avoid passing a value slightly less than 0 to
+ * sqrt(). If we get a bad value just try again.
*/
- do {
- double sqrtval;
+ do
+ {
+ double sqrtval;
sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand();
if (sqrtval > 0.0)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index aa14deacd0c..d8a42b82548 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.136 2005/08/22 17:34:58 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ static void compare_tlist_datatypes(List *tlist, List *colTypes,
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
bool *differentTypes);
static void subquery_push_qual(Query *subquery,
- RangeTblEntry *rte, Index rti, Node *qual);
+ RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
RangeTblEntry *rte, Index rti, Node *qual);
@@ -105,7 +105,7 @@ make_one_rel(PlannerInfo *root)
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (brel->reloptkind != RELOPT_BASEREL)
@@ -134,9 +134,9 @@ set_base_rel_pathlists(PlannerInfo *root)
Index rti;
/*
- * Note: because we call expand_inherited_rtentry inside the loop,
- * it's quite possible for the base_rel_array to be enlarged while
- * the loop runs. Hence don't try to optimize the loop.
+ * Note: because we call expand_inherited_rtentry inside the loop, it's
+ * quite possible for the base_rel_array to be enlarged while the loop
+ * runs. Hence don't try to optimize the loop.
*/
for (rti = 1; rti < root->base_rel_array_size; rti++)
{
@@ -255,8 +255,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
ListCell *il;
/*
- * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE;
- * can we do better?
+ * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
+ * we do better?
*/
if (list_member_int(root->parse->rowMarks, parentRTindex))
ereport(ERROR,
@@ -270,8 +270,8 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->width = 0;
/*
- * Generate access paths for each table in the tree (parent AND
- * children), and pick the cheapest path for each table.
+ * Generate access paths for each table in the tree (parent AND children),
+ * and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
@@ -286,18 +286,17 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
childOID = childrte->relid;
/*
- * Make a RelOptInfo for the child so we can do planning.
- * Mark it as an "other rel" since it will not be part of the
- * main join tree.
+ * Make a RelOptInfo for the child so we can do planning. Mark it as
+ * an "other rel" since it will not be part of the main join tree.
*/
childrel = build_other_rel(root, childRTindex);
/*
- * Copy the parent's targetlist and restriction quals to the
- * child, with attribute-number adjustment as needed. We don't
- * bother to copy the join quals, since we can't do any joining of
- * the individual tables. Also, we just zap attr_needed rather
- * than trying to adjust it; it won't be looked at in the child.
+ * Copy the parent's targetlist and restriction quals to the child,
+ * with attribute-number adjustment as needed. We don't bother to
+ * copy the join quals, since we can't do any joining of the
+ * individual tables. Also, we just zap attr_needed rather than
+ * trying to adjust it; it won't be looked at in the child.
*/
childrel->reltargetlist = (List *)
adjust_inherited_attrs((Node *) rel->reltargetlist,
@@ -320,13 +319,14 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
*/
if (constraint_exclusion)
{
- List *constraint_pred;
+ List *constraint_pred;
constraint_pred = get_relation_constraints(childOID, childrel);
+
/*
- * We do not currently enforce that CHECK constraints contain
- * only immutable functions, so it's necessary to check here.
- * We daren't draw conclusions from plan-time evaluation of
+ * We do not currently enforce that CHECK constraints contain only
+ * immutable functions, so it's necessary to check here. We
+ * daren't draw conclusions from plan-time evaluation of
* non-immutable functions.
*/
if (!contain_mutable_functions((Node *) constraint_pred))
@@ -351,9 +351,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
subpaths = lappend(subpaths, childrel->cheapest_total_path);
/*
- * Propagate size information from the child back to the parent.
- * For simplicity, we use the largest widths from any child as the
- * parent estimates.
+ * Propagate size information from the child back to the parent. For
+ * simplicity, we use the largest widths from any child as the parent
+ * estimates.
*/
rel->rows += childrel->rows;
if (childrel->width > rel->width)
@@ -377,9 +377,9 @@ set_inherited_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Finally, build Append path and install it as the only access path
- * for the parent rel. (Note: this is correct even if we have zero
- * or one live subpath due to constraint exclusion.)
+ * Finally, build Append path and install it as the only access path for
+ * the parent rel. (Note: this is correct even if we have zero or one
+ * live subpath due to constraint exclusion.)
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@@ -430,18 +430,18 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* If there are any restriction clauses that have been attached to the
- * subquery relation, consider pushing them down to become WHERE or
- * HAVING quals of the subquery itself. This transformation is useful
- * because it may allow us to generate a better plan for the subquery
- * than evaluating all the subquery output rows and then filtering them.
+ * subquery relation, consider pushing them down to become WHERE or HAVING
+ * quals of the subquery itself. This transformation is useful because it
+ * may allow us to generate a better plan for the subquery than evaluating
+ * all the subquery output rows and then filtering them.
*
- * There are several cases where we cannot push down clauses.
- * Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses
- * are checked by qual_is_pushdown_safe().
+ * There are several cases where we cannot push down clauses. Restrictions
+ * involving the subquery are checked by subquery_is_pushdown_safe().
+ * Restrictions on individual clauses are checked by
+ * qual_is_pushdown_safe().
*
- * Non-pushed-down clauses will get evaluated as qpquals of the
- * SubqueryScan node.
+ * Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan
+ * node.
*
* XXX Are there any cases where we want to make a policy decision not to
* push down a pushable qual, because it'd result in a worse plan?
@@ -475,10 +475,10 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
pfree(differentTypes);
/*
- * We can safely pass the outer tuple_fraction down to the subquery
- * if the outer level has no joining, aggregation, or sorting to do.
- * Otherwise we'd better tell the subquery to plan for full retrieval.
- * (XXX This could probably be made more intelligent ...)
+ * We can safely pass the outer tuple_fraction down to the subquery if the
+ * outer level has no joining, aggregation, or sorting to do. Otherwise
+ * we'd better tell the subquery to plan for full retrieval. (XXX This
+ * could probably be made more intelligent ...)
*/
if (parse->hasAggs ||
parse->groupClause ||
@@ -540,8 +540,8 @@ make_fromexpr_rel(PlannerInfo *root, FromExpr *from)
/*
* Count the number of child jointree nodes. This is the depth of the
- * dynamic-programming algorithm we must employ to consider all ways
- * of joining the child nodes.
+ * dynamic-programming algorithm we must employ to consider all ways of
+ * joining the child nodes.
*/
levels_needed = list_length(from->fromlist);
@@ -603,11 +603,11 @@ make_one_rel_by_joins(PlannerInfo *root, int levels_needed, List *initial_rels)
RelOptInfo *rel;
/*
- * We employ a simple "dynamic programming" algorithm: we first find
- * all ways to build joins of two jointree items, then all ways to
- * build joins of three items (from two-item joins and single items),
- * then four-item joins, and so on until we have considered all ways
- * to join all the items into one rel.
+ * We employ a simple "dynamic programming" algorithm: we first find all
+ * ways to build joins of two jointree items, then all ways to build joins
+ * of three items (from two-item joins and single items), then four-item
+ * joins, and so on until we have considered all ways to join all the
+ * items into one rel.
*
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
@@ -823,8 +823,8 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
return false;
/*
- * Examine all Vars used in clause; since it's a restriction clause,
- * all such Vars must refer to subselect output columns.
+ * Examine all Vars used in clause; since it's a restriction clause, all
+ * such Vars must refer to subselect output columns.
*/
vars = pull_var_clause(qual, false);
foreach(vl, vars)
@@ -835,9 +835,9 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
Assert(var->varno == rti);
/*
- * We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't have
- * negative attnos.)
+ * We use a bitmapset to avoid testing the same attno more than once.
+ * (NB: this only works because subquery outputs can't have negative
+ * attnos.)
*/
if (bms_is_member(var->varattno, tested))
continue;
@@ -893,11 +893,10 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
else
{
/*
- * We need to replace Vars in the qual (which must refer to
- * outputs of the subquery) with copies of the subquery's
- * targetlist expressions. Note that at this point, any uplevel
- * Vars in the qual should have been replaced with Params, so they
- * need no work.
+ * We need to replace Vars in the qual (which must refer to outputs of
+ * the subquery) with copies of the subquery's targetlist expressions.
+ * Note that at this point, any uplevel Vars in the qual should have
+ * been replaced with Params, so they need no work.
*
* This step also ensures that when we are pushing into a setop tree,
* each component query gets its own copy of the qual.
@@ -907,9 +906,9 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
CMD_SELECT, 0);
/*
- * Now attach the qual to the proper place: normally WHERE, but
- * if the subquery uses grouping or aggregation, put it in HAVING
- * (since the qual really refers to the group-result rows).
+ * Now attach the qual to the proper place: normally WHERE, but if the
+ * subquery uses grouping or aggregation, put it in HAVING (since the
+ * qual really refers to the group-result rows).
*/
if (subquery->hasAggs || subquery->groupClause || subquery->havingQual)
subquery->havingQual = make_and_qual(subquery->havingQual, qual);
@@ -919,8 +918,8 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
/*
* We need not change the subquery's hasAggs or hasSublinks flags,
- * since we can't be pushing down any aggregates that weren't
- * there before, and we don't push down subselects at all.
+ * since we can't be pushing down any aggregates that weren't there
+ * before, and we don't push down subselects at all.
*/
}
}
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index aad977164a7..9a4990898e9 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.74 2005/10/11 16:44:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.75 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,7 +82,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
@@ -102,9 +102,9 @@ clauselist_selectivity(PlannerInfo *root,
ListCell *l;
/*
- * Initial scan over clauses. Anything that doesn't look like a
- * potential rangequery clause gets multiplied into s1 and forgotten.
- * Anything that does gets inserted into an rqlist entry.
+ * Initial scan over clauses. Anything that doesn't look like a potential
+ * rangequery clause gets multiplied into s1 and forgotten. Anything that
+ * does gets inserted into an rqlist entry.
*/
foreach(l, clauses)
{
@@ -127,10 +127,10 @@ clauselist_selectivity(PlannerInfo *root,
rinfo = NULL;
/*
- * See if it looks like a restriction clause with a pseudoconstant
- * on one side. (Anything more complicated than that might not
- * behave in the simple way we are expecting.) Most of the tests
- * here can be done more efficiently with rinfo than without.
+ * See if it looks like a restriction clause with a pseudoconstant on
+ * one side. (Anything more complicated than that might not behave in
+ * the simple way we are expecting.) Most of the tests here can be
+ * done more efficiently with rinfo than without.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
@@ -142,10 +142,10 @@ clauselist_selectivity(PlannerInfo *root,
{
ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
(is_pseudo_constant_clause_relids(lsecond(expr->args),
- rinfo->right_relids) ||
+ rinfo->right_relids) ||
(varonleft = false,
- is_pseudo_constant_clause_relids(linitial(expr->args),
- rinfo->left_relids)));
+ is_pseudo_constant_clause_relids(linitial(expr->args),
+ rinfo->left_relids)));
}
else
{
@@ -159,8 +159,8 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right
- * oprrest, add the clause to rqlist for later processing.
+ * selectivity in generically. But if it's the right oprrest,
+ * add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
{
@@ -199,8 +199,8 @@ clauselist_selectivity(PlannerInfo *root,
/*
* Exact equality to the default value probably means the
- * selectivity function punted. This is not airtight but
- * should be good enough.
+ * selectivity function punted. This is not airtight but should
+ * be good enough.
*/
if (rqlist->hibound == DEFAULT_INEQ_SEL ||
rqlist->lobound == DEFAULT_INEQ_SEL)
@@ -289,8 +289,8 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause,
for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
{
/*
- * We use full equal() here because the "var" might be a function
- * of one or more attributes of the same relation...
+ * We use full equal() here because the "var" might be a function of
+ * one or more attributes of the same relation...
*/
if (!equal(var, rqelem->var))
continue;
@@ -423,17 +423,16 @@ clause_selectivity(PlannerInfo *root,
rinfo = (RestrictInfo *) clause;
/*
- * If possible, cache the result of the selectivity calculation
- * for the clause. We can cache if varRelid is zero or the clause
- * contains only vars of that relid --- otherwise varRelid will
- * affect the result, so mustn't cache. We also have to be
- * careful about the jointype. It's OK to cache when jointype is
- * JOIN_INNER or one of the outer join types (any given outer-join
- * clause should always be examined with the same jointype, so
- * result won't change). It's not OK to cache when jointype is one
- * of the special types associated with IN processing, because the
- * same clause may be examined with different jointypes and the
- * result should vary.
+ * If possible, cache the result of the selectivity calculation for
+ * the clause. We can cache if varRelid is zero or the clause
+ * contains only vars of that relid --- otherwise varRelid will affect
+ * the result, so mustn't cache. We also have to be careful about the
+ * jointype. It's OK to cache when jointype is JOIN_INNER or one of
+ * the outer join types (any given outer-join clause should always be
+ * examined with the same jointype, so result won't change). It's not
+ * OK to cache when jointype is one of the special types associated
+ * with IN processing, because the same clause may be examined with
+ * different jointypes and the result should vary.
*/
if (varRelid == 0 ||
bms_is_subset_singleton(rinfo->clause_relids, varRelid))
@@ -477,8 +476,8 @@ clause_selectivity(PlannerInfo *root,
Var *var = (Var *) clause;
/*
- * We probably shouldn't ever see an uplevel Var here, but if we
- * do, return the default selectivity...
+ * We probably shouldn't ever see an uplevel Var here, but if we do,
+ * return the default selectivity...
*/
if (var->varlevelsup == 0 &&
(varRelid == 0 || varRelid == (int) var->varno))
@@ -488,23 +487,23 @@ clause_selectivity(PlannerInfo *root,
if (rte->rtekind == RTE_SUBQUERY)
{
/*
- * XXX not smart about subquery references... any way to
- * do better?
+ * XXX not smart about subquery references... any way to do
+ * better?
*/
s1 = 0.5;
}
else
{
/*
- * A Var at the top of a clause must be a bool Var. This
- * is equivalent to the clause reln.attribute = 't', so we
+ * A Var at the top of a clause must be a bool Var. This is
+ * equivalent to the clause reln.attribute = 't', so we
* compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(root,
BooleanEqualOperator,
list_make2(var,
- makeBoolConst(true,
- false)),
+ makeBoolConst(true,
+ false)),
varRelid);
}
}
@@ -534,7 +533,7 @@ clause_selectivity(PlannerInfo *root,
{
/* inverse of the selectivity of the underlying clause */
s1 = 1.0 - clause_selectivity(root,
- (Node *) get_notclausearg((Expr *) clause),
+ (Node *) get_notclausearg((Expr *) clause),
varRelid,
jointype);
}
@@ -576,17 +575,16 @@ clause_selectivity(PlannerInfo *root,
{
/*
* If we are considering a nestloop join then all clauses are
- * restriction clauses, since we are only interested in the
- * one relation.
+ * restriction clauses, since we are only interested in the one
+ * relation.
*/
is_join_clause = false;
}
else
{
/*
- * Otherwise, it's a join if there's more than one relation
- * used. We can optimize this calculation if an rinfo was
- * passed.
+ * Otherwise, it's a join if there's more than one relation used.
+ * We can optimize this calculation if an rinfo was passed.
*/
if (rinfo)
is_join_clause = (bms_membership(rinfo->clause_relids) ==
@@ -613,8 +611,8 @@ clause_selectivity(PlannerInfo *root,
else if (is_funcclause(clause))
{
/*
- * This is not an operator, so we guess at the selectivity. THIS
- * IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
+ * This is not an operator, so we guess at the selectivity. THIS IS A
+ * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
* SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index bb506678ce4..8a1df9e0a2d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.148 2005/10/05 17:19:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,8 +121,8 @@ clamp_row_est(double nrows)
{
/*
* Force estimate to be at least one row, to make explain output look
- * better and to avoid possible divide-by-zero when interpolating
- * costs. Make it an integer, too.
+ * better and to avoid possible divide-by-zero when interpolating costs.
+ * Make it an integer, too.
*/
if (nrows < 1.0)
nrows = 1.0;
@@ -155,12 +155,11 @@ cost_seqscan(Path *path, PlannerInfo *root,
/*
* disk costs
*
- * The cost of reading a page sequentially is 1.0, by definition. Note
- * that the Unix kernel will typically do some amount of read-ahead
- * optimization, so that this cost is less than the true cost of
- * reading a page from disk. We ignore that issue here, but must take
- * it into account when estimating the cost of non-sequential
- * accesses!
+ * The cost of reading a page sequentially is 1.0, by definition. Note that
+ * the Unix kernel will typically do some amount of read-ahead
+ * optimization, so that this cost is less than the true cost of reading a
+ * page from disk. We ignore that issue here, but must take it into
+ * account when estimating the cost of non-sequential accesses!
*/
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
@@ -276,10 +275,10 @@ cost_index(IndexPath *path, PlannerInfo *root,
startup_cost += disable_cost;
/*
- * Call index-access-method-specific code to estimate the processing
- * cost for scanning the index, as well as the selectivity of the
- * index (ie, the fraction of main-table tuples we will have to
- * retrieve) and its correlation to the main-table tuple order.
+ * Call index-access-method-specific code to estimate the processing cost
+ * for scanning the index, as well as the selectivity of the index (ie,
+ * the fraction of main-table tuples we will have to retrieve) and its
+ * correlation to the main-table tuple order.
*/
OidFunctionCall7(index->amcostestimate,
PointerGetDatum(root),
@@ -292,8 +291,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
- * We don't bother to save indexStartupCost or indexCorrelation, because
- * a bitmap scan doesn't care about either.
+ * We don't bother to save indexStartupCost or indexCorrelation, because a
+ * bitmap scan doesn't care about either.
*/
path->indextotalcost = indexTotalCost;
path->indexselectivity = indexSelectivity;
@@ -366,19 +365,18 @@ cost_index(IndexPath *path, PlannerInfo *root,
}
/*
- * min_IO_cost corresponds to the perfectly correlated case
- * (csquared=1), max_IO_cost to the perfectly uncorrelated case
- * (csquared=0). Note that we just charge random_page_cost per page
- * in the uncorrelated case, rather than using
- * cost_nonsequential_access, since we've already accounted for
- * caching effects by using the Mackert model.
+ * min_IO_cost corresponds to the perfectly correlated case (csquared=1),
+ * max_IO_cost to the perfectly uncorrelated case (csquared=0). Note that
+ * we just charge random_page_cost per page in the uncorrelated case,
+ * rather than using cost_nonsequential_access, since we've already
+ * accounted for caching effects by using the Mackert model.
*/
min_IO_cost = ceil(indexSelectivity * T);
max_IO_cost = pages_fetched * random_page_cost;
/*
- * Now interpolate based on estimated index order correlation to get
- * total disk I/O cost for main table accesses.
+ * Now interpolate based on estimated index order correlation to get total
+ * disk I/O cost for main table accesses.
*/
csquared = indexCorrelation * indexCorrelation;
@@ -390,9 +388,9 @@ cost_index(IndexPath *path, PlannerInfo *root,
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be
- * subtracted. Rather than work out exactly how much to subtract, we
- * don't subtract anything.
+ * some of the indexquals are join clauses and shouldn't be subtracted.
+ * Rather than work out exactly how much to subtract, we don't subtract
+ * anything.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -467,9 +465,9 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
- * appropriate to charge 1.0 apiece. The effect is nonlinear, too.
- * For lack of a better idea, interpolate like this to determine the
- * cost per page.
+ * appropriate to charge 1.0 apiece. The effect is nonlinear, too. For
+ * lack of a better idea, interpolate like this to determine the cost per
+ * page.
*/
if (pages_fetched >= 2.0)
cost_per_page = random_page_cost -
@@ -482,10 +480,10 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* Estimate CPU costs per tuple.
*
- * Often the indexquals don't need to be rechecked at each tuple ...
- * but not always, especially not if there are enough tuples involved
- * that the bitmaps become lossy. For the moment, just assume they
- * will be rechecked always.
+ * Often the indexquals don't need to be rechecked at each tuple ... but not
+ * always, especially not if there are enough tuples involved that the
+ * bitmaps become lossy. For the moment, just assume they will be
+ * rechecked always.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -527,7 +525,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one.
*/
@@ -535,24 +533,24 @@ void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
Cost totalCost;
- Selectivity selec;
+ Selectivity selec;
ListCell *l;
/*
- * We estimate AND selectivity on the assumption that the inputs
- * are independent. This is probably often wrong, but we don't
- * have the info to do better.
+ * We estimate AND selectivity on the assumption that the inputs are
+ * independent. This is probably often wrong, but we don't have the info
+ * to do better.
*
* The runtime cost of the BitmapAnd itself is estimated at 100x
- * cpu_operator_cost for each tbm_intersect needed. Probably too
- * small, definitely too simplistic?
+ * cpu_operator_cost for each tbm_intersect needed. Probably too small,
+ * definitely too simplistic?
*/
totalCost = 0.0;
selec = 1.0;
foreach(l, path->bitmapquals)
{
- Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
@@ -578,25 +576,25 @@ void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
Cost totalCost;
- Selectivity selec;
+ Selectivity selec;
ListCell *l;
/*
- * We estimate OR selectivity on the assumption that the inputs
- * are non-overlapping, since that's often the case in "x IN (list)"
- * type situations. Of course, we clamp to 1.0 at the end.
+ * We estimate OR selectivity on the assumption that the inputs are
+ * non-overlapping, since that's often the case in "x IN (list)" type
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
- * cpu_operator_cost for each tbm_union needed. Probably too
- * small, definitely too simplistic? We are aware that the tbm_unions
- * are optimized out when the inputs are BitmapIndexScans.
+ * cpu_operator_cost for each tbm_union needed. Probably too small,
+ * definitely too simplistic? We are aware that the tbm_unions are
+ * optimized out when the inputs are BitmapIndexScans.
*/
totalCost = 0.0;
selec = 0.0;
foreach(l, path->bitmapquals)
{
- Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
@@ -661,10 +659,9 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
Assert(baserel->rtekind == RTE_SUBQUERY);
/*
- * Cost of path is cost of evaluating the subplan, plus cost of
- * evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection and
- * projection overhead.
+ * Cost of path is cost of evaluating the subplan, plus cost of evaluating
+ * any restriction clauses that will be attached to the SubqueryScan node,
+ * plus cpu_tuple_cost to account for selection and projection overhead.
*/
path->startup_cost = baserel->subplan->startup_cost;
path->total_cost = baserel->subplan->total_cost;
@@ -694,8 +691,8 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
/*
* For now, estimate function's cost at one operator eval per function
- * call. Someday we should revive the function cost estimate columns
- * in pg_proc...
+ * call. Someday we should revive the function cost estimate columns in
+ * pg_proc...
*/
cpu_per_tuple = cpu_operator_cost;
@@ -758,9 +755,8 @@ cost_sort(Path *path, PlannerInfo *root,
startup_cost += disable_cost;
/*
- * We want to be sure the cost of a sort is never estimated as zero,
- * even if passed-in tuple count is zero. Besides, mustn't do
- * log(0)...
+ * We want to be sure the cost of a sort is never estimated as zero, even
+ * if passed-in tuple count is zero. Besides, mustn't do log(0)...
*/
if (tuples < 2.0)
tuples = 2.0;
@@ -790,8 +786,8 @@ cost_sort(Path *path, PlannerInfo *root,
}
/*
- * Also charge a small amount (arbitrarily set equal to operator cost)
- * per extracted tuple.
+ * Also charge a small amount (arbitrarily set equal to operator cost) per
+ * extracted tuple.
*/
run_cost += cpu_operator_cost * tuples;
@@ -828,17 +824,16 @@ cost_material(Path *path,
/*
* Charge a very small amount per inserted tuple, to reflect bookkeeping
- * costs. We use cpu_tuple_cost/10 for this. This is needed to break
- * the tie that would otherwise exist between nestloop with A outer,
+ * costs. We use cpu_tuple_cost/10 for this. This is needed to break the
+ * tie that would otherwise exist between nestloop with A outer,
* materialized B inner and nestloop with B outer, materialized A inner.
* The extra cost ensures we'll prefer materializing the smaller rel.
*/
startup_cost += cpu_tuple_cost * 0.1 * tuples;
/*
- * Also charge a small amount per extracted tuple. We use
- * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
- * a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
+ * so that it doesn't appear worthwhile to materialize a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
@@ -865,23 +860,22 @@ cost_agg(Path *path, PlannerInfo *root,
Cost total_cost;
/*
- * We charge one cpu_operator_cost per aggregate function per input
- * tuple, and another one per output tuple (corresponding to transfn
- * and finalfn calls respectively). If we are grouping, we charge an
- * additional cpu_operator_cost per grouping column per input tuple
- * for grouping comparisons.
+ * We charge one cpu_operator_cost per aggregate function per input tuple,
+ * and another one per output tuple (corresponding to transfn and finalfn
+ * calls respectively). If we are grouping, we charge an additional
+ * cpu_operator_cost per grouping column per input tuple for grouping
+ * comparisons.
*
* We will produce a single output tuple if not grouping, and a tuple per
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
- * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
- * input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will
- * happen as long as the computed total costs are indeed exactly equal
- * --- but if there's roundoff error we might do the wrong thing. So
- * be sure that the computations below form the same intermediate
- * values in the same order.
+ * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same
+ * total CPU cost, but AGG_SORTED has lower startup cost. If the input
+ * path is already sorted appropriately, AGG_SORTED should be preferred
+ * (since it has no risk of memory overflow). This will happen as long as
+ * the computed total costs are indeed exactly equal --- but if there's
+ * roundoff error we might do the wrong thing. So be sure that the
+ * computations below form the same intermediate values in the same order.
*/
if (aggstrategy == AGG_PLAIN)
{
@@ -937,8 +931,8 @@ cost_group(Path *path, PlannerInfo *root,
total_cost = input_total_cost;
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples.
*/
total_cost += cpu_operator_cost * input_tuples * numGroupCols;
@@ -968,10 +962,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
Selectivity joininfactor;
/*
- * If inner path is an indexscan, be sure to use its estimated output
- * row count, which may be lower than the restriction-clause-only row
- * count of its parent. (We don't include this case in the PATH_ROWS
- * macro because it applies *only* to a nestloop's inner relation.)
+ * If inner path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row count of
+ * its parent. (We don't include this case in the PATH_ROWS macro because
+ * it applies *only* to a nestloop's inner relation.)
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -982,11 +976,11 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
startup_cost += disable_cost;
/*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for
- * an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the JOIN_IN selectivity. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the JOIN_IN
+ * selectivity. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(path, root);
@@ -996,9 +990,9 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
* NOTE: clearly, we must pay both outer and inner paths' startup_cost
* before we can start returning tuples, so the join's startup cost is
* their sum. What's not so clear is whether the inner path's
- * startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a
- * hashjoin, but probably is true otherwise.
+ * startup_cost must be paid again on each rescan of the inner path. This
+ * is not true if the inner path is materialized or is a hashjoin, but
+ * probably is true otherwise.
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -1077,12 +1071,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* Compute cost and selectivity of the mergequals and qpquals (other
- * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result
- * much.
+ * restriction clauses) separately. We use approx_selectivity here for
+ * speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation
- * here when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation here
+ * when either the outer or inner path is a UniquePath.
*/
merge_selec = approx_selectivity(root, mergeclauses,
path->jpath.jointype);
@@ -1095,31 +1088,30 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
/*
- * When there are equal merge keys in the outer relation, the
- * mergejoin must rescan any matching tuples in the inner relation.
- * This means re-fetching inner tuples. Our cost model for this is
- * that a re-fetch costs the same as an original fetch, which is
- * probably an overestimate; but on the other hand we ignore the
- * bookkeeping costs of mark/restore. Not clear if it's worth
- * developing a more refined model.
+ * When there are equal merge keys in the outer relation, the mergejoin
+ * must rescan any matching tuples in the inner relation. This means
+ * re-fetching inner tuples. Our cost model for this is that a re-fetch
+ * costs the same as an original fetch, which is probably an overestimate;
+ * but on the other hand we ignore the bookkeeping costs of mark/restore.
+ * Not clear if it's worth developing a more refined model.
*
- * The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values
- * of each key in the outer relation as m1, m2, ...; in the inner
- * relation, n1, n2, ... Then we have
+ * The number of re-fetches can be estimated approximately as size of merge
+ * join output minus size of inner relation. Assume that the distinct key
+ * values are 1, 2, ..., and denote the number of values of each key in
+ * the outer relation as m1, m2, ...; in the inner relation, n1, n2, ...
+ * Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
- * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1
+ * + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
* relation
*
- * This equation works correctly for outer tuples having no inner match
- * (nk = 0), but not for inner tuples having no outer match (mk = 0);
- * we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
- * selectivity computations?
+ * This equation works correctly for outer tuples having no inner match (nk =
+ * 0), but not for inner tuples having no outer match (mk = 0); we are
+ * effectively subtracting those from the number of rescanned tuples, when
+ * we should not. Can we do better without expensive selectivity
+ * computations?
*/
if (IsA(outer_path, UniquePath))
rescannedtuples = 0;
@@ -1140,9 +1132,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* inputs that will actually need to be scanned. We use only the first
* (most significant) merge clause for this purpose.
*
- * Since this calculation is somewhat expensive, and will be the same for
- * all mergejoin paths associated with the merge clause, we cache the
- * results in the RestrictInfo node.
+ * Since this calculation is somewhat expensive, and will be the same for all
+ * mergejoin paths associated with the merge clause, we cache the results
+ * in the RestrictInfo node.
*/
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
@@ -1181,9 +1173,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few
- * rows in the inputs, failing to do this makes for a large percentage
- * error.
+ * normally an insignificant effect, but when there are only a few rows in
+ * the inputs, failing to do this makes for a large percentage error.
*/
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
@@ -1231,20 +1222,20 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop outputting inner tuples
- * for an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the expected output size. (This assumes that all the quals
- * attached to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples for an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the expected
+ * output size. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
- * The number of tuple comparisons needed is approximately number of
- * outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate the
- * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
- * so do NOT include joininfactor.
+ * The number of tuple comparisons needed is approximately number of outer
+ * rows plus number of inner rows plus number of rescanned tuples (can we
+ * refine this?). At each one, we need to evaluate the mergejoin quals.
+ * NOTE: JOIN_IN mode does not save any work here, so do NOT include
+ * joininfactor.
*/
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
@@ -1253,9 +1244,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
- * since not all of the quals may get evaluated at each tuple.) This
- * work is skipped in JOIN_IN mode, so apply the factor.
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.) This work is
+ * skipped in JOIN_IN mode, so apply the factor.
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1290,9 +1281,9 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double outerbytes = relation_byte_size(outer_path_rows,
- outer_path->parent->width);
+ outer_path->parent->width);
double innerbytes = relation_byte_size(inner_path_rows,
- inner_path->parent->width);
+ inner_path->parent->width);
int num_hashclauses = list_length(hashclauses);
int numbuckets;
int numbatches;
@@ -1306,12 +1297,11 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* Compute cost and selectivity of the hashquals and qpquals (other
- * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result
- * much.
+ * restriction clauses) separately. We use approx_selectivity here for
+ * speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation
- * here when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation here
+ * when either the outer or inner path is a UniquePath.
*/
hash_selec = approx_selectivity(root, hashclauses,
path->jpath.jointype);
@@ -1329,13 +1319,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
startup_cost += inner_path->total_cost;
/*
- * Cost of computing hash function: must do it once per input tuple.
- * We charge one cpu_operator_cost for each column's hash function.
+ * Cost of computing hash function: must do it once per input tuple. We
+ * charge one cpu_operator_cost for each column's hash function.
*
- * XXX when a hashclause is more complex than a single operator, we
- * really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
- * at the moment.
+ * XXX when a hashclause is more complex than a single operator, we really
+ * should charge the extra eval costs of the left or right side, as
+ * appropriate, here. This seems more work than it's worth at the moment.
*/
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
@@ -1345,17 +1334,17 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
inner_path->parent->width,
&numbuckets,
&numbatches);
- virtualbuckets = (double) numbuckets * (double) numbatches;
+ virtualbuckets = (double) numbuckets *(double) numbatches;
/*
- * Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause; this
- * is undoubtedly conservative.
+ * Determine bucketsize fraction for inner relation. We use the smallest
+ * bucketsize estimated for any individual hashclause; this is undoubtedly
+ * conservative.
*
- * BUT: if inner relation has been unique-ified, we can assume it's good
- * for hashing. This is important both because it's the right answer,
- * and because we avoid contaminating the cache with a value that's
- * wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good for
+ * hashing. This is important both because it's the right answer, and
+ * because we avoid contaminating the cache with a value that's wrong for
+ * non-unique-ified paths.
*/
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
@@ -1370,13 +1359,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
Assert(IsA(restrictinfo, RestrictInfo));
/*
- * First we have to figure out which side of the hashjoin
- * clause is the inner side.
+ * First we have to figure out which side of the hashjoin clause
+ * is the inner side.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in
- * the RestrictInfo node to avoid repeated lookups of
- * statistics.
+ * planning a large query, we cache the bucketsize estimate in the
+ * RestrictInfo node to avoid repeated lookups of statistics.
*/
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
@@ -1388,7 +1376,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_rightop(restrictinfo->clause),
+ get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
@@ -1404,7 +1392,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_leftop(restrictinfo->clause),
+ get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
@@ -1417,10 +1405,10 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* If inner relation is too big then we will need to "batch" the join,
- * which implies writing and reading most of the tuples to disk an
- * extra time. Charge one cost unit per page of I/O (correct since it
- * should be nice and sequential...). Writing the inner rel counts as
- * startup cost, all the rest as run cost.
+ * which implies writing and reading most of the tuples to disk an extra
+ * time. Charge one cost unit per page of I/O (correct since it should be
+ * nice and sequential...). Writing the inner rel counts as startup cost,
+ * all the rest as run cost.
*/
if (numbatches > 1)
{
@@ -1436,21 +1424,21 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop comparing inner tuples to
- * an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the expected output size. (This assumes that all the quals
- * attached to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the expected
+ * output size. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
- * The number of tuple comparisons needed is the number of outer
- * tuples times the typical number of tuples in a hash bucket, which
- * is the inner relation size times its bucketsize fraction. At each
- * one, we need to evaluate the hashjoin quals. (Note: charging the
- * full qual eval cost at each tuple is pessimistic, since we don't
- * evaluate the quals unless the hash values match exactly.)
+ * The number of tuple comparisons needed is the number of outer tuples
+ * times the typical number of tuples in a hash bucket, which is the inner
+ * relation size times its bucketsize fraction. At each one, we need to
+ * evaluate the hashjoin quals. (Note: charging the full qual eval cost
+ * at each tuple is pessimistic, since we don't evaluate the quals unless
+ * the hash values match exactly.)
*/
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple *
@@ -1460,8 +1448,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
- * since not all of the quals may get evaluated at each tuple.)
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1469,16 +1457,16 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
/*
* Bias against putting larger relation on inside. We don't want an
- * absolute prohibition, though, since larger relation might have
- * better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the square
- * root of the size ratio. (Why square root? No real good reason,
- * but it seems reasonable...)
+ * absolute prohibition, though, since larger relation might have better
+ * bucketsize --- and we can't trust the size estimates unreservedly,
+ * anyway. Instead, inflate the run cost by the square root of the size
+ * ratio. (Why square root? No real good reason, but it seems
+ * reasonable...)
*
* Note: before 7.4 we implemented this by inflating startup cost; but if
- * there's a disable_cost component in the input paths' startup cost,
- * that unfairly penalizes the hash. Probably it'd be better to keep
- * track of disable penalty separately from cost.
+ * there's a disable_cost component in the input paths' startup cost, that
+ * unfairly penalizes the hash. Probably it'd be better to keep track of
+ * disable penalty separately from cost.
*/
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
@@ -1545,13 +1533,13 @@ cost_qual_eval_walker(Node *node, QualCost *total)
return false;
/*
- * Our basic strategy is to charge one cpu_operator_cost for each
- * operator or function node in the given tree. Vars and Consts are
- * charged zero, and so are boolean operators (AND, OR, NOT).
- * Simplistic, but a lot better than no model at all.
+ * Our basic strategy is to charge one cpu_operator_cost for each operator
+ * or function node in the given tree. Vars and Consts are charged zero,
+ * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
+ * better than no model at all.
*
- * Should we try to account for the possibility of short-circuit
- * evaluation of AND/OR?
+ * Should we try to account for the possibility of short-circuit evaluation
+ * of AND/OR?
*/
if (IsA(node, FuncExpr) ||
IsA(node, OpExpr) ||
@@ -1572,12 +1560,12 @@ cost_qual_eval_walker(Node *node, QualCost *total)
{
/*
* A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge
- * accordingly. (Sub-selects that can be executed as InitPlans
- * have already been removed from the expression.)
+ * subplan will be executed on each evaluation, so charge accordingly.
+ * (Sub-selects that can be executed as InitPlans have already been
+ * removed from the expression.)
*
- * An exception occurs when we have decided we can implement the
- * subplan by hashing.
+ * An exception occurs when we have decided we can implement the subplan
+ * by hashing.
*
*/
SubPlan *subplan = (SubPlan *) node;
@@ -1586,32 +1574,31 @@ cost_qual_eval_walker(Node *node, QualCost *total)
if (subplan->useHashTable)
{
/*
- * If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost. We
- * charge one cpu_operator_cost per tuple for the work of
- * loading the hashtable, too.
+ * If we are using a hash table for the subquery outputs, then the
+ * cost of evaluating the query is a one-time cost. We charge one
+ * cpu_operator_cost per tuple for the work of loading the
+ * hashtable, too.
*/
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;
/*
- * The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the
- * hashtable. Recursion into the exprs list will handle the
- * lefthand expressions properly, and will count one
- * cpu_operator_cost for each comparison operator. That is
- * probably too low for the probing cost, but it's hard to
- * make a better estimate, so live with it for now.
+ * The per-tuple costs include the cost of evaluating the lefthand
+ * expressions, plus the cost of probing the hashtable. Recursion
+ * into the exprs list will handle the lefthand expressions
+ * properly, and will count one cpu_operator_cost for each
+ * comparison operator. That is probably too low for the probing
+ * cost, but it's hard to make a better estimate, so live with it
+ * for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we
- * will actually need to scan. NOTE: this logic should agree
- * with the estimates used by make_subplan() in
- * plan/subselect.c.
+ * evaluation. We need to estimate how much of the output we will
+ * actually need to scan. NOTE: this logic should agree with the
+ * estimates used by make_subplan() in plan/subselect.c.
*/
Cost plan_run_cost = plan->total_cost - plan->startup_cost;
@@ -1636,10 +1623,10 @@ cost_qual_eval_walker(Node *node, QualCost *total)
/*
* Also account for subplan's startup cost. If the subplan is
- * uncorrelated or undirect correlated, AND its topmost node
- * is a Sort or Material node, assume that we'll only need to
- * pay its startup cost once; otherwise assume we pay the
- * startup cost every time.
+ * uncorrelated or undirect correlated, AND its topmost node is a
+ * Sort or Material node, assume that we'll only need to pay its
+ * startup cost once; otherwise assume we pay the startup cost
+ * every time.
*/
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
@@ -1761,9 +1748,9 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Compute joinclause selectivity. Note that we are only considering
- * clauses that become restriction clauses at this join level; we are
- * not double-counting them because they were not considered in
- * estimating the sizes of the component rels.
+ * clauses that become restriction clauses at this join level; we are not
+ * double-counting them because they were not considered in estimating the
+ * sizes of the component rels.
*/
selec = clauselist_selectivity(root,
restrictlist,
@@ -1773,13 +1760,13 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
- * If we are doing an outer join, take that into account: the output must
- * be at least as large as the non-nullable input. (Is there any
- * chance of being even smarter?)
+ * If we are doing an outer join, take that into account: the output must be
+ * at least as large as the non-nullable input. (Is there any chance of
+ * being even smarter?)
*
- * For JOIN_IN and variants, the Cartesian product is figured with
- * respect to a unique-ified input, and then we can clamp to the size
- * of the other input.
+ * For JOIN_IN and variants, the Cartesian product is figured with respect to
+ * a unique-ified input, and then we can clamp to the size of the other
+ * input.
*/
switch (jointype)
{
@@ -1848,12 +1835,11 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)
return 1.0;
/*
- * Return 1.0 if the inner side is already known unique. The case
- * where the inner path is already a UniquePath probably cannot happen
- * in current usage, but check it anyway for completeness. The
- * interesting case is where we've determined the inner relation
- * itself is unique, which we can check by looking at the rows
- * estimate for its UniquePath.
+ * Return 1.0 if the inner side is already known unique. The case where
+ * the inner path is already a UniquePath probably cannot happen in
+ * current usage, but check it anyway for completeness. The interesting
+ * case is where we've determined the inner relation itself is unique,
+ * which we can check by looking at the rows estimate for its UniquePath.
*/
if (IsA(path->innerjoinpath, UniquePath))
return 1.0;
@@ -1866,10 +1852,9 @@ join_in_selectivity(JoinPath *path, PlannerInfo *root)
/*
* Compute same result set_joinrel_size_estimates would compute for
- * JOIN_INNER. Note that we use the input rels' absolute size
- * estimates, not PATH_ROWS() which might be less; if we used
- * PATH_ROWS() we'd be double-counting the effects of any join clauses
- * used in input scans.
+ * JOIN_INNER. Note that we use the input rels' absolute size estimates,
+ * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
+ * double-counting the effects of any join clauses used in input scans.
*/
selec = clauselist_selectivity(root,
path->joinrestrictinfo,
@@ -1908,8 +1893,8 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
* Estimate number of rows the function itself will return.
*
- * XXX no idea how to do this yet; but we can at least check whether
- * function returns set or not...
+ * XXX no idea how to do this yet; but we can at least check whether function
+ * returns set or not...
*/
if (expression_returns_set(rte->funcexpr))
rel->tuples = 1000;
@@ -1957,8 +1942,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
ndx = var->varattno - rel->min_attr;
/*
- * The width probably hasn't been cached yet, but may as well
- * check
+ * The width probably hasn't been cached yet, but may as well check
*/
if (rel->attr_widths[ndx] > 0)
{
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index f186b89db44..1790cc5266b 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,9 +48,9 @@
static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
- List *clauses, List *outer_clauses,
- bool istoplevel, bool isjoininner,
- Relids outer_relids);
+ List *clauses, List *outer_clauses,
+ bool istoplevel, bool isjoininner,
+ Relids outer_relids);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths);
@@ -62,25 +62,25 @@ static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Relids indexable_outerrelids(RelOptInfo *rel);
static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel,
- Relids outer_relids);
+ Relids outer_relids);
static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
- Relids outer_relids, bool isouterjoin);
+ Relids outer_relids, bool isouterjoin);
static ScanDirection match_variant_ordering(PlannerInfo *root,
- IndexOptInfo *index,
- List *restrictclauses);
+ IndexOptInfo *index,
+ List *restrictclauses);
static List *identify_ignorable_ordering_cols(PlannerInfo *root,
- IndexOptInfo *index,
- List *restrictclauses);
+ IndexOptInfo *index,
+ List *restrictclauses);
static bool match_index_to_query_keys(PlannerInfo *root,
- IndexOptInfo *index,
- ScanDirection indexscandir,
- List *ignorables);
+ IndexOptInfo *index,
+ ScanDirection indexscandir,
+ List *ignorables);
static bool match_boolean_index_clause(Node *clause, int indexcol,
- IndexOptInfo *index);
+ IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
- IndexOptInfo *index);
+ IndexOptInfo *index);
static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
@@ -153,8 +153,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
true, false, NULL);
/*
- * We can submit them all to add_path. (This generates access paths for
- * plain IndexScan plans.) However, for the next step we will only want
+ * We can submit them all to add_path. (This generates access paths for
+ * plain IndexScan plans.) However, for the next step we will only want
* the ones that have some selectivity; we must discard anything that was
* generated solely for ordering purposes.
*/
@@ -180,8 +180,8 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
bitindexpaths = list_concat(bitindexpaths, indexpaths);
/*
- * If we found anything usable, generate a BitmapHeapPath for the
- * most promising combination of bitmap index paths.
+ * If we found anything usable, generate a BitmapHeapPath for the most
+ * promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@@ -254,19 +254,19 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
bool index_is_ordered;
/*
- * Ignore partial indexes that do not match the query. If a partial
- * index is marked predOK then we know it's OK; otherwise, if we
- * are at top level we know it's not OK (since predOK is exactly
- * whether its predicate could be proven from the toplevel clauses).
- * Otherwise, we have to test whether the added clauses are
- * sufficient to imply the predicate. If so, we could use
- * the index in the current context.
+ * Ignore partial indexes that do not match the query. If a partial
+ * index is marked predOK then we know it's OK; otherwise, if we are
+ * at top level we know it's not OK (since predOK is exactly whether
+ * its predicate could be proven from the toplevel clauses).
+ * Otherwise, we have to test whether the added clauses are sufficient
+ * to imply the predicate. If so, we could use the index in the
+ * current context.
*
- * We set useful_predicate to true iff the predicate was proven
- * using the current set of clauses. This is needed to prevent
- * matching a predOK index to an arm of an OR, which would be
- * a legal but pointlessly inefficient plan. (A better plan will
- * be generated by just scanning the predOK index alone, no OR.)
+ * We set useful_predicate to true iff the predicate was proven using the
+ * current set of clauses. This is needed to prevent matching a
+ * predOK index to an arm of an OR, which would be a legal but
+ * pointlessly inefficient plan. (A better plan will be generated by
+ * just scanning the predOK index alone, no OR.)
*/
useful_predicate = false;
if (index->indpred != NIL)
@@ -282,7 +282,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
else
{
if (istoplevel)
- continue; /* no point in trying to prove it */
+ continue; /* no point in trying to prove it */
/* Form all_clauses if not done already */
if (all_clauses == NIL)
@@ -290,7 +290,7 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
outer_clauses);
if (!predicate_implied_by(index->indpred, all_clauses))
- continue; /* can't use it at all */
+ continue; /* can't use it at all */
if (!predicate_implied_by(index->indpred, outer_clauses))
useful_predicate = true;
@@ -309,17 +309,17 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
&found_clause);
/*
- * Not all index AMs support scans with no restriction clauses.
- * We can't generate a scan over an index with amoptionalkey = false
+ * Not all index AMs support scans with no restriction clauses. We
+ * can't generate a scan over an index with amoptionalkey = false
* unless there's at least one restriction clause.
*/
if (restrictclauses == NIL && !index->amoptionalkey)
continue;
/*
- * 2. Compute pathkeys describing index's ordering, if any, then
- * see how many of them are actually useful for this query. This
- * is not relevant unless we are at top level.
+ * 2. Compute pathkeys describing index's ordering, if any, then see
+ * how many of them are actually useful for this query. This is not
+ * relevant unless we are at top level.
*/
index_is_ordered = OidIsValid(index->ordering[0]);
if (istoplevel && index_is_ordered && !isjoininner)
@@ -335,9 +335,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
/*
* 3. Generate an indexscan path if there are relevant restriction
* clauses in the current clauses, OR the index ordering is
- * potentially useful for later merging or final output ordering,
- * OR the index has a predicate that was proven by the current
- * clauses.
+ * potentially useful for later merging or final output ordering, OR
+ * the index has a predicate that was proven by the current clauses.
*/
if (found_clause || useful_pathkeys != NIL || useful_predicate)
{
@@ -352,16 +351,15 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 4. If the index is ordered, and there is a requested query
- * ordering that we failed to match, consider variant ways of
- * achieving the ordering. Again, this is only interesting
- * at top level.
+ * 4. If the index is ordered, and there is a requested query ordering
+ * that we failed to match, consider variant ways of achieving the
+ * ordering. Again, this is only interesting at top level.
*/
if (istoplevel && index_is_ordered && !isjoininner &&
root->query_pathkeys != NIL &&
pathkeys_useful_for_ordering(root, useful_pathkeys) == 0)
{
- ScanDirection scandir;
+ ScanDirection scandir;
scandir = match_variant_ordering(root, index, restrictclauses);
if (!ScanDirectionIsNoMovement(scandir))
@@ -409,9 +407,9 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
foreach(l, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- List *pathlist;
- Path *bitmapqual;
- ListCell *j;
+ List *pathlist;
+ Path *bitmapqual;
+ ListCell *j;
Assert(IsA(rinfo, RestrictInfo));
/* Ignore RestrictInfos that aren't ORs */
@@ -419,19 +417,19 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
- * We must be able to match at least one index to each of the arms
- * of the OR, else we can't use it.
+ * We must be able to match at least one index to each of the arms of
+ * the OR, else we can't use it.
*/
pathlist = NIL;
foreach(j, ((BoolExpr *) rinfo->orclause)->args)
{
- Node *orarg = (Node *) lfirst(j);
- List *indlist;
+ Node *orarg = (Node *) lfirst(j);
+ List *indlist;
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
- List *andargs = ((BoolExpr *) orarg)->args;
+ List *andargs = ((BoolExpr *) orarg)->args;
indlist = find_usable_indexes(root, rel,
andargs,
@@ -458,25 +456,28 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
isjoininner,
outer_relids);
}
+
/*
- * If nothing matched this arm, we can't do anything
- * with this OR clause.
+ * If nothing matched this arm, we can't do anything with this OR
+ * clause.
*/
if (indlist == NIL)
{
pathlist = NIL;
break;
}
+
/*
- * OK, pick the most promising AND combination,
- * and add it to pathlist.
+ * OK, pick the most promising AND combination, and add it to
+ * pathlist.
*/
bitmapqual = choose_bitmap_and(root, rel, indlist);
pathlist = lappend(pathlist, bitmapqual);
}
+
/*
- * If we have a match for every arm, then turn them
- * into a BitmapOrPath, and add to result list.
+ * If we have a match for every arm, then turn them into a
+ * BitmapOrPath, and add to result list.
*/
if (pathlist != NIL)
{
@@ -494,7 +495,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -511,7 +512,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
int i;
ListCell *l;
- Assert(npaths > 0); /* else caller error */
+ Assert(npaths > 0); /* else caller error */
if (npaths == 1)
return (Path *) linitial(paths); /* easy case */
@@ -519,24 +520,23 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. As a compromise, we sort the paths by selectivity.
- * We always take the first, and sequentially add on paths that result
- * in a lower estimated cost.
+ * OR clauses. As a compromise, we sort the paths by selectivity. We
+ * always take the first, and sequentially add on paths that result in a
+ * lower estimated cost.
*
- * We also make some effort to detect directly redundant input paths,
- * as can happen if there are multiple possibly usable indexes. For
- * this we look only at plain IndexPath inputs, not at sub-OR clauses.
- * And we consider an index redundant if all its index conditions were
- * already used by earlier indexes. (We could use predicate_implied_by
- * to have a more intelligent, but much more expensive, check --- but in
- * most cases simple pointer equality should suffice, since after all the
- * index conditions are all coming from the same RestrictInfo lists.)
+ * We also make some effort to detect directly redundant input paths, as can
+ * happen if there are multiple possibly usable indexes. For this we look
+ * only at plain IndexPath inputs, not at sub-OR clauses. And we consider
+ * an index redundant if all its index conditions were already used by
+ * earlier indexes. (We could use predicate_implied_by to have a more
+ * intelligent, but much more expensive, check --- but in most cases
+ * simple pointer equality should suffice, since after all the index
+ * conditions are all coming from the same RestrictInfo lists.)
*
- * XXX is there any risk of throwing away a useful partial index here
- * because we don't explicitly look at indpred? At least in simple
- * cases, the partial index will sort before competing non-partial
- * indexes and so it makes the right choice, but perhaps we need to
- * work harder.
+ * XXX is there any risk of throwing away a useful partial index here because
+ * we don't explicitly look at indpred? At least in simple cases, the
+ * partial index will sort before competing non-partial indexes and so it
+ * makes the right choice, but perhaps we need to work harder.
*
* Note: outputting the selected sub-paths in selectivity order is a good
* thing even if we weren't using that as part of the selection method,
@@ -559,13 +559,13 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses);
else
qualsofar = NIL;
- lastcell = list_head(paths); /* for quick deletions */
+ lastcell = list_head(paths); /* for quick deletions */
for (i = 1; i < npaths; i++)
{
- Path *newpath = patharray[i];
- List *newqual = NIL;
- Cost newcost;
+ Path *newpath = patharray[i];
+ List *newqual = NIL;
+ Cost newcost;
if (IsA(newpath, IndexPath))
{
@@ -599,12 +599,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
static int
bitmap_path_comparator(const void *a, const void *b)
{
- Path *pa = *(Path * const *) a;
- Path *pb = *(Path * const *) b;
+ Path *pa = *(Path *const *) a;
+ Path *pb = *(Path *const *) b;
Cost acost;
Cost bcost;
- Selectivity aselec;
- Selectivity bselec;
+ Selectivity aselec;
+ Selectivity bselec;
cost_bitmap_tree_node(pa, &acost, &aselec);
cost_bitmap_tree_node(pb, &bcost, &bselec);
@@ -660,7 +660,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
- * the "current clauses" list. See find_usable_indexes() for motivation.
+ * the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
@@ -770,7 +770,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
- * of the join when building a join inner scan. Other than that, the
+ * of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
@@ -836,8 +836,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
- * If we didn't find a member of the index's opclass, see whether
- * it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether it
+ * is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
@@ -852,8 +852,8 @@ match_clause_to_indexcol(IndexOptInfo *index,
return true;
/*
- * If we didn't find a member of the index's opclass, see whether
- * it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether it
+ * is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
@@ -914,14 +914,14 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* Note: if Postgres tried to optimize queries by forming equivalence
* classes over equi-joined attributes (i.e., if it recognized that a
- * qualification such as "where a.b=c.d and a.b=5" could make use of
- * an index on c.d), then we could use that equivalence class info
- * here with joininfo lists to do more complete tests for the usability
- * of a partial index. For now, the test only uses restriction
- * clauses (those in baserestrictinfo). --Nels, Dec '92
+ * qualification such as "where a.b=c.d and a.b=5" could make use of an
+ * index on c.d), then we could use that equivalence class info here with
+ * joininfo lists to do more complete tests for the usability of a partial
+ * index. For now, the test only uses restriction clauses (those in
+ * baserestrictinfo). --Nels, Dec '92
*
- * XXX as of 7.1, equivalence class info *is* available. Consider
- * improving this code as foreseen by Nels.
+ * XXX as of 7.1, equivalence class info *is* available. Consider improving
+ * this code as foreseen by Nels.
*/
foreach(ilist, rel->indexlist)
@@ -943,7 +943,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified table. Returns a set of relids.
+ * for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(RelOptInfo *rel)
@@ -958,7 +958,7 @@ indexable_outerrelids(RelOptInfo *rel)
foreach(l, rel->joininfo)
{
RestrictInfo *joininfo = (RestrictInfo *) lfirst(l);
- Relids other_rels;
+ Relids other_rels;
other_rels = bms_difference(joininfo->required_relids, rel->relids);
if (matches_any_index(joininfo, rel, other_rels))
@@ -986,7 +986,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids)
{
foreach(l, ((BoolExpr *) rinfo->orclause)->args)
{
- Node *orarg = (Node *) lfirst(l);
+ Node *orarg = (Node *) lfirst(l);
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
@@ -1092,17 +1092,17 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
return NULL;
/*
- * Otherwise, we have to do path selection in the memory context of
- * the given rel, so that any created path can be safely attached to
- * the rel's cache of best inner paths. (This is not currently an
- * issue for normal planning, but it is an issue for GEQO planning.)
+ * Otherwise, we have to do path selection in the memory context of the
+ * given rel, so that any created path can be safely attached to the rel's
+ * cache of best inner paths. (This is not currently an issue for normal
+ * planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
/*
- * Intersect the given outer_relids with index_outer_relids to find
- * the set of outer relids actually relevant for this rel. If there
- * are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find the
+ * set of outer relids actually relevant for this rel. If there are none,
+ * again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@@ -1113,11 +1113,10 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
- * cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given
- * innerrel.)
+ * Look to see if we already computed the result for this set of relevant
+ * outerrels. (We include the isouterjoin status in the cache lookup key
+ * for safety. In practice I suspect this is not necessary because it
+ * should always be the same for a given innerrel.)
*/
foreach(l, rel->index_inner_paths)
{
@@ -1160,8 +1159,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths));
/*
- * If we found anything usable, generate a BitmapHeapPath for the
- * most promising combination of bitmap index paths.
+ * If we found anything usable, generate a BitmapHeapPath for the most
+ * promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
@@ -1218,12 +1217,11 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
ListCell *l;
/*
- * We can always use plain restriction clauses for the rel. We
- * scan these first because we want them first in the clause
- * list for the convenience of remove_redundant_join_clauses,
- * which can never remove non-join clauses and hence won't be able
- * to get rid of a non-join clause if it appears after a join
- * clause it is redundant with.
+ * We can always use plain restriction clauses for the rel. We scan these
+ * first because we want them first in the clause list for the convenience
+ * of remove_redundant_join_clauses, which can never remove non-join
+ * clauses and hence won't be able to get rid of a non-join clause if it
+ * appears after a join clause it is redundant with.
*/
foreach(l, rel->baserestrictinfo)
{
@@ -1305,7 +1303,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
*
* If able to match the requested query pathkeys, returns either
* ForwardScanDirection or BackwardScanDirection to indicate the proper index
- * scan direction. If no match, returns NoMovementScanDirection.
+ * scan direction. If no match, returns NoMovementScanDirection.
*/
static ScanDirection
match_variant_ordering(PlannerInfo *root,
@@ -1318,8 +1316,8 @@ match_variant_ordering(PlannerInfo *root,
* Forget the whole thing if not a btree index; our check for ignorable
* columns assumes we are dealing with btree opclasses. (It'd be possible
* to factor out just the try for backwards indexscan, but considering
- * that we presently have no orderable indexes except btrees anyway,
- * it's hardly worth contorting this code for that case.)
+ * that we presently have no orderable indexes except btrees anyway, it's
+ * hardly worth contorting this code for that case.)
*
* Note: if you remove this, you probably need to put in a check on
* amoptionalkey to prevent possible clauseless scan on an index that
@@ -1327,17 +1325,19 @@ match_variant_ordering(PlannerInfo *root,
*/
if (index->relam != BTREE_AM_OID)
return NoMovementScanDirection;
+
/*
- * Figure out which index columns can be optionally ignored because
- * they have an equality constraint. This is the same set for either
- * forward or backward scan, so we do it just once.
+ * Figure out which index columns can be optionally ignored because they
+ * have an equality constraint. This is the same set for either forward
+ * or backward scan, so we do it just once.
*/
ignorables = identify_ignorable_ordering_cols(root, index,
restrictclauses);
+
/*
- * Try to match to forward scan, then backward scan. However, we can
- * skip the forward-scan case if there are no ignorable columns,
- * because find_usable_indexes() would have found the match already.
+ * Try to match to forward scan, then backward scan. However, we can skip
+ * the forward-scan case if there are no ignorable columns, because
+ * find_usable_indexes() would have found the match already.
*/
if (ignorables &&
match_index_to_query_keys(root, index, ForwardScanDirection,
@@ -1365,24 +1365,24 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
List *restrictclauses)
{
List *result = NIL;
- int indexcol = 0; /* note this is 0-based */
+ int indexcol = 0; /* note this is 0-based */
ListCell *l;
/* restrictclauses is either NIL or has a sublist per column */
foreach(l, restrictclauses)
{
- List *sublist = (List *) lfirst(l);
- Oid opclass = index->classlist[indexcol];
- ListCell *l2;
+ List *sublist = (List *) lfirst(l);
+ Oid opclass = index->classlist[indexcol];
+ ListCell *l2;
foreach(l2, sublist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2);
OpExpr *clause = (OpExpr *) rinfo->clause;
- Oid clause_op;
- int op_strategy;
- bool varonleft;
- bool ispc;
+ Oid clause_op;
+ int op_strategy;
+ bool varonleft;
+ bool ispc;
/* We know this clause passed match_clause_to_indexcol */
@@ -1393,11 +1393,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
index))
{
/*
- * The clause means either col = TRUE or col = FALSE;
- * we do not care which, it's an equality constraint
- * either way.
+ * The clause means either col = TRUE or col = FALSE; we
+ * do not care which, it's an equality constraint either
+ * way.
*/
- result = lappend_int(result, indexcol+1);
+ result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1426,12 +1426,11 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
op_strategy = get_op_opclass_strategy(clause_op, opclass);
/*
- * You might expect to see Assert(op_strategy != 0) here,
- * but you won't: the clause might contain a special indexable
- * operator rather than an ordinary opclass member. Currently
- * none of the special operators are very likely to expand to
- * an equality operator; we do not bother to check, but just
- * assume no match.
+ * You might expect to see Assert(op_strategy != 0) here, but you
+ * won't: the clause might contain a special indexable operator
+ * rather than an ordinary opclass member. Currently none of the
+ * special operators are very likely to expand to an equality
+ * operator; we do not bother to check, but just assume no match.
*/
if (op_strategy != BTEqualStrategyNumber)
continue;
@@ -1445,7 +1444,7 @@ identify_ignorable_ordering_cols(PlannerInfo *root,
rinfo->left_relids);
if (ispc)
{
- result = lappend_int(result, indexcol+1);
+ result = lappend_int(result, indexcol + 1);
break;
}
}
@@ -1480,8 +1479,8 @@ match_index_to_query_keys(PlannerInfo *root,
index_pathkeys = build_index_pathkeys(root, index, indexscandir);
/*
- * Can we match to the query's requested pathkeys? The inner loop
- * skips over ignorable index columns while trying to match.
+ * Can we match to the query's requested pathkeys? The inner loop skips
+ * over ignorable index columns while trying to match.
*/
index_cell = list_head(index_pathkeys);
index_col = 0;
@@ -1492,13 +1491,14 @@ match_index_to_query_keys(PlannerInfo *root,
for (;;)
{
- List *isubkey;
+ List *isubkey;
if (index_cell == NULL)
return false;
isubkey = (List *) lfirst(index_cell);
index_cell = lnext(index_cell);
index_col++; /* index_col is now 1-based */
+
/*
* Since we are dealing with canonicalized pathkeys, pointer
* comparison is sufficient to determine a match.
@@ -1561,9 +1561,9 @@ match_index_to_operand(Node *operand,
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to
- * be able to apply indexscanning in binary-compatible-operator cases.
- * Note: we can assume there is at most one RelabelType node;
+ * Ignore any RelabelType node above the operand. This is needed to be
+ * able to apply indexscanning in binary-compatible-operator cases. Note:
+ * we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
*/
if (operand && IsA(operand, RelabelType))
@@ -1583,9 +1583,9 @@ match_index_to_operand(Node *operand,
else
{
/*
- * Index expression; find the correct expression. (This search
- * could be avoided, at the cost of complicating all the callers
- * of this routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search could
+ * be avoided, at the cost of complicating all the callers of this
+ * routine; doesn't seem worth it.)
*/
ListCell *indexpr_item;
int i;
@@ -1645,7 +1645,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -1696,14 +1696,15 @@ match_boolean_index_clause(Node *clause,
indexcol, index))
return true;
}
+
/*
* Since we only consider clauses at top level of WHERE, we can convert
- * indexkey IS TRUE and indexkey IS FALSE to index searches as well.
- * The different meaning for NULL isn't important.
+ * indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
+ * different meaning for NULL isn't important.
*/
else if (clause && IsA(clause, BooleanTest))
{
- BooleanTest *btest = (BooleanTest *) clause;
+ BooleanTest *btest = (BooleanTest *) clause;
if (btest->booltesttype == IS_TRUE ||
btest->booltesttype == IS_FALSE)
@@ -1737,8 +1738,8 @@ match_special_index_operator(Expr *clause, Oid opclass,
/*
* Currently, all known special operators require the indexkey on the
- * left, but this test could be pushed into the switch statement if
- * some are added that do not...
+ * left, but this test could be pushed into the switch statement if some
+ * are added that do not...
*/
if (!indexkey_on_left)
return false;
@@ -1760,12 +1761,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@@ -1773,7 +1774,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
@@ -1781,7 +1782,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
@@ -1789,7 +1790,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
@@ -1815,9 +1816,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * We insist on the opclass being the specific one we expect, else we'd
- * do the wrong thing if someone were to make a reverse-sort opclass
- * with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd do
+ * the wrong thing if someone were to make a reverse-sort opclass with the
+ * same operators.
*/
switch (expr_op)
{
@@ -1906,7 +1907,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
- Expr *boolqual;
+ Expr *boolqual;
boolqual = expand_boolean_index_clause((Node *) rinfo->clause,
indexcol,
@@ -1960,7 +1961,7 @@ expand_boolean_index_clause(Node *clause,
/* NOT clause? */
if (not_clause(clause))
{
- Node *arg = (Node *) get_notclausearg((Expr *) clause);
+ Node *arg = (Node *) get_notclausearg((Expr *) clause);
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -1971,8 +1972,8 @@ expand_boolean_index_clause(Node *clause,
}
if (clause && IsA(clause, BooleanTest))
{
- BooleanTest *btest = (BooleanTest *) clause;
- Node *arg = (Node *) btest->arg;
+ BooleanTest *btest = (BooleanTest *) clause;
+ Node *arg = (Node *) btest->arg;
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
@@ -2007,6 +2008,7 @@ static List *
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
+
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
@@ -2020,10 +2022,9 @@ expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
switch (expr_op)
{
/*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by
- * match_special_index_operator().
+ * LIKE and regex operators are not members of any index opclass,
+ * so if we find one in an indexqual list we can assume that it
+ * was accepted by match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
@@ -2128,8 +2129,8 @@ prefix_quals(Node *leftop, Oid opclass,
}
/*
- * If necessary, coerce the prefix constant to the right type. The
- * given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The given
+ * prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
@@ -2139,11 +2140,11 @@ prefix_quals(Node *leftop, Oid opclass,
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index b02f67ba1f6..ab3f902f02b 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.95 2005/06/05 22:32:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.96 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,9 +65,9 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* Find potential mergejoin clauses. We can skip this if we are not
- * interested in doing a mergejoin. However, mergejoin is currently
- * our only way of implementing full outer joins, so override
- * mergejoin disable if it's a full join.
+ * interested in doing a mergejoin. However, mergejoin is currently our
+ * only way of implementing full outer joins, so override mergejoin
+ * disable if it's a full join.
*/
if (enable_mergejoin || jointype == JOIN_FULL)
mergeclause_list = select_mergejoin_clauses(joinrel,
@@ -95,23 +95,22 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already
- * built in match_unsorted_outer).
+ * sorted. This includes mergejoins only (nestloops were already built in
+ * match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
- * significant difference between the inner and outer side of a
- * mergejoin, so match_unsorted_inner creates no paths that aren't
- * equivalent to those made by match_unsorted_outer when
- * add_paths_to_joinrel() is invoked with the two rels given in the
- * other order.
+ * significant difference between the inner and outer side of a mergejoin,
+ * so match_unsorted_inner creates no paths that aren't equivalent to
+ * those made by match_unsorted_outer when add_paths_to_joinrel() is
+ * invoked with the two rels given in the other order.
*/
match_unsorted_inner(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list, jointype);
#endif
/*
- * 4. Consider paths where both outer and inner relations must be
- * hashed before being joined.
+ * 4. Consider paths where both outer and inner relations must be hashed
+ * before being joined.
*/
if (enable_hashjoin)
hash_inner_and_outer(root, joinrel, outerrel, innerrel,
@@ -174,11 +173,11 @@ sort_inner_and_outer(PlannerInfo *root,
/*
* We only consider the cheapest-total-cost input paths, since we are
* assuming here that a sort is required. We will consider
- * cheapest-startup-cost input paths later, and only if they don't
- * need a sort.
+ * cheapest-startup-cost input paths later, and only if they don't need a
+ * sort.
*
- * If unique-ification is requested, do it and then handle as a plain
- * inner join.
+ * If unique-ification is requested, do it and then handle as a plain inner
+ * join.
*/
outer_path = outerrel->cheapest_total_path;
inner_path = innerrel->cheapest_total_path;
@@ -194,31 +193,29 @@ sort_inner_and_outer(PlannerInfo *root,
}
/*
- * Each possible ordering of the available mergejoin clauses will
- * generate a differently-sorted result path at essentially the same
- * cost. We have no basis for choosing one over another at this level
- * of joining, but some sort orders may be more useful than others for
- * higher-level mergejoins, so it's worth considering multiple
- * orderings.
+ * Each possible ordering of the available mergejoin clauses will generate
+ * a differently-sorted result path at essentially the same cost. We have
+ * no basis for choosing one over another at this level of joining, but
+ * some sort orders may be more useful than others for higher-level
+ * mergejoins, so it's worth considering multiple orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * redundant. Therefore, what we do is convert the mergeclause list
- * to a list of canonical pathkeys, and then consider different
- * orderings of the pathkeys.
+ * redundant. Therefore, what we do is convert the mergeclause list to a
+ * list of canonical pathkeys, and then consider different orderings of
+ * the pathkeys.
*
* Generating a path for *every* permutation of the pathkeys doesn't seem
* like a winning strategy; the cost in planning time is too high. For
- * now, we generate one path for each pathkey, listing that pathkey
- * first and the rest in random order. This should allow at least a
- * one-clause mergejoin without re-sorting against any other possible
- * mergejoin partner path. But if we've not guessed the right
- * ordering of secondary keys, we may end up evaluating clauses as
- * qpquals when they could have been done as mergeclauses. We need to
- * figure out a better way. (Two possible approaches: look at all the
- * relevant index relations to suggest plausible sort orders, or make
- * just one output path and somehow mark it as having a sort-order
- * that can be rearranged freely.)
+ * now, we generate one path for each pathkey, listing that pathkey first
+ * and the rest in random order. This should allow at least a one-clause
+ * mergejoin without re-sorting against any other possible mergejoin
+ * partner path. But if we've not guessed the right ordering of secondary
+ * keys, we may end up evaluating clauses as qpquals when they could have
+ * been done as mergeclauses. We need to figure out a better way. (Two
+ * possible approaches: look at all the relevant index relations to
+ * suggest plausible sort orders, or make just one output path and somehow
+ * mark it as having a sort-order that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
@@ -243,26 +240,25 @@ sort_inner_and_outer(PlannerInfo *root,
/*
* Select mergeclause(s) that match this sort ordering. If we had
- * redundant merge clauses then we will get a subset of the
- * original clause list. There had better be some match,
- * however...
+ * redundant merge clauses then we will get a subset of the original
+ * clause list. There had better be some match, however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
- mergeclause_list);
+ mergeclause_list);
Assert(cur_mergeclauses != NIL);
/* Forget it if can't use all the clauses in right/full join */
if (useallclauses &&
- list_length(cur_mergeclauses) != list_length(mergeclause_list))
+ list_length(cur_mergeclauses) != list_length(mergeclause_list))
continue;
/*
* Build sort pathkeys for both sides.
*
* Note: it's possible that the cheapest paths will already be sorted
- * properly. create_mergejoin_path will detect that case and
- * suppress an explicit sort step, so we needn't do so here.
+ * properly. create_mergejoin_path will detect that case and suppress
+ * an explicit sort step, so we needn't do so here.
*/
outerkeys = make_pathkeys_for_mergeclauses(root,
cur_mergeclauses,
@@ -343,10 +339,10 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
- * doing a right or full join, we must use *all* the mergeclauses as
- * join clauses, else we will not have a valid plan. (Although these
- * two flags are currently inverses, keep them separate for clarity
- * and possible future changes.)
+ * doing a right or full join, we must use *all* the mergeclauses as join
+ * clauses, else we will not have a valid plan. (Although these two flags
+ * are currently inverses, keep them separate for clarity and possible
+ * future changes.)
*/
switch (jointype)
{
@@ -385,10 +381,9 @@ match_unsorted_outer(PlannerInfo *root,
else if (nestjoinOK)
{
/*
- * If the cheapest inner path is a join or seqscan, we should
- * consider materializing it. (This is a heuristic: we could
- * consider it always, but for inner indexscans it's probably a
- * waste of time.)
+ * If the cheapest inner path is a join or seqscan, we should consider
+ * materializing it. (This is a heuristic: we could consider it
+ * always, but for inner indexscans it's probably a waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, BitmapHeapPath) ||
@@ -397,8 +392,8 @@ match_unsorted_outer(PlannerInfo *root,
create_material_path(innerrel, inner_cheapest_total);
/*
- * Get the best innerjoin indexpath (if any) for this outer rel.
- * It's the same for all outer paths.
+ * Get the best innerjoin indexpath (if any) for this outer rel. It's
+ * the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@@ -417,8 +412,8 @@ match_unsorted_outer(PlannerInfo *root,
int sortkeycnt;
/*
- * If we need to unique-ify the outer path, it's pointless to
- * consider any but the cheapest outer.
+ * If we need to unique-ify the outer path, it's pointless to consider
+ * any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -429,9 +424,9 @@ match_unsorted_outer(PlannerInfo *root,
}
/*
- * The result will have this sort order (even if it is implemented
- * as a nestloop, and even if some of the mergeclauses are
- * implemented by qpquals rather than as true mergeclauses):
+ * The result will have this sort order (even if it is implemented as
+ * a nestloop, and even if some of the mergeclauses are implemented by
+ * qpquals rather than as true mergeclauses):
*/
merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
outerpath->pathkeys);
@@ -516,9 +511,9 @@ match_unsorted_outer(PlannerInfo *root,
innerrel);
/*
- * Generate a mergejoin on the basis of sorting the cheapest
- * inner. Since a sort will be needed, only cheapest total cost
- * matters. (But create_mergejoin_path will do the right thing if
+ * Generate a mergejoin on the basis of sorting the cheapest inner.
+ * Since a sort will be needed, only cheapest total cost matters.
+ * (But create_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
*/
add_path(joinrel, (Path *)
@@ -538,10 +533,10 @@ match_unsorted_outer(PlannerInfo *root,
continue;
/*
- * Look for presorted inner paths that satisfy the innersortkey
- * list --- or any truncation thereof, if we are allowed to build
- * a mergejoin using a subset of the merge clauses. Here, we
- * consider both cheap startup cost and cheap total cost. Ignore
+ * Look for presorted inner paths that satisfy the innersortkey list
+ * --- or any truncation thereof, if we are allowed to build a
+ * mergejoin using a subset of the merge clauses. Here, we consider
+ * both cheap startup cost and cheap total cost. Ignore
* inner_cheapest_total, since we already made a path with it.
*/
num_sortkeys = list_length(innersortkeys);
@@ -559,8 +554,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
- * modified destructively, which is why we made a copy...
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@@ -611,8 +606,8 @@ match_unsorted_outer(PlannerInfo *root,
if (innerpath != cheapest_total_inner)
{
/*
- * Avoid rebuilding clause list if we already made
- * one; saves memory in big join trees...
+ * Avoid rebuilding clause list if we already made one;
+ * saves memory in big join trees...
*/
if (newclauses == NIL)
{
@@ -620,8 +615,8 @@ match_unsorted_outer(PlannerInfo *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
- trialsortkeys,
- mergeclauses);
+ trialsortkeys,
+ mergeclauses);
Assert(newclauses != NIL);
}
else
@@ -697,8 +692,8 @@ hash_inner_and_outer(PlannerInfo *root,
* We need to build only one hashpath for any given pair of outer and
* inner relations; all of the hashable clauses will be used as keys.
*
- * Scan the join's restrictinfo list to find hashjoinable clauses that
- * are usable with this pair of sub-relations.
+ * Scan the join's restrictinfo list to find hashjoinable clauses that are
+ * usable with this pair of sub-relations.
*/
hashclauses = NIL;
foreach(l, restrictlist)
@@ -725,7 +720,7 @@ hash_inner_and_outer(PlannerInfo *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@@ -739,9 +734,9 @@ hash_inner_and_outer(PlannerInfo *root,
if (hashclauses)
{
/*
- * We consider both the cheapest-total-cost and
- * cheapest-startup-cost outer paths. There's no need to consider
- * any but the cheapest-total-cost inner path, however.
+ * We consider both the cheapest-total-cost and cheapest-startup-cost
+ * outer paths. There's no need to consider any but the
+ * cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
@@ -807,15 +802,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
/*
- * If processing an outer join, only use its own join clauses in
- * the merge. For inner joins we need not be so picky.
+ * If processing an outer join, only use its own join clauses in the
+ * merge. For inner joins we need not be so picky.
*
- * Furthermore, if it is a right/full join then *all* the explicit
- * join clauses must be mergejoinable, else the executor will
- * fail. If we are asked for a right join then just return NIL to
- * indicate no mergejoin is possible (we can handle it as a left
- * join instead). If we are asked for a full join then emit an
- * error, because there is no fallback.
+ * Furthermore, if it is a right/full join then *all* the explicit join
+ * clauses must be mergejoinable, else the executor will fail. If we
+ * are asked for a right join then just return NIL to indicate no
+ * mergejoin is possible (we can handle it as a left join instead). If
+ * we are asked for a full join then emit an error, because there is
+ * no fallback.
*/
if (isouterjoin)
{
@@ -847,8 +842,8 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/*
* Check if clause is usable with these input rels. All the vars
- * needed on each side of the clause must be available from one or
- * the other of the input rels.
+ * needed on each side of the clause must be available from one or the
+ * other of the input rels.
*/
if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, innerrel->relids))
@@ -856,7 +851,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index f4f2d779b0a..ecb63156860 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.75 2005/07/28 22:27:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,17 +49,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* First, consider left-sided and right-sided plans, in which rels of
- * exactly level-1 member relations are joined against initial
- * relations. We prefer to join using join clauses, but if we find a
- * rel of level-1 members that has no join clauses, we will generate
- * Cartesian-product joins against all initial rels not already
- * contained in it.
+ * exactly level-1 member relations are joined against initial relations.
+ * We prefer to join using join clauses, but if we find a rel of level-1
+ * members that has no join clauses, we will generate Cartesian-product
+ * joins against all initial rels not already contained in it.
*
- * In the first pass (level == 2), we try to join each initial rel to
- * each initial rel that appears later in joinrels[1]. (The
- * mirror-image joins are handled automatically by make_join_rel.) In
- * later passes, we try to join rels of size level-1 from
- * joinrels[level-1] to each initial rel in joinrels[1].
+ * In the first pass (level == 2), we try to join each initial rel to each
+ * initial rel that appears later in joinrels[1]. (The mirror-image joins
+ * are handled automatically by make_join_rel.) In later passes, we try
+ * to join rels of size level-1 from joinrels[level-1] to each initial rel
+ * in joinrels[1].
*/
foreach(r, joinrels[level - 1])
{
@@ -76,23 +75,22 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
if (old_rel->joininfo != NIL)
{
/*
- * Note that if all available join clauses for this rel
- * require more than one other rel, we will fail to make any
- * joins against it here. In most cases that's OK; it'll be
- * considered by "bushy plan" join code in a higher-level pass
- * where we have those other rels collected into a join rel.
+ * Note that if all available join clauses for this rel require
+ * more than one other rel, we will fail to make any joins against
+ * it here. In most cases that's OK; it'll be considered by
+ * "bushy plan" join code in a higher-level pass where we have
+ * those other rels collected into a join rel.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
other_rels);
/*
- * An exception occurs when there is a clauseless join inside
- * an IN (sub-SELECT) construct. Here, the members of the
- * subselect all have join clauses (against the stuff outside
- * the IN), but they *must* be joined to each other before we
- * can make use of those join clauses. So do the clauseless
- * join bit.
+ * An exception occurs when there is a clauseless join inside an
+ * IN (sub-SELECT) construct. Here, the members of the subselect
+ * all have join clauses (against the stuff outside the IN), but
+ * they *must* be joined to each other before we can make use of
+ * those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
@@ -115,30 +113,29 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* At levels above 2 we will generate the same joined relation in
* multiple ways --- for example (a join b) join c is the same
- * RelOptInfo as (b join c) join a, though the second case will
- * add a different set of Paths to it. To avoid making extra work
- * for subsequent passes, do not enter the same RelOptInfo into
- * our output list multiple times.
+ * RelOptInfo as (b join c) join a, though the second case will add a
+ * different set of Paths to it. To avoid making extra work for
+ * subsequent passes, do not enter the same RelOptInfo into our output
+ * list multiple times.
*/
result_rels = list_concat_unique_ptr(result_rels, new_rels);
}
/*
- * Now, consider "bushy plans" in which relations of k initial rels
- * are joined to relations of level-k initial rels, for 2 <= k <=
- * level-2.
+ * Now, consider "bushy plans" in which relations of k initial rels are
+ * joined to relations of level-k initial rels, for 2 <= k <= level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
- * suitable join clause, in order to avoid unreasonable growth of
- * planning time.
+ * suitable join clause, in order to avoid unreasonable growth of planning
+ * time.
*/
for (k = 2;; k++)
{
int other_level = level - k;
/*
- * Since make_join_rel(x, y) handles both x,y and y,x cases, we
- * only need to go as far as the halfway point.
+ * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
+ * need to go as far as the halfway point.
*/
if (k > other_level)
break;
@@ -165,8 +162,8 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
{
/*
* OK, we can build a rel of the right level from this
- * pair of rels. Do so if there is at least one
- * usable join clause.
+ * pair of rels. Do so if there is at least one usable
+ * join clause.
*/
if (have_relevant_joinclause(old_rel, new_rel))
{
@@ -185,16 +182,16 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
}
/*
- * Last-ditch effort: if we failed to find any usable joins so far,
- * force a set of cartesian-product joins to be generated. This
- * handles the special case where all the available rels have join
- * clauses but we cannot use any of the joins yet. An example is
+ * Last-ditch effort: if we failed to find any usable joins so far, force
+ * a set of cartesian-product joins to be generated. This handles the
+ * special case where all the available rels have join clauses but we
+ * cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
- * choice but to make cartesian joins. We consider only left-sided
- * and right-sided cartesian joins in this case (no bushy).
+ * choice but to make cartesian joins. We consider only left-sided and
+ * right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
@@ -318,8 +315,8 @@ make_rels_by_clauseless_joins(PlannerInfo *root,
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
/*
- * As long as given other_rels are distinct, don't need to
- * test to see if jrel is already part of output list.
+ * As long as given other_rels are distinct, don't need to test to
+ * see if jrel is already part of output list.
*/
if (jrel)
result = lcons(jrel, result);
@@ -393,10 +390,10 @@ make_jointree_rel(PlannerInfo *root, Node *jtnode)
elog(ERROR, "invalid join order");
/*
- * Since we are only going to consider this one way to do it,
- * we're done generating Paths for this joinrel and can now select
- * the cheapest. In fact we *must* do so now, since next level up
- * will need it!
+ * Since we are only going to consider this one way to do it, we're
+ * done generating Paths for this joinrel and can now select the
+ * cheapest. In fact we *must* do so now, since next level up will
+ * need it!
*/
set_cheapest(rel);
@@ -439,10 +436,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
joinrelids = bms_union(rel1->relids, rel2->relids);
/*
- * If we are implementing IN clauses as joins, there are some joins
- * that are illegal. Check to see if the proposed join is trouble. We
- * can skip the work if looking at an outer join, however, because
- * only top-level joins might be affected.
+ * If we are implementing IN clauses as joins, there are some joins that
+ * are illegal. Check to see if the proposed join is trouble. We can skip
+ * the work if looking at an outer join, however, because only top-level
+ * joins might be affected.
*/
if (jointype == JOIN_INNER)
{
@@ -454,8 +451,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* This IN clause is not relevant unless its RHS overlaps the
- * proposed join. (Check this first as a fast path for
- * dismissing most irrelevant INs quickly.)
+ * proposed join. (Check this first as a fast path for dismissing
+ * most irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
@@ -468,10 +465,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
continue;
/*
- * Cannot join if proposed join contains rels not in the RHS
- * *and* contains only part of the RHS. We must build the
- * complete RHS (subselect's join) before it can be joined to
- * rels outside the subselect.
+ * Cannot join if proposed join contains rels not in the RHS *and*
+ * contains only part of the RHS. We must build the complete RHS
+ * (subselect's join) before it can be joined to rels outside the
+ * subselect.
*/
if (!bms_is_subset(ininfo->righthand, joinrelids))
{
@@ -480,13 +477,12 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
- * At this point we are considering a join of the IN's RHS to
- * some other rel(s).
+ * At this point we are considering a join of the IN's RHS to some
+ * other rel(s).
*
- * If we already joined IN's RHS to any other rels in either
- * input path, then this join is not constrained (the
- * necessary work was done at the lower level where that join
- * occurred).
+ * If we already joined IN's RHS to any other rels in either input
+ * path, then this join is not constrained (the necessary work was
+ * done at the lower level where that join occurred).
*/
if (bms_is_subset(ininfo->righthand, rel1->relids) &&
!bms_equal(ininfo->righthand, rel1->relids))
@@ -500,12 +496,11 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
* RHS/LHS.
*
- * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS;
- * conversely JOIN_UNIQUE_INNER will work if innerrel is
- * exactly RHS.
+ * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely
+ * JOIN_UNIQUE_INNER will work if innerrel is exactly RHS.
*
- * But none of these will work if we already found another IN
- * that needs to trigger here.
+ * But none of these will work if we already found another IN that
+ * needs to trigger here.
*/
if (jointype != JOIN_INNER)
{
@@ -532,8 +527,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
- * Find or build the join RelOptInfo, and compute the restrictlist
- * that goes with this particular joining.
+ * Find or build the join RelOptInfo, and compute the restrictlist that
+ * goes with this particular joining.
*/
joinrel = build_join_rel(root, joinrelids, rel1, rel2, jointype,
&restrictlist);
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index eb1e1a6ffcd..be5a0c3434f 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.74 2005/07/28 20:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,14 +99,14 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
if (restriction_is_or_clause(rinfo))
{
/*
- * Use the generate_bitmap_or_paths() machinery to estimate
- * the value of each OR clause. We can use regular
- * restriction clauses along with the OR clause contents to
- * generate indexquals. We pass outer_relids = NULL so that
- * sub-clauses that are actually joins will be ignored.
+ * Use the generate_bitmap_or_paths() machinery to estimate the
+ * value of each OR clause. We can use regular restriction
+ * clauses along with the OR clause contents to generate
+ * indexquals. We pass outer_relids = NULL so that sub-clauses
+ * that are actually joins will be ignored.
*/
- List *orpaths;
- ListCell *k;
+ List *orpaths;
+ ListCell *k;
orpaths = generate_bitmap_or_paths(root, rel,
list_make1(rinfo),
@@ -116,7 +116,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/* Locate the cheapest OR path */
foreach(k, orpaths)
{
- BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
+ BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
Assert(IsA(path, BitmapOrPath));
if (bestpath == NULL ||
@@ -134,8 +134,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
return false;
/*
- * Convert the path's indexclauses structure to a RestrictInfo tree.
- * We include any partial-index predicates so as to get a reasonable
+ * Convert the path's indexclauses structure to a RestrictInfo tree. We
+ * include any partial-index predicates so as to get a reasonable
* representation of what the path is actually scanning.
*/
newrinfos = make_restrictinfo_from_bitmapqual((Path *) bestpath,
@@ -155,12 +155,12 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
rel->baserestrictinfo = list_concat(rel->baserestrictinfo, newrinfos);
/*
- * Adjust the original OR clause's cached selectivity to compensate
- * for the selectivity of the added (but redundant) lower-level qual.
- * This should result in the join rel getting approximately the same
- * rows estimate as it would have gotten without all these
- * shenanigans. (XXX major hack alert ... this depends on the
- * assumption that the selectivity will stay cached ...)
+ * Adjust the original OR clause's cached selectivity to compensate for
+ * the selectivity of the added (but redundant) lower-level qual. This
+ * should result in the join rel getting approximately the same rows
+ * estimate as it would have gotten without all these shenanigans. (XXX
+ * major hack alert ... this depends on the assumption that the
+ * selectivity will stay cached ...)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 09ad68ecd93..a2626929826 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.72 2005/08/27 22:13:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,17 +33,17 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop, bool checkType);
static void generate_outer_join_implications(PlannerInfo *root,
- List *equi_key_set,
- Relids *relids);
+ List *equi_key_set,
+ Relids *relids);
static void sub_generate_join_implications(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1,
- Relids item1_relids);
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1,
+ Relids item1_relids);
static void process_implied_const_eq(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1,
- Relids item1_relids,
- bool delete_it);
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1,
+ Relids item1_relids,
+ bool delete_it);
static List *make_canonical_pathkey(PlannerInfo *root, PathKeyItem *item);
static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel,
AttrNumber varattno);
@@ -59,12 +59,11 @@ makePathKeyItem(Node *key, Oid sortop, bool checkType)
PathKeyItem *item = makeNode(PathKeyItem);
/*
- * Some callers pass expressions that are not necessarily of the same
- * type as the sort operator expects as input (for example when
- * dealing with an index that uses binary-compatible operators). We
- * must relabel these with the correct type so that the key
- * expressions will be seen as equal() to expressions that have been
- * correctly labeled.
+ * Some callers pass expressions that are not necessarily of the same type
+ * as the sort operator expects as input (for example when dealing with an
+ * index that uses binary-compatible operators). We must relabel these
+ * with the correct type so that the key expressions will be seen as
+ * equal() to expressions that have been correctly labeled.
*/
if (checkType)
{
@@ -116,20 +115,19 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
return;
/*
- * Our plan is to make a two-element set, then sweep through the
- * existing equijoin sets looking for matches to item1 or item2. When
- * we find one, we remove that set from equi_key_list and union it
- * into our new set. When done, we add the new set to the front of
- * equi_key_list.
+ * Our plan is to make a two-element set, then sweep through the existing
+ * equijoin sets looking for matches to item1 or item2. When we find one,
+ * we remove that set from equi_key_list and union it into our new set.
+ * When done, we add the new set to the front of equi_key_list.
*
* It may well be that the two items we're given are already known to be
* equijoin-equivalent, in which case we don't need to change our data
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
- * This is a standard UNION-FIND problem, for which there exist better
- * data structures than simple lists. If this code ever proves to be
- * a bottleneck then it could be sped up --- but for now, simple is
+ * This is a standard UNION-FIND problem, for which there exist better data
+ * structures than simple lists. If this code ever proves to be a
+ * bottleneck then it could be sped up --- but for now, simple is
* beautiful.
*/
newset = NIL;
@@ -148,8 +146,7 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
if (item1here || item2here)
{
/*
- * If find both in same equivalence set, no need to do any
- * more
+ * If find both in same equivalence set, no need to do any more
*/
if (item1here && item2here)
{
@@ -228,18 +225,18 @@ generate_implied_equalities(PlannerInfo *root)
int i1;
/*
- * A set containing only two items cannot imply any equalities
- * beyond the one that created the set, so we can skip it ---
- * unless outer joins appear in the query.
+ * A set containing only two items cannot imply any equalities beyond
+ * the one that created the set, so we can skip it --- unless outer
+ * joins appear in the query.
*/
if (nitems < 3 && !root->hasOuterJoins)
continue;
/*
- * Collect info about relids mentioned in each item. For this
- * routine we only really care whether there are any at all in
- * each item, but process_implied_equality() needs the exact sets,
- * so we may as well pull them here.
+ * Collect info about relids mentioned in each item. For this routine
+ * we only really care whether there are any at all in each item, but
+ * process_implied_equality() needs the exact sets, so we may as well
+ * pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@@ -258,9 +255,9 @@ generate_implied_equalities(PlannerInfo *root)
* Match each item in the set with all that appear after it (it's
* sufficient to generate A=B, need not process B=A too).
*
- * A set containing only two items cannot imply any equalities
- * beyond the one that created the set, so we can skip this
- * processing in that case.
+ * A set containing only two items cannot imply any equalities beyond the
+ * one that created the set, so we can skip this processing in that
+ * case.
*/
if (nitems >= 3)
{
@@ -346,7 +343,7 @@ generate_implied_equalities(PlannerInfo *root)
* the time it gets here, the restriction will look like
* COALESCE(LEFTVAR, RIGHTVAR) = CONSTANT
* and we will have a join clause LEFTVAR = RIGHTVAR that we can match the
- * COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
+ * COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
* and RIGHTVAR = CONSTANT into the input relations, since any rows not
* meeting these conditions cannot contribute to the join result.
*
@@ -397,8 +394,8 @@ generate_outer_join_implications(PlannerInfo *root,
*/
static void
sub_generate_join_implications(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1, Relids item1_relids)
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1, Relids item1_relids)
{
ListCell *l;
@@ -410,34 +407,36 @@ sub_generate_join_implications(PlannerInfo *root,
foreach(l, root->left_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *leftop = get_leftop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
if (equal(leftop, item1) && rinfo->left_sortop == sortop1)
{
/*
- * Match, so find constant member(s) of set and generate
- * implied INNERVAR = CONSTANT
+ * Match, so find constant member(s) of set and generate implied
+ * INNERVAR = CONSTANT
*/
- Node *rightop = get_rightop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
rightop,
rinfo->right_sortop,
rinfo->right_relids,
false);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the same
+ * value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * INNERVAR = CONSTANT
+ * And recurse to see if we can deduce anything from INNERVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
rightop,
@@ -450,34 +449,36 @@ sub_generate_join_implications(PlannerInfo *root,
foreach(l, root->right_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *rightop = get_rightop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
if (equal(rightop, item1) && rinfo->right_sortop == sortop1)
{
/*
- * Match, so find constant member(s) of set and generate
- * implied INNERVAR = CONSTANT
+ * Match, so find constant member(s) of set and generate implied
+ * INNERVAR = CONSTANT
*/
- Node *leftop = get_leftop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
leftop,
rinfo->left_sortop,
rinfo->left_relids,
false);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the same
+ * value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * INNERVAR = CONSTANT
+ * And recurse to see if we can deduce anything from INNERVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
@@ -492,8 +493,8 @@ sub_generate_join_implications(PlannerInfo *root,
if (IsA(item1, CoalesceExpr))
{
CoalesceExpr *cexpr = (CoalesceExpr *) item1;
- Node *cfirst;
- Node *csecond;
+ Node *cfirst;
+ Node *csecond;
if (list_length(cexpr->args) != 2)
return;
@@ -501,26 +502,26 @@ sub_generate_join_implications(PlannerInfo *root,
csecond = (Node *) lsecond(cexpr->args);
/*
- * Examine each mergejoinable full-join clause, looking for a
- * clause of the form "x = y" matching the COALESCE(x,y) expression
+ * Examine each mergejoinable full-join clause, looking for a clause
+ * of the form "x = y" matching the COALESCE(x,y) expression
*/
foreach(l, root->full_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *leftop = get_leftop(rinfo->clause);
- Node *rightop = get_rightop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
/*
- * We can assume the COALESCE() inputs are in the same order
- * as the join clause, since both were automatically generated
- * in the cases we care about.
+ * We can assume the COALESCE() inputs are in the same order as
+ * the join clause, since both were automatically generated in the
+ * cases we care about.
*
- * XXX currently this may fail to match in cross-type cases
- * because the COALESCE will contain typecast operations while
- * the join clause may not (if there is a cross-type mergejoin
- * operator available for the two column types).
- * Is it OK to strip implicit coercions from the COALESCE
- * arguments? What of the sortops in such cases?
+ * XXX currently this may fail to match in cross-type cases because
+ * the COALESCE will contain typecast operations while the join
+ * clause may not (if there is a cross-type mergejoin operator
+ * available for the two column types). Is it OK to strip implicit
+ * coercions from the COALESCE arguments? What of the sortops in
+ * such cases?
*/
if (equal(leftop, cfirst) &&
equal(rightop, csecond) &&
@@ -548,10 +549,11 @@ sub_generate_join_implications(PlannerInfo *root,
sortop1,
item1_relids,
true);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the
+ * same value.
*/
process_implied_equality(root,
leftop, rightop,
@@ -560,9 +562,10 @@ sub_generate_join_implications(PlannerInfo *root,
rinfo->left_relids,
rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * LEFTVAR = CONSTANT
+ * And recurse to see if we can deduce anything from LEFTVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
@@ -700,19 +703,19 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
List *cpathkey;
/*
- * It's sufficient to look at the first entry in the sublist; if
- * there are more entries, they're already part of an equivalence
- * set by definition.
+ * It's sufficient to look at the first entry in the sublist; if there
+ * are more entries, they're already part of an equivalence set by
+ * definition.
*/
Assert(pathkey != NIL);
item = (PathKeyItem *) linitial(pathkey);
cpathkey = make_canonical_pathkey(root, item);
/*
- * Eliminate redundant ordering requests --- ORDER BY A,A is the
- * same as ORDER BY A. We want to check this only after we have
- * canonicalized the keys, so that equivalent-key knowledge is
- * used when deciding if an item is redundant.
+ * Eliminate redundant ordering requests --- ORDER BY A,A is the same
+ * as ORDER BY A. We want to check this only after we have
+ * canonicalized the keys, so that equivalent-key knowledge is used
+ * when deciding if an item is redundant.
*/
new_pathkeys = list_append_unique_ptr(new_pathkeys, cpathkey);
}
@@ -769,8 +772,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = (List *) lfirst(key2);
/*
- * XXX would like to check that we've been given canonicalized
- * input, but PlannerInfo not accessible here...
+ * XXX would like to check that we've been given canonicalized input,
+ * but PlannerInfo not accessible here...
*/
#ifdef NOT_USED
Assert(list_member_ptr(root->equi_key_list, subkey1));
@@ -778,10 +781,10 @@ compare_pathkeys(List *keys1, List *keys2)
#endif
/*
- * We will never have two subkeys where one is a subset of the
- * other, because of the canonicalization process. Either they
- * are equal or they ain't. Furthermore, we only need pointer
- * comparison to detect equality.
+ * We will never have two subkeys where one is a subset of the other,
+ * because of the canonicalization process. Either they are equal or
+ * they ain't. Furthermore, we only need pointer comparison to detect
+ * equality.
*/
if (subkey1 != subkey2)
return PATHKEYS_DIFFERENT; /* no need to keep looking */
@@ -789,9 +792,9 @@ compare_pathkeys(List *keys1, List *keys2)
/*
* If we reached the end of only one list, the other is longer and
- * therefore not a subset. (We assume the additional sublist(s) of
- * the other list are not NIL --- no pathkey list should ever have a
- * NIL sublist.)
+ * therefore not a subset. (We assume the additional sublist(s) of the
+ * other list are not NIL --- no pathkey list should ever have a NIL
+ * sublist.)
*/
if (key1 == NULL && key2 == NULL)
return PATHKEYS_EQUAL;
@@ -840,8 +843,8 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
Path *path = (Path *) lfirst(l);
/*
- * Since cost comparison is a lot cheaper than pathkey comparison,
- * do that first. (XXX is that still true?)
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@@ -879,11 +882,11 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
Path *path = (Path *) lfirst(l);
/*
- * Since cost comparison is a lot cheaper than pathkey comparison,
- * do that first.
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first.
*/
if (matched_path != NULL &&
- compare_fractional_path_costs(matched_path, path, fraction) <= 0)
+ compare_fractional_path_costs(matched_path, path, fraction) <= 0)
continue;
if (pathkeys_contained_in(pathkeys, path->pathkeys))
@@ -954,8 +957,8 @@ build_index_pathkeys(PlannerInfo *root,
cpathkey = make_canonical_pathkey(root, item);
/*
- * Eliminate redundant ordering info; could happen if query is
- * such that index keys are equijoined...
+ * Eliminate redundant ordering info; could happen if query is such
+ * that index keys are equijoined...
*/
retval = list_append_unique_ptr(retval, cpathkey);
@@ -1003,7 +1006,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno)
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -1033,19 +1036,18 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* The sub_pathkey could contain multiple elements (representing
- * knowledge that multiple items are effectively equal). Each
- * element might match none, one, or more of the output columns
- * that are visible to the outer query. This means we may have
- * multiple possible representations of the sub_pathkey in the
- * context of the outer query. Ideally we would generate them all
- * and put them all into a pathkey list of the outer query,
- * thereby propagating equality knowledge up to the outer query.
- * Right now we cannot do so, because the outer query's canonical
- * pathkey sets are already frozen when this is called. Instead
- * we prefer the one that has the highest "score" (number of
- * canonical pathkey peers, plus one if it matches the outer
- * query_pathkeys). This is the most likely to be useful in the
- * outer query.
+ * knowledge that multiple items are effectively equal). Each element
+ * might match none, one, or more of the output columns that are
+ * visible to the outer query. This means we may have multiple
+ * possible representations of the sub_pathkey in the context of the
+ * outer query. Ideally we would generate them all and put them all
+ * into a pathkey list of the outer query, thereby propagating
+ * equality knowledge up to the outer query. Right now we cannot do
+ * so, because the outer query's canonical pathkey sets are already
+ * frozen when this is called. Instead we prefer the one that has the
+ * highest "score" (number of canonical pathkey peers, plus one if it
+ * matches the outer query_pathkeys). This is the most likely to be
+ * useful in the outer query.
*/
foreach(j, sub_pathkey)
{
@@ -1144,13 +1146,13 @@ build_join_pathkeys(PlannerInfo *root,
return NIL;
/*
- * This used to be quite a complex bit of code, but now that all
- * pathkey sublists start out life canonicalized, we don't have to do
- * a darn thing here! The inner-rel vars we used to need to add are
- * *already* part of the outer pathkey!
+ * This used to be quite a complex bit of code, but now that all pathkey
+ * sublists start out life canonicalized, we don't have to do a darn thing
+ * here! The inner-rel vars we used to need to add are *already* part of
+ * the outer pathkey!
*
- * We do, however, need to truncate the pathkeys list, since it may
- * contain pathkeys that were useful for forming this joinrel but are
+ * We do, however, need to truncate the pathkeys list, since it may contain
+ * pathkeys that were useful for forming this joinrel but are
* uninteresting to higher levels.
*/
return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
@@ -1289,22 +1291,20 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* We can match a pathkey against either left or right side of any
- * mergejoin clause. (We examine both sides since we aren't told
- * if the given pathkeys are for inner or outer input path; no
- * confusion is possible.) Furthermore, if there are multiple
- * matching clauses, take them all. In plain inner-join scenarios
- * we expect only one match, because redundant-mergeclause
- * elimination will have removed any redundant mergeclauses from
- * the input list. However, in outer-join scenarios there might be
- * multiple matches. An example is
+ * mergejoin clause. (We examine both sides since we aren't told if
+ * the given pathkeys are for inner or outer input path; no confusion
+ * is possible.) Furthermore, if there are multiple matching clauses,
+ * take them all. In plain inner-join scenarios we expect only one
+ * match, because redundant-mergeclause elimination will have removed
+ * any redundant mergeclauses from the input list. However, in
+ * outer-join scenarios there might be multiple matches. An example is
*
- * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and
- * a.v1 = b.v2;
+ * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 =
+ * b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
- * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and
- * indeed we *must* do so or we will be unable to form a valid
- * plan.
+ * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
+ * we *must* do so or we will be unable to form a valid plan.
*/
foreach(j, restrictinfos)
{
@@ -1325,15 +1325,15 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can
- * still mergejoin if we found at least one mergeclause.)
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
break;
/*
- * If we did find usable mergeclause(s) for this sort-key
- * position, add them to result list.
+ * If we did find usable mergeclause(s) for this sort-key position,
+ * add them to result list.
*/
mergeclauses = list_concat(mergeclauses, matched_restrictinfos);
}
@@ -1390,14 +1390,13 @@ make_pathkeys_for_mergeclauses(PlannerInfo *root,
}
/*
- * When we are given multiple merge clauses, it's possible that
- * some clauses refer to the same vars as earlier clauses. There's
- * no reason for us to specify sort keys like (A,B,A) when (A,B)
- * will do --- and adding redundant sort keys makes add_path think
- * that this sort order is different from ones that are really the
- * same, so don't do it. Since we now have a canonicalized
- * pathkey, a simple ptrMember test is sufficient to detect
- * redundant keys.
+ * When we are given multiple merge clauses, it's possible that some
+ * clauses refer to the same vars as earlier clauses. There's no
+ * reason for us to specify sort keys like (A,B,A) when (A,B) will do
+ * --- and adding redundant sort keys makes add_path think that this
+ * sort order is different from ones that are really the same, so
+ * don't do it. Since we now have a canonicalized pathkey, a simple
+ * ptrMember test is sufficient to detect redundant keys.
*/
pathkeys = list_append_unique_ptr(pathkeys, pathkey);
}
@@ -1447,8 +1446,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
cache_mergeclause_pathkeys(root, restrictinfo);
/*
- * We can compare canonical pathkey sublists by simple
- * pointer equality; see compare_pathkeys.
+ * We can compare canonical pathkey sublists by simple pointer
+ * equality; see compare_pathkeys.
*/
if (pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey)
@@ -1460,8 +1459,8 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can
- * still mergejoin if we found at least one mergeclause.)
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
*/
if (matched)
useful++;
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 348524372e1..26058dc1b64 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -11,7 +11,7 @@
* WHERE ctid IN (tid1, tid2, ...)
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.24 2005/08/23 20:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.25 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ static List *TidQualFromRestrictinfo(int varno, List *restrictinfo);
*
* If it is, return the pseudoconstant subnode; if not, return NULL.
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index b7af04e1b9f..f0dd6548711 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.200 2005/10/13 00:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.201 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,10 +50,10 @@ static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
List *tlist, List *scan_clauses,
List **nonlossy_clauses);
static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
- BitmapHeapPath *best_path,
- List *tlist, List *scan_clauses);
+ BitmapHeapPath *best_path,
+ List *tlist, List *scan_clauses);
static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
- List **qual, List **indexqual);
+ List **qual, List **indexqual);
static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
List *tlist, List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
@@ -72,7 +72,7 @@ static void fix_indexqual_references(List *indexquals, IndexPath *index_path,
List **indexstrategy,
List **indexsubtype);
static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index,
- Oid *opclass);
+ Oid *opclass);
static List *get_switched_clauses(List *clauses, Relids outerrelids);
static void copy_path_costsize(Plan *dest, Path *src);
static void copy_plan_costsize(Plan *dest, Plan *src);
@@ -82,15 +82,15 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
List *indexstrategy, List *indexsubtype,
ScanDirection indexscandir);
static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
- List *indexqual,
- List *indexqualorig,
- List *indexstrategy,
- List *indexsubtype);
+ List *indexqual,
+ List *indexqualorig,
+ List *indexstrategy,
+ List *indexsubtype);
static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
- List *qpqual,
- Plan *lefttree,
- List *bitmapqualorig,
- Index scanrelid);
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid);
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
@@ -164,7 +164,7 @@ create_plan(PlannerInfo *root, Path *best_path)
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
- (MaterialPath *) best_path);
+ (MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
@@ -195,12 +195,12 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
Scan *plan;
/*
- * For table scans, rather than using the relation targetlist (which
- * is only those Vars actually needed by the query), we prefer to
- * generate a tlist containing all Vars in order. This will allow the
- * executor to optimize away projection of the table tuples, if
- * possible. (Note that planner.c may replace the tlist we generate
- * here, forcing projection to occur.)
+ * For table scans, rather than using the relation targetlist (which is
+ * only those Vars actually needed by the query), we prefer to generate a
+ * tlist containing all Vars in order. This will allow the executor to
+ * optimize away projection of the table tuples, if possible. (Note that
+ * planner.c may replace the tlist we generate here, forcing projection to
+ * occur.)
*/
if (use_physical_tlist(rel))
{
@@ -213,8 +213,8 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
tlist = build_relation_tlist(rel);
/*
- * Extract the relevant restriction clauses from the parent relation;
- * the executor must apply all these restrictions during the scan.
+ * Extract the relevant restriction clauses from the parent relation; the
+ * executor must apply all these restrictions during the scan.
*/
scan_clauses = rel->baserestrictinfo;
@@ -237,7 +237,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
case T_BitmapHeapScan:
plan = (Scan *) create_bitmap_scan_plan(root,
- (BitmapHeapPath *) best_path,
+ (BitmapHeapPath *) best_path,
tlist,
scan_clauses);
break;
@@ -308,8 +308,8 @@ use_physical_tlist(RelOptInfo *rel)
int i;
/*
- * OK for subquery and function scans; otherwise, can't do it for
- * anything except real relations.
+ * OK for subquery and function scans; otherwise, can't do it for anything
+ * except real relations.
*/
if (rel->rtekind != RTE_RELATION)
{
@@ -328,9 +328,9 @@ use_physical_tlist(RelOptInfo *rel)
return false;
/*
- * Can't do it if any system columns are requested, either. (This
- * could possibly be fixed but would take some fragile assumptions in
- * setrefs.c, I think.)
+ * Can't do it if any system columns are requested, either. (This could
+ * possibly be fixed but would take some fragile assumptions in setrefs.c,
+ * I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
@@ -415,14 +415,14 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
#ifdef NOT_USED
/*
- * * Expensive function pullups may have pulled local predicates *
- * into this path node. Put them in the qpqual of the plan node. *
- * JMH, 6/15/92
+ * * Expensive function pullups may have pulled local predicates * into
+ * this path node. Put them in the qpqual of the plan node. * JMH,
+ * 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
set_qpqual((Plan) plan,
list_concat(get_qpqual((Plan) plan),
- get_actual_clauses(get_loc_restrictinfo(best_path))));
+ get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif
return plan;
@@ -444,13 +444,13 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path)
ListCell *subpaths;
/*
- * It is possible for the subplans list to contain only one entry,
- * or even no entries. Handle these cases specially.
+ * It is possible for the subplans list to contain only one entry, or even
+ * no entries. Handle these cases specially.
*
- * XXX ideally, if there's just one entry, we'd not bother to generate
- * an Append node but just return the single child. At the moment this
- * does not work because the varno of the child scan plan won't match
- * the parent-rel Vars it'll be asked to emit.
+ * XXX ideally, if there's just one entry, we'd not bother to generate an
+ * Append node but just return the single child. At the moment this does
+ * not work because the varno of the child scan plan won't match the
+ * parent-rel Vars it'll be asked to emit.
*/
if (best_path->subpaths == NIL)
{
@@ -618,8 +618,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
if (newitems)
{
/*
- * If the top plan node can't do projections, we need to add a
- * Result node to help it along.
+ * If the top plan node can't do projections, we need to add a Result
+ * node to help it along.
*/
if (!is_projection_capable_plan(subplan))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
@@ -628,8 +628,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
}
/*
- * Build control information showing which subplan output columns are
- * to be examined by the grouping step. Unfortunately we can't merge this
+ * Build control information showing which subplan output columns are to
+ * be examined by the grouping step. Unfortunately we can't merge this
* with the previous loop, since we didn't then know which version of the
* subplan tlist we'd end up using.
*/
@@ -656,9 +656,9 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
/*
- * Since the Agg node is going to project anyway, we can give it
- * the minimum output tlist, without any stuff we might have added
- * to the subplan tlist.
+ * Since the Agg node is going to project anyway, we can give it the
+ * minimum output tlist, without any stuff we might have added to the
+ * subplan tlist.
*/
plan = (Plan *) make_agg(root,
build_relation_tlist(best_path->path.parent),
@@ -776,9 +776,9 @@ create_indexscan_plan(PlannerInfo *root,
stripped_indexquals = get_actual_clauses(indexquals);
/*
- * The executor needs a copy with the indexkey on the left of each
- * clause and with index attr numbers substituted for table ones. This
- * pass also gets strategy info and looks for "lossy" operators.
+ * The executor needs a copy with the indexkey on the left of each clause
+ * and with index attr numbers substituted for table ones. This pass also
+ * gets strategy info and looks for "lossy" operators.
*/
fix_indexqual_references(indexquals, best_path,
&fixed_indexquals,
@@ -792,12 +792,11 @@ create_indexscan_plan(PlannerInfo *root,
/*
* If this is an innerjoin scan, the indexclauses will contain join
- * clauses that are not present in scan_clauses (since the passed-in
- * value is just the rel's baserestrictinfo list). We must add these
- * clauses to scan_clauses to ensure they get checked. In most cases
- * we will remove the join clauses again below, but if a join clause
- * contains a special operator, we need to make sure it gets into the
- * scan_clauses.
+ * clauses that are not present in scan_clauses (since the passed-in value
+ * is just the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*
* Note: pointer comparison should be enough to determine RestrictInfo
* matches.
@@ -806,25 +805,25 @@ create_indexscan_plan(PlannerInfo *root,
scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses);
/*
- * The qpqual list must contain all restrictions not automatically
- * handled by the index. All the predicates in the indexquals will be
- * checked (either by the index itself, or by nodeIndexscan.c), but if
- * there are any "special" operators involved then they must be included
- * in qpqual. Also, any lossy index operators must be rechecked in
- * the qpqual. The upshot is that qpqual must contain scan_clauses
- * minus whatever appears in nonlossy_indexquals.
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeIndexscan.c), but if there are
+ * any "special" operators involved then they must be included in qpqual.
+ * Also, any lossy index operators must be rechecked in the qpqual. The
+ * upshot is that qpqual must contain scan_clauses minus whatever appears
+ * in nonlossy_indexquals.
*
- * In normal cases simple pointer equality checks will be enough to
- * spot duplicate RestrictInfos, so we try that first. In some situations
- * (particularly with OR'd index conditions) we may have scan_clauses
- * that are not equal to, but are logically implied by, the index quals;
- * so we also try a predicate_implied_by() check to see if we can discard
- * quals that way. (predicate_implied_by assumes its first input contains
- * only immutable functions, so we have to check that.) We can also
- * discard quals that are implied by a partial index's predicate.
+ * In normal cases simple pointer equality checks will be enough to spot
+ * duplicate RestrictInfos, so we try that first. In some situations
+ * (particularly with OR'd index conditions) we may have scan_clauses that
+ * are not equal to, but are logically implied by, the index quals; so we
+ * also try a predicate_implied_by() check to see if we can discard quals
+ * that way. (predicate_implied_by assumes its first input contains only
+ * immutable functions, so we have to check that.) We can also discard
+ * quals that are implied by a partial index's predicate.
*
- * While at it, we strip off the RestrictInfos to produce a list of
- * plain expressions.
+ * While at it, we strip off the RestrictInfos to produce a list of plain
+ * expressions.
*/
qpqual = NIL;
foreach(l, scan_clauses)
@@ -836,7 +835,7 @@ create_indexscan_plan(PlannerInfo *root,
continue;
if (!contain_mutable_functions((Node *) rinfo->clause))
{
- List *clausel = list_make1(rinfo->clause);
+ List *clausel = list_make1(rinfo->clause);
if (predicate_implied_by(clausel, nonlossy_indexquals))
continue;
@@ -898,13 +897,12 @@ create_bitmap_scan_plan(PlannerInfo *root,
scan_clauses = get_actual_clauses(scan_clauses);
/*
- * If this is a innerjoin scan, the indexclauses will contain join
- * clauses that are not present in scan_clauses (since the passed-in
- * value is just the rel's baserestrictinfo list). We must add these
- * clauses to scan_clauses to ensure they get checked. In most cases
- * we will remove the join clauses again below, but if a join clause
- * contains a special operator, we need to make sure it gets into the
- * scan_clauses.
+ * If this is a innerjoin scan, the indexclauses will contain join clauses
+ * that are not present in scan_clauses (since the passed-in value is just
+ * the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*/
if (best_path->isjoininner)
{
@@ -912,12 +910,12 @@ create_bitmap_scan_plan(PlannerInfo *root,
}
/*
- * The qpqual list must contain all restrictions not automatically
- * handled by the index. All the predicates in the indexquals will be
- * checked (either by the index itself, or by nodeBitmapHeapscan.c),
- * but if there are any "special" or lossy operators involved then they
- * must be added to qpqual. The upshot is that qpquals must contain
- * scan_clauses minus whatever appears in indexquals.
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeBitmapHeapscan.c), but if there
+ * are any "special" or lossy operators involved then they must be added
+ * to qpqual. The upshot is that qpquals must contain scan_clauses minus
+ * whatever appears in indexquals.
*
* In normal cases simple equal() checks will be enough to spot duplicate
* clauses, so we try that first. In some situations (particularly with
@@ -930,25 +928,25 @@ create_bitmap_scan_plan(PlannerInfo *root,
*
* XXX For the moment, we only consider partial index predicates in the
* simple single-index-scan case. Is it worth trying to be smart about
- * more complex cases? Perhaps create_bitmap_subplan should be made to
+ * more complex cases? Perhaps create_bitmap_subplan should be made to
* include predicate info in what it constructs.
*/
qpqual = NIL;
foreach(l, scan_clauses)
{
- Node *clause = (Node *) lfirst(l);
+ Node *clause = (Node *) lfirst(l);
if (list_member(indexquals, clause))
continue;
if (!contain_mutable_functions(clause))
{
- List *clausel = list_make1(clause);
+ List *clausel = list_make1(clause);
if (predicate_implied_by(clausel, indexquals))
continue;
if (IsA(best_path->bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) best_path->bitmapqual;
+ IndexPath *ipath = (IndexPath *) best_path->bitmapqual;
if (predicate_implied_by(clausel, ipath->indexinfo->indpred))
continue;
@@ -1010,15 +1008,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
- * different index quals. We don't try exceedingly hard to
- * eliminate redundancies, but we do eliminate obvious duplicates
- * by using list_concat_unique.
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
*/
foreach(l, apath->bitmapquals)
{
- Plan *subplan;
- List *subqual;
- List *subindexqual;
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
@@ -1048,7 +1046,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -1056,9 +1054,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
*/
foreach(l, opath->bitmapquals)
{
- Plan *subplan;
- List *subqual;
- List *subindexqual;
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
@@ -1080,6 +1078,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
plan->plan_rows =
clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
plan->plan_width = 0; /* meaningless */
+
/*
* If there were constant-TRUE subquals, the OR reduces to constant
* TRUE. Also, avoid generating one-element ORs, which could happen
@@ -1100,9 +1099,9 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
}
else if (IsA(bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) bitmapqual;
- IndexScan *iscan;
- List *nonlossy_clauses;
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexScan *iscan;
+ List *nonlossy_clauses;
/* Use the regular indexscan plan build machinery... */
iscan = create_indexscan_plan(root, ipath, NIL, NIL,
@@ -1245,18 +1244,18 @@ create_nestloop_plan(PlannerInfo *root,
if (IsA(best_path->innerjoinpath, IndexPath))
{
/*
- * An index is being used to reduce the number of tuples scanned
- * in the inner relation. If there are join clauses being used
- * with the index, we may remove those join clauses from the list
- * of clauses that have to be checked as qpquals at the join node.
+ * An index is being used to reduce the number of tuples scanned in
+ * the inner relation. If there are join clauses being used with the
+ * index, we may remove those join clauses from the list of clauses
+ * that have to be checked as qpquals at the join node.
*
* We can also remove any join clauses that are redundant with those
- * being used in the index scan; prior redundancy checks will not
- * have caught this case because the join clauses would never have
- * been put in the same joininfo list.
+ * being used in the index scan; prior redundancy checks will not have
+ * caught this case because the join clauses would never have been put
+ * in the same joininfo list.
*
- * We can skip this if the index path is an ordinary indexpath and
- * not a special innerjoin path.
+ * We can skip this if the index path is an ordinary indexpath and not a
+ * special innerjoin path.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
@@ -1266,7 +1265,7 @@ create_nestloop_plan(PlannerInfo *root,
select_nonredundant_join_clauses(root,
joinrestrictclauses,
innerpath->indexclauses,
- IS_OUTER_JOIN(best_path->jointype));
+ IS_OUTER_JOIN(best_path->jointype));
}
}
else if (IsA(best_path->innerjoinpath, BitmapHeapPath))
@@ -1275,11 +1274,11 @@ create_nestloop_plan(PlannerInfo *root,
* Same deal for bitmapped index scans.
*
* Note: both here and above, we ignore any implicit index restrictions
- * associated with the use of partial indexes. This is OK because
+ * associated with the use of partial indexes. This is OK because
* we're only trying to prove we can dispense with some join quals;
* failing to prove that doesn't result in an incorrect plan. It is
- * the right way to proceed because adding more quals to the stuff
- * we got from the original query would just make it harder to detect
+ * the right way to proceed because adding more quals to the stuff we
+ * got from the original query would just make it harder to detect
* duplication.
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
@@ -1296,7 +1295,7 @@ create_nestloop_plan(PlannerInfo *root,
select_nonredundant_join_clauses(root,
joinrestrictclauses,
bitmapclauses,
- IS_OUTER_JOIN(best_path->jointype));
+ IS_OUTER_JOIN(best_path->jointype));
}
}
@@ -1355,18 +1354,18 @@ create_mergejoin_plan(PlannerInfo *root,
}
/*
- * Remove the mergeclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals.
+ * Remove the mergeclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
joinclauses = list_difference(joinclauses, mergeclauses);
/*
- * Rearrange mergeclauses, if needed, so that the outer variable is
- * always on the left.
+ * Rearrange mergeclauses, if needed, so that the outer variable is always
+ * on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/* Sort clauses into best execution order */
/* NB: do NOT reorder the mergeclauses */
@@ -1375,8 +1374,8 @@ create_mergejoin_plan(PlannerInfo *root,
/*
* Create explicit sort nodes for the outer and inner join paths if
- * necessary. The sort cost was already accounted for in the path.
- * Make sure there are no excess columns in the inputs if sorting.
+ * necessary. The sort cost was already accounted for in the path. Make
+ * sure there are no excess columns in the inputs if sorting.
*/
if (best_path->outersortkeys)
{
@@ -1439,18 +1438,18 @@ create_hashjoin_plan(PlannerInfo *root,
}
/*
- * Remove the hashclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals.
+ * Remove the hashclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
hashclauses = get_actual_clauses(best_path->path_hashclauses);
joinclauses = list_difference(joinclauses, hashclauses);
/*
- * Rearrange hashclauses, if needed, so that the outer variable is
- * always on the left.
+ * Rearrange hashclauses, if needed, so that the outer variable is always
+ * on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/* Sort clauses into best execution order */
joinclauses = order_qual_clauses(root, joinclauses);
@@ -1551,23 +1550,22 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
/*
* Make a copy that will become the fixed clause.
*
- * We used to try to do a shallow copy here, but that fails if there
- * is a subplan in the arguments of the opclause. So just do a
- * full copy.
+ * We used to try to do a shallow copy here, but that fails if there is a
+ * subplan in the arguments of the opclause. So just do a full copy.
*/
newclause = (OpExpr *) copyObject((Node *) clause);
/*
- * Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
- * (only) the base relation.
+ * Check to see if the indexkey is on the right; if so, commute the
+ * clause. The indexkey should be the side that refers to (only) the
+ * base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
CommuteClause(newclause);
/*
- * Now, determine which index attribute this is, change the
- * indexkey operand as needed, and get the index opclass.
+ * Now, determine which index attribute this is, change the indexkey
+ * operand as needed, and get the index opclass.
*/
linitial(newclause->args) =
fix_indexqual_operand(linitial(newclause->args),
@@ -1577,10 +1575,9 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
*fixed_indexquals = lappend(*fixed_indexquals, newclause);
/*
- * Look up the (possibly commuted) operator in the operator class
- * to get its strategy numbers and the recheck indicator. This
- * also double-checks that we found an operator matching the
- * index.
+ * Look up the (possibly commuted) operator in the operator class to
+ * get its strategy numbers and the recheck indicator. This also
+ * double-checks that we found an operator matching the index.
*/
get_op_opclass_properties(newclause->opno, opclass,
&stratno, &stratsubtype, &recheck);
@@ -1598,11 +1595,11 @@ static Node *
fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass)
{
/*
- * We represent index keys by Var nodes having the varno of the base
- * table but varattno equal to the index's attribute number (index
- * column position). This is a bit hokey ... would be cleaner to use
- * a special-purpose node type that could not be mistaken for a
- * regular Var. But it will do for now.
+ * We represent index keys by Var nodes having the varno of the base table
+ * but varattno equal to the index's attribute number (index column
+ * position). This is a bit hokey ... would be cleaner to use a
+ * special-purpose node type that could not be mistaken for a regular Var.
+ * But it will do for now.
*/
Var *result;
int pos;
@@ -1692,8 +1689,8 @@ get_switched_clauses(List *clauses, Relids outerrelids)
if (bms_is_subset(restrictinfo->right_relids, outerrelids))
{
/*
- * Duplicate just enough of the structure to allow commuting
- * the clause without changing the original list. Could use
+ * Duplicate just enough of the structure to allow commuting the
+ * clause without changing the original list. Could use
* copyObject, but a complete deep copy is overkill.
*/
OpExpr *temp = makeNode(OpExpr);
@@ -1934,9 +1931,9 @@ make_subqueryscan(List *qptlist,
Plan *plan = &node->scan.plan;
/*
- * Cost is figured here for the convenience of prepunion.c. Note this
- * is only correct for the case where qpqual is empty; otherwise
- * caller should overwrite cost with a better estimate.
+ * Cost is figured here for the convenience of prepunion.c. Note this is
+ * only correct for the case where qpqual is empty; otherwise caller
+ * should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
@@ -1977,9 +1974,9 @@ make_append(List *appendplans, bool isTarget, List *tlist)
ListCell *subnode;
/*
- * Compute cost as sum of subplan costs. We charge nothing extra for
- * the Append itself, which perhaps is too optimistic, but since it
- * doesn't do any selection or projection, it is a pretty cheap node.
+ * Compute cost as sum of subplan costs. We charge nothing extra for the
+ * Append itself, which perhaps is too optimistic, but since it doesn't do
+ * any selection or projection, it is a pretty cheap node.
*/
plan->startup_cost = 0;
plan->total_cost = 0;
@@ -2094,8 +2091,8 @@ make_hash(Plan *lefttree)
copy_plan_costsize(plan, lefttree);
/*
- * For plausibility, make startup & total costs equal total cost of
- * input plan; this only affects EXPLAIN display not decisions.
+ * For plausibility, make startup & total costs equal total cost of input
+ * plan; this only affects EXPLAIN display not decisions.
*/
plan->startup_cost = plan->total_cost;
plan->targetlist = copyObject(lefttree->targetlist);
@@ -2217,8 +2214,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
Oid *sortOperators;
/*
- * We will need at most list_length(pathkeys) sort columns; possibly
- * less
+ * We will need at most list_length(pathkeys) sort columns; possibly less
*/
numsortkeys = list_length(pathkeys);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2236,14 +2232,14 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
- * available Var in the tlist. If there isn't any, use the first
- * one that is an expression in the input's vars.
+ * available Var in the tlist. If there isn't any, use the first one
+ * that is an expression in the input's vars.
*
- * XXX if we have a choice, is there any way of figuring out which
- * might be cheapest to execute? (For example, int4lt is likely
- * much cheaper to execute than numericlt, but both might appear
- * in the same pathkey sublist...) Not clear that we ever will
- * have a choice in practice, so it may not matter.
+ * XXX if we have a choice, is there any way of figuring out which might
+ * be cheapest to execute? (For example, int4lt is likely much
+ * cheaper to execute than numericlt, but both might appear in the
+ * same pathkey sublist...) Not clear that we ever will have a choice
+ * in practice, so it may not matter.
*/
foreach(j, keysublist)
{
@@ -2296,13 +2292,13 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
}
/*
- * The column might already be selected as a sort key, if the
- * pathkeys contain duplicate entries. (This can happen in
- * scenarios where multiple mergejoinable clauses mention the same
- * var, for example.) So enter it only once in the sort arrays.
+ * The column might already be selected as a sort key, if the pathkeys
+ * contain duplicate entries. (This can happen in scenarios where
+ * multiple mergejoinable clauses mention the same var, for example.)
+ * So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(tle->resno, pathkey->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
@@ -2328,8 +2324,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
Oid *sortOperators;
/*
- * We will need at most list_length(sortcls) sort columns; possibly
- * less
+ * We will need at most list_length(sortcls) sort columns; possibly less
*/
numsortkeys = list_length(sortcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2348,7 +2343,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, sortcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
@@ -2384,8 +2379,7 @@ make_sort_from_groupcols(PlannerInfo *root,
Oid *sortOperators;
/*
- * We will need at most list_length(groupcls) sort columns; possibly
- * less
+ * We will need at most list_length(groupcls) sort columns; possibly less
*/
numsortkeys = list_length(groupcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
@@ -2404,7 +2398,7 @@ make_sort_from_groupcols(PlannerInfo *root,
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, grpcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
grpno++;
}
@@ -2492,8 +2486,8 @@ make_agg(PlannerInfo *root, List *tlist, List *qual,
plan->total_cost = agg_path.total_cost;
/*
- * We will produce a single output tuple if not grouping, and a tuple
- * per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
@@ -2501,13 +2495,13 @@ make_agg(PlannerInfo *root, List *tlist, List *qual,
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual (ie,
- * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
- * charge anything for Aggref nodes; this is okay since they are
- * really comparable to Vars.
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge
+ * anything for Aggref nodes; this is okay since they are really
+ * comparable to Vars.
*
- * See notes in grouping_planner about why this routine and make_group
- * are the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_group are
+ * the only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
@@ -2559,16 +2553,15 @@ make_group(PlannerInfo *root,
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual (ie,
- * the HAVING clause) and the tlist.
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist.
*
- * XXX this double-counts the cost of evaluation of any expressions used
- * for grouping, since in reality those will have been evaluated at a
- * lower plan level and will only be copied by the Group node. Worth
- * fixing?
+ * XXX this double-counts the cost of evaluation of any expressions used for
+ * grouping, since in reality those will have been evaluated at a lower
+ * plan level and will only be copied by the Group node. Worth fixing?
*
- * See notes in grouping_planner about why this routine and make_agg are
- * the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_agg are the
+ * only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
@@ -2607,16 +2600,16 @@ make_unique(Plan *lefttree, List *distinctList)
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX
- * probably this is an overestimate.)
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples. (XXX probably this is
+ * an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
- * plan->plan_rows is left as a copy of the input subplan's plan_rows;
- * ie, we assume the filter removes nothing. The caller must alter
- * this if he has a better idea.
+ * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
+ * we assume the filter removes nothing. The caller must alter this if he
+ * has a better idea.
*/
plan->targetlist = copyObject(lefttree->targetlist);
@@ -2625,8 +2618,7 @@ make_unique(Plan *lefttree, List *distinctList)
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
@@ -2664,8 +2656,8 @@ make_setop(SetOpCmd cmd, Plan *lefttree,
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
@@ -2683,8 +2675,7 @@ make_setop(SetOpCmd cmd, Plan *lefttree,
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
@@ -2727,8 +2718,8 @@ make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
* building a subquery then it's important to report correct info to the
* outer planner.
*
- * When the offset or count couldn't be estimated, use 10% of the
- * estimated number of rows emitted from the subplan.
+ * When the offset or count couldn't be estimated, use 10% of the estimated
+ * number of rows emitted from the subplan.
*/
if (offset_est != 0)
{
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 7e3d5bca55b..dd8fc4fa2d7 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.109 2005/09/28 21:17:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -221,7 +221,7 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
result = bms_add_members(result,
distribute_quals_to_rels(root,
lfirst(l),
- below_outer_join));
+ below_outer_join));
}
/*
@@ -243,17 +243,17 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
ListCell *qual;
/*
- * Order of operations here is subtle and critical. First we
- * recurse to handle sub-JOINs. Their join quals will be placed
- * without regard for whether this level is an outer join, which
- * is correct. Then we place our own join quals, which are
- * restricted by lower outer joins in any case, and are forced to
- * this level if this is an outer join and they mention the outer
- * side. Finally, if this is an outer join, we mark baserels
- * contained within the inner side(s) with our own rel set; this
- * will prevent quals above us in the join tree that use those
- * rels from being pushed down below this level. (It's okay for
- * upper quals to be pushed down to the outer side, however.)
+ * Order of operations here is subtle and critical. First we recurse
+ * to handle sub-JOINs. Their join quals will be placed without
+ * regard for whether this level is an outer join, which is correct.
+ * Then we place our own join quals, which are restricted by lower
+ * outer joins in any case, and are forced to this level if this is an
+ * outer join and they mention the outer side. Finally, if this is an
+ * outer join, we mark baserels contained within the inner side(s)
+ * with our own rel set; this will prevent quals above us in the join
+ * tree that use those rels from being pushed down below this level.
+ * (It's okay for upper quals to be pushed down to the outer side,
+ * however.)
*/
switch (j->jointype)
{
@@ -302,19 +302,19 @@ distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
case JOIN_UNION:
/*
- * This is where we fail if upper levels of planner
- * haven't rewritten UNION JOIN as an Append ...
+ * This is where we fail if upper levels of planner haven't
+ * rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("UNION JOIN is not implemented")));
- nonnullable_rels = NULL; /* keep compiler quiet */
+ nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
default:
elog(ERROR, "unrecognized join type: %d",
(int) j->jointype);
- nonnullable_rels = NULL; /* keep compiler quiet */
+ nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
}
@@ -349,19 +349,19 @@ mark_baserels_for_outer_join(PlannerInfo *root, Relids rels, Relids outerrels)
RelOptInfo *rel = find_base_rel(root, relno);
/*
- * Since we do this bottom-up, any outer-rels previously marked
- * should be within the new outer join set.
+ * Since we do this bottom-up, any outer-rels previously marked should
+ * be within the new outer join set.
*/
Assert(bms_is_subset(rel->outerjoinset, outerrels));
/*
* Presently the executor cannot support FOR UPDATE/SHARE marking of
* rels appearing on the nullable side of an outer join. (It's
- * somewhat unclear what that would mean, anyway: what should we
- * mark when a result row is generated from no element of the
- * nullable relation?) So, complain if target rel is FOR UPDATE/SHARE.
- * It's sufficient to make this check once per rel, so do it only
- * if rel wasn't already known nullable.
+ * somewhat unclear what that would mean, anyway: what should we mark
+ * when a result row is generated from no element of the nullable
+ * relation?) So, complain if target rel is FOR UPDATE/SHARE. It's
+ * sufficient to make this check once per rel, so do it only if rel
+ * wasn't already known nullable.
*/
if (rel->outerjoinset == NULL)
{
@@ -430,9 +430,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* If the clause is variable-free, we force it to be evaluated at its
* original syntactic level. Note that this should not happen for
- * top-level clauses, because query_planner() special-cases them. But
- * it will happen for variable-free JOIN/ON clauses. We don't have to
- * be real smart about such a case, we just have to be correct.
+ * top-level clauses, because query_planner() special-cases them. But it
+ * will happen for variable-free JOIN/ON clauses. We don't have to be
+ * real smart about such a case, we just have to be correct.
*/
if (bms_is_empty(relids))
relids = qualscope;
@@ -446,8 +446,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* If the qual came from implied-equality deduction, we always
* evaluate the qual at its natural semantic level. It is the
- * responsibility of the deducer not to create any quals that
- * should be delayed by outer-join rules.
+ * responsibility of the deducer not to create any quals that should
+ * be delayed by outer-join rules.
*/
Assert(bms_equal(relids, qualscope));
/* Needn't feed it back for more deductions */
@@ -457,28 +457,28 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else if (bms_overlap(relids, outerjoin_nonnullable))
{
/*
- * The qual is attached to an outer join and mentions (some of
- * the) rels on the nonnullable side. Force the qual to be
- * evaluated exactly at the level of joining corresponding to the
- * outer join. We cannot let it get pushed down into the
- * nonnullable side, since then we'd produce no output rows,
- * rather than the intended single null-extended row, for any
- * nonnullable-side rows failing the qual.
+ * The qual is attached to an outer join and mentions (some of the)
+ * rels on the nonnullable side. Force the qual to be evaluated
+ * exactly at the level of joining corresponding to the outer join. We
+ * cannot let it get pushed down into the nonnullable side, since then
+ * we'd produce no output rows, rather than the intended single
+ * null-extended row, for any nonnullable-side rows failing the qual.
*
- * Note: an outer-join qual that mentions only nullable-side rels can
- * be pushed down into the nullable side without changing the join
+ * Note: an outer-join qual that mentions only nullable-side rels can be
+ * pushed down into the nullable side without changing the join
* result, so we treat it the same as an ordinary inner-join qual,
* except for not setting maybe_equijoin (see below).
*/
relids = qualscope;
+
/*
- * We can't use such a clause to deduce equijoin (the left and
- * right sides might be unequal above the join because one of
- * them has gone to NULL) ... but we might be able to use it
- * for more limited purposes. Note: for the current uses of
- * deductions from an outer-join clause, it seems safe to make
- * the deductions even when the clause is below a higher-level
- * outer join; so we do not check below_outer_join here.
+ * We can't use such a clause to deduce equijoin (the left and right
+ * sides might be unequal above the join because one of them has gone
+ * to NULL) ... but we might be able to use it for more limited
+ * purposes. Note: for the current uses of deductions from an
+ * outer-join clause, it seems safe to make the deductions even when
+ * the clause is below a higher-level outer join; so we do not check
+ * below_outer_join here.
*/
maybe_equijoin = false;
maybe_outer_join = true;
@@ -486,15 +486,14 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * For a non-outer-join qual, we can evaluate the qual as soon as
- * (1) we have all the rels it mentions, and (2) we are at or
- * above any outer joins that can null any of these rels and are
- * below the syntactic location of the given qual. To enforce the
- * latter, scan the base rels listed in relids, and merge their
- * outer-join sets into the clause's own reference list. At the
- * time we are called, the outerjoinset of each baserel will show
- * exactly those outer joins that are below the qual in the join
- * tree.
+ * For a non-outer-join qual, we can evaluate the qual as soon as (1)
+ * we have all the rels it mentions, and (2) we are at or above any
+ * outer joins that can null any of these rels and are below the
+ * syntactic location of the given qual. To enforce the latter, scan
+ * the base rels listed in relids, and merge their outer-join sets
+ * into the clause's own reference list. At the time we are called,
+ * the outerjoinset of each baserel will show exactly those outer
+ * joins that are below the qual in the join tree.
*/
Relids addrelids = NULL;
Relids tmprelids;
@@ -513,13 +512,13 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
if (bms_is_subset(addrelids, relids))
{
/*
- * Qual is not delayed by any lower outer-join restriction.
- * If it is not itself below or within an outer join, we
- * can consider it "valid everywhere", so consider feeding
- * it to the equijoin machinery. (If it is within an outer
- * join, we can't consider it "valid everywhere": once the
- * contained variables have gone to NULL, we'd be asserting
- * things like NULL = NULL, which is not true.)
+ * Qual is not delayed by any lower outer-join restriction. If it
+ * is not itself below or within an outer join, we can consider it
+ * "valid everywhere", so consider feeding it to the equijoin
+ * machinery. (If it is within an outer join, we can't consider
+ * it "valid everywhere": once the contained variables have gone
+ * to NULL, we'd be asserting things like NULL = NULL, which is
+ * not true.)
*/
if (!below_outer_join && outerjoin_nonnullable == NULL)
maybe_equijoin = true;
@@ -533,8 +532,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
Assert(bms_is_subset(relids, qualscope));
/*
- * Because application of the qual will be delayed by outer
- * join, we mustn't assume its vars are equal everywhere.
+ * Because application of the qual will be delayed by outer join,
+ * we mustn't assume its vars are equal everywhere.
*/
maybe_equijoin = false;
}
@@ -543,11 +542,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
}
/*
- * Mark the qual as "pushed down" if it can be applied at a level
- * below its original syntactic level. This allows us to distinguish
- * original JOIN/ON quals from higher-level quals pushed down to the
- * same joinrel. A qual originating from WHERE is always considered
- * "pushed down".
+ * Mark the qual as "pushed down" if it can be applied at a level below
+ * its original syntactic level. This allows us to distinguish original
+ * JOIN/ON quals from higher-level quals pushed down to the same joinrel.
+ * A qual originating from WHERE is always considered "pushed down".
*/
if (!is_pushed_down)
is_pushed_down = !bms_equal(relids, qualscope);
@@ -573,25 +571,24 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
rel = find_base_rel(root, bms_singleton_member(relids));
/*
- * Check for a "mergejoinable" clause even though it's not a
- * join clause. This is so that we can recognize that "a.x =
- * a.y" makes x and y eligible to be considered equal, even
- * when they belong to the same rel. Without this, we would
- * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
- * allows us to consider z and q equal after their rels are
- * joined.
+ * Check for a "mergejoinable" clause even though it's not a join
+ * clause. This is so that we can recognize that "a.x = a.y"
+ * makes x and y eligible to be considered equal, even when they
+ * belong to the same rel. Without this, we would not recognize
+ * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
+ * consider z and q equal after their rels are joined.
*/
check_mergejoinable(restrictinfo);
/*
- * If the clause was deduced from implied equality, check to
- * see whether it is redundant with restriction clauses we
- * already have for this rel. Note we cannot apply this check
- * to user-written clauses, since we haven't found the
- * canonical pathkey sets yet while processing user clauses.
- * (NB: no comparable check is done in the join-clause case;
- * redundancy will be detected when the join clause is moved
- * into a join rel's restriction list.)
+ * If the clause was deduced from implied equality, check to see
+ * whether it is redundant with restriction clauses we already
+ * have for this rel. Note we cannot apply this check to
+ * user-written clauses, since we haven't found the canonical
+ * pathkey sets yet while processing user clauses. (NB: no
+ * comparable check is done in the join-clause case; redundancy
+ * will be detected when the join clause is moved into a join
+ * rel's restriction list.)
*/
if (!is_deduced ||
!qual_is_redundant(root, restrictinfo,
@@ -605,17 +602,17 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
case BMS_MULTIPLE:
/*
- * 'clause' is a join clause, since there is more than one rel
- * in the relid set.
+ * 'clause' is a join clause, since there is more than one rel in
+ * the relid set.
*/
/*
* Check for hash or mergejoinable operators.
*
- * We don't bother setting the hashjoin info if we're not going
- * to need it. We do want to know about mergejoinable ops in
- * all cases, however, because we use mergejoinable ops for
- * other purposes such as detecting redundant clauses.
+ * We don't bother setting the hashjoin info if we're not going to
+ * need it. We do want to know about mergejoinable ops in all
+ * cases, however, because we use mergejoinable ops for other
+ * purposes such as detecting redundant clauses.
*/
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
@@ -628,9 +625,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* Add vars used in the join clause to targetlists of their
- * relations, so that they will be emitted by the plan nodes
- * that scan those relations (else they won't be available at
- * the join node!).
+ * relations, so that they will be emitted by the plan nodes that
+ * scan those relations (else they won't be available at the join
+ * node!).
*/
vars = pull_var_clause(clause, false);
add_vars_to_targetlist(root, vars, relids);
@@ -639,17 +636,16 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
default:
/*
- * 'clause' references no rels, and therefore we have no place
- * to attach it. Shouldn't get here if callers are working
- * properly.
+ * 'clause' references no rels, and therefore we have no place to
+ * attach it. Shouldn't get here if callers are working properly.
*/
elog(ERROR, "cannot cope with variable-free clause");
break;
}
/*
- * If the clause has a mergejoinable operator, we may be able to
- * deduce more things from it under the principle of transitivity.
+ * If the clause has a mergejoinable operator, we may be able to deduce
+ * more things from it under the principle of transitivity.
*
* If it is not an outer-join qualification nor bubbled up due to an outer
* join, then the two sides represent equivalent PathKeyItems for path
@@ -744,8 +740,8 @@ process_implied_equality(PlannerInfo *root,
/*
* If the exprs involve a single rel, we need to look at that rel's
- * baserestrictinfo list. If multiple rels, we can scan the joininfo
- * list of any of 'em.
+ * baserestrictinfo list. If multiple rels, we can scan the joininfo list
+ * of any of 'em.
*/
if (membership == BMS_SINGLETON)
{
@@ -767,8 +763,8 @@ process_implied_equality(PlannerInfo *root,
}
/*
- * Scan to see if equality is already known. If so, we're done in the
- * add case, and done after removing it in the delete case.
+ * Scan to see if equality is already known. If so, we're done in the add
+ * case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
@@ -791,7 +787,7 @@ process_implied_equality(PlannerInfo *root,
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo,
- restrictinfo);
+ restrictinfo);
}
else
{
@@ -808,8 +804,8 @@ process_implied_equality(PlannerInfo *root,
return;
/*
- * This equality is new information, so construct a clause
- * representing it to add to the query data structures.
+ * This equality is new information, so construct a clause representing it
+ * to add to the query data structures.
*/
ltype = exprType(item1);
rtype = exprType(item2);
@@ -818,14 +814,14 @@ process_implied_equality(PlannerInfo *root,
if (!HeapTupleIsValid(eq_operator))
{
/*
- * Would it be safe to just not add the equality to the query if
- * we have no suitable equality operator for the combination of
+ * Would it be safe to just not add the equality to the query if we
+ * have no suitable equality operator for the combination of
* datatypes? NO, because sortkey selection may screw up anyway.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for types %s and %s",
- format_type_be(ltype), format_type_be(rtype))));
+ errmsg("could not identify an equality operator for types %s and %s",
+ format_type_be(ltype), format_type_be(rtype))));
}
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);
@@ -856,8 +852,8 @@ process_implied_equality(PlannerInfo *root,
/*
* Push the new clause into all the appropriate restrictinfo lists.
*
- * Note: we mark the qual "pushed down" to ensure that it can never be
- * taken for an original JOIN/ON clause.
+ * Note: we mark the qual "pushed down" to ensure that it can never be taken
+ * for an original JOIN/ON clause.
*/
distribute_qual_to_rels(root, (Node *) clause,
true, true, false, NULL, relids);
@@ -911,9 +907,9 @@ qual_is_redundant(PlannerInfo *root,
return false;
/*
- * Scan existing quals to find those referencing same pathkeys.
- * Usually there will be few, if any, so build a list of just the
- * interesting ones.
+ * Scan existing quals to find those referencing same pathkeys. Usually
+ * there will be few, if any, so build a list of just the interesting
+ * ones.
*/
oldquals = NIL;
foreach(olditem, restrictlist)
@@ -933,11 +929,10 @@ qual_is_redundant(PlannerInfo *root,
/*
* Now, we want to develop a list of exprs that are known equal to the
- * left side of the new qual. We traverse the old-quals list
- * repeatedly to transitively expand the exprs list. If at any point
- * we find we can reach the right-side expr of the new qual, we are
- * done. We give up when we can't expand the equalexprs list any
- * more.
+ * left side of the new qual. We traverse the old-quals list repeatedly
+ * to transitively expand the exprs list. If at any point we find we can
+ * reach the right-side expr of the new qual, we are done. We give up
+ * when we can't expand the equalexprs list any more.
*/
equalexprs = list_make1(newleft);
do
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index f2002a5228d..7c2f0211f10 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.9 2005/09/21 19:15:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,12 +43,12 @@ typedef struct
static bool find_minmax_aggs_walker(Node *node, List **context);
static bool build_minmax_path(PlannerInfo *root, RelOptInfo *rel,
- MinMaxAggInfo *info);
+ MinMaxAggInfo *info);
static ScanDirection match_agg_to_index_col(MinMaxAggInfo *info,
- IndexOptInfo *index, int indexcol);
+ IndexOptInfo *index, int indexcol);
static void make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info,
- List *constant_quals);
-static Node *replace_aggs_with_params_mutator(Node *node, List **context);
+ List *constant_quals);
+static Node *replace_aggs_with_params_mutator(Node *node, List **context);
static Oid fetch_agg_sort_op(Oid aggfnoid);
@@ -62,7 +62,7 @@ static Oid fetch_agg_sort_op(Oid aggfnoid);
* generic scan-all-the-rows plan.
*
* We are passed the preprocessed tlist, and the best path
- * devised for computing the input of a standard Agg node. If we are able
+ * devised for computing the input of a standard Agg node. If we are able
* to optimize all the aggregates, and the result is estimated to be cheaper
* than the generic aggregate method, then generate and return a Plan that
* does it that way. Otherwise, return NULL.
@@ -87,24 +87,24 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
if (!parse->hasAggs)
return NULL;
- Assert(!parse->setOperations); /* shouldn't get here if a setop */
- Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
+ Assert(!parse->setOperations); /* shouldn't get here if a setop */
+ Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
/*
* Reject unoptimizable cases.
*
- * We don't handle GROUP BY, because our current implementations of
- * grouping require looking at all the rows anyway, and so there's not
- * much point in optimizing MIN/MAX.
+ * We don't handle GROUP BY, because our current implementations of grouping
+ * require looking at all the rows anyway, and so there's not much point
+ * in optimizing MIN/MAX.
*/
if (parse->groupClause)
return NULL;
/*
- * We also restrict the query to reference exactly one table, since
- * join conditions can't be handled reasonably. (We could perhaps
- * handle a query containing cartesian-product joins, but it hardly
- * seems worth the trouble.)
+ * We also restrict the query to reference exactly one table, since join
+ * conditions can't be handled reasonably. (We could perhaps handle a
+ * query containing cartesian-product joins, but it hardly seems worth the
+ * trouble.)
*/
Assert(parse->jointree != NULL && IsA(parse->jointree, FromExpr));
if (list_length(parse->jointree->fromlist) != 1)
@@ -118,8 +118,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
rel = find_base_rel(root, rtr->rtindex);
/*
- * Also reject cases with subplans or volatile functions in WHERE.
- * This may be overly paranoid, but it's not entirely clear if the
+ * Also reject cases with subplans or volatile functions in WHERE. This
+ * may be overly paranoid, but it's not entirely clear if the
* transformation is safe then.
*/
if (contain_subplans(parse->jointree->quals) ||
@@ -127,17 +127,16 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
return NULL;
/*
- * Since this optimization is not applicable all that often, we want
- * to fall out before doing very much work if possible. Therefore
- * we do the work in several passes. The first pass scans the tlist
- * and HAVING qual to find all the aggregates and verify that
- * each of them is a MIN/MAX aggregate. If that succeeds, the second
- * pass looks at each aggregate to see if it is optimizable; if so
- * we make an IndexPath describing how we would scan it. (We do not
- * try to optimize if only some aggs are optimizable, since that means
- * we'll have to scan all the rows anyway.) If that succeeds, we have
- * enough info to compare costs against the generic implementation.
- * Only if that test passes do we build a Plan.
+ * Since this optimization is not applicable all that often, we want to
+ * fall out before doing very much work if possible. Therefore we do the
+ * work in several passes. The first pass scans the tlist and HAVING qual
+ * to find all the aggregates and verify that each of them is a MIN/MAX
+ * aggregate. If that succeeds, the second pass looks at each aggregate
+ * to see if it is optimizable; if so we make an IndexPath describing how
+ * we would scan it. (We do not try to optimize if only some aggs are
+ * optimizable, since that means we'll have to scan all the rows anyway.)
+ * If that succeeds, we have enough info to compare costs against the
+ * generic implementation. Only if that test passes do we build a Plan.
*/
/* Pass 1: find all the aggregates */
@@ -161,9 +160,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
/*
* Make the cost comparison.
*
- * Note that we don't include evaluation cost of the tlist here;
- * this is OK since it isn't included in best_path's cost either,
- * and should be the same in either case.
+ * Note that we don't include evaluation cost of the tlist here; this is OK
+ * since it isn't included in best_path's cost either, and should be the
+ * same in either case.
*/
cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list),
0, 0,
@@ -174,13 +173,13 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
return NULL; /* too expensive */
/*
- * OK, we are going to generate an optimized plan. The first thing we
- * need to do is look for any non-variable WHERE clauses that query_planner
- * might have removed from the basic plan. (Normal WHERE clauses will
- * be properly incorporated into the sub-plans by create_plan.) If there
- * are any, they will be in a gating Result node atop the best_path.
- * They have to be incorporated into a gating Result in each sub-plan
- * in order to produce the semantically correct result.
+ * OK, we are going to generate an optimized plan. The first thing we
+ * need to do is look for any non-variable WHERE clauses that
+ * query_planner might have removed from the basic plan. (Normal WHERE
+ * clauses will be properly incorporated into the sub-plans by
+ * create_plan.) If there are any, they will be in a gating Result node
+ * atop the best_path. They have to be incorporated into a gating Result
+ * in each sub-plan in order to produce the semantically correct result.
*/
if (IsA(best_path, ResultPath))
{
@@ -275,8 +274,8 @@ find_minmax_aggs_walker(Node *node, List **context)
*context = lappend(*context, info);
/*
- * We need not recurse into the argument, since it can't contain
- * any aggregates.
+ * We need not recurse into the argument, since it can't contain any
+ * aggregates.
*/
return false;
}
@@ -325,8 +324,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
/*
* Look for a match to one of the index columns. (In a stupidly
- * designed index, there could be multiple matches, but we only
- * care about the first one.)
+ * designed index, there could be multiple matches, but we only care
+ * about the first one.)
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
{
@@ -340,12 +339,12 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
/*
* If the match is not at the first index column, we have to verify
* that there are "x = something" restrictions on all the earlier
- * index columns. Since we'll need the restrictclauses list anyway
- * to build the path, it's convenient to extract that first and then
- * look through it for the equality restrictions.
+ * index columns. Since we'll need the restrictclauses list anyway to
+ * build the path, it's convenient to extract that first and then look
+ * through it for the equality restrictions.
*/
restrictclauses = group_clauses_by_indexkey(index,
- index->rel->baserestrictinfo,
+ index->rel->baserestrictinfo,
NIL,
NULL,
&found_clause);
@@ -354,8 +353,8 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
continue; /* definitely haven't got enough */
for (prevcol = 0; prevcol < indexcol; prevcol++)
{
- List *rinfos = (List *) list_nth(restrictclauses, prevcol);
- ListCell *ll;
+ List *rinfos = (List *) list_nth(restrictclauses, prevcol);
+ ListCell *ll;
foreach(ll, rinfos)
{
@@ -453,9 +452,9 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
NullTest *ntest;
/*
- * Generate a suitably modified query. Much of the work here is
- * probably unnecessary in the normal case, but we want to make it look
- * good if someone tries to EXPLAIN the result.
+ * Generate a suitably modified query. Much of the work here is probably
+ * unnecessary in the normal case, but we want to make it look good if
+ * someone tries to EXPLAIN the result.
*/
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = subparse = (Query *) copyObject(root->parse);
@@ -489,18 +488,17 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
false, true);
/*
- * Generate the plan for the subquery. We already have a Path for
- * the basic indexscan, but we have to convert it to a Plan and
- * attach a LIMIT node above it. We might need a gating Result, too,
- * to handle any non-variable qual clauses.
+ * Generate the plan for the subquery. We already have a Path for the
+ * basic indexscan, but we have to convert it to a Plan and attach a LIMIT
+ * node above it. We might need a gating Result, too, to handle any
+ * non-variable qual clauses.
*
- * Also we must add a "WHERE foo IS NOT NULL" restriction to the
- * indexscan, to be sure we don't return a NULL, which'd be contrary
- * to the standard behavior of MIN/MAX. XXX ideally this should be
- * done earlier, so that the selectivity of the restriction could be
- * included in our cost estimates. But that looks painful, and in
- * most cases the fraction of NULLs isn't high enough to change the
- * decision.
+ * Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan,
+ * to be sure we don't return a NULL, which'd be contrary to the standard
+ * behavior of MIN/MAX. XXX ideally this should be done earlier, so that
+ * the selectivity of the restriction could be included in our cost
+ * estimates. But that looks painful, and in most cases the fraction of
+ * NULLs isn't high enough to change the decision.
*/
plan = create_plan(&subroot, (Path *) info->path);
@@ -517,7 +515,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
copyObject(constant_quals),
plan);
- plan = (Plan *) make_limit(plan,
+ plan = (Plan *) make_limit(plan,
subparse->limitOffset,
subparse->limitCount,
0, 1);
@@ -534,7 +532,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
* Replace original aggregate calls with subplan output Params
*/
static Node *
-replace_aggs_with_params_mutator(Node *node, List **context)
+replace_aggs_with_params_mutator(Node *node, List **context)
{
if (node == NULL)
return NULL;
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 24d53be9e97..ecbf44400c9 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.88 2005/09/28 21:17:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@
* does not use grouping
*
* Note: the PlannerInfo node also includes a query_pathkeys field, which is
- * both an input and an output of query_planner(). The input value signals
+ * both an input and an output of query_planner(). The input value signals
* query_planner that the indicated sort order is wanted in the final output
* plan. But this value has not yet been "canonicalized", since the needed
* info does not get computed until we scan the qual clauses. We canonicalize
@@ -99,7 +99,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
if (parse->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
- (List *) parse->jointree->quals);
+ (List *) parse->jointree->quals);
*sorted_path = NULL;
return;
}
@@ -107,21 +107,21 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
/*
* Pull out any non-variable WHERE clauses so these can be put in a
* toplevel "Result" node, where they will gate execution of the whole
- * plan (the Result will not invoke its descendant plan unless the
- * quals are true). Note that any *really* non-variable quals will
- * have been optimized away by eval_const_expressions(). What we're
- * mostly interested in here is quals that depend only on outer-level
- * vars, although if the qual reduces to "WHERE FALSE" this path will
- * also be taken.
+ * plan (the Result will not invoke its descendant plan unless the quals
+ * are true). Note that any *really* non-variable quals will have been
+ * optimized away by eval_const_expressions(). What we're mostly
+ * interested in here is quals that depend only on outer-level vars,
+ * although if the qual reduces to "WHERE FALSE" this path will also be
+ * taken.
*/
parse->jointree->quals = (Node *)
pull_constant_clauses((List *) parse->jointree->quals,
&constant_quals);
/*
- * Init planner lists to empty. We create the base_rel_array with a
- * size that will be sufficient if no pullups or inheritance additions
- * happen ... otherwise it will be enlarged as needed.
+ * Init planner lists to empty. We create the base_rel_array with a size
+ * that will be sufficient if no pullups or inheritance additions happen
+ * ... otherwise it will be enlarged as needed.
*
* NOTE: in_info_list was set up by subquery_planner, do not touch here
*/
@@ -141,33 +141,32 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
add_base_rels_to_query(root, (Node *) parse->jointree);
/*
- * Examine the targetlist and qualifications, adding entries to
- * baserel targetlists for all referenced Vars. Restrict and join
- * clauses are added to appropriate lists belonging to the mentioned
- * relations. We also build lists of equijoined keys for pathkey
- * construction.
+ * Examine the targetlist and qualifications, adding entries to baserel
+ * targetlists for all referenced Vars. Restrict and join clauses are
+ * added to appropriate lists belonging to the mentioned relations. We
+ * also build lists of equijoined keys for pathkey construction.
*
- * Note: all subplan nodes will have "flat" (var-only) tlists. This
- * implies that all expression evaluations are done at the root of the
- * plan tree. Once upon a time there was code to try to push
- * expensive function calls down to lower plan nodes, but that's dead
- * code and has been for a long time...
+ * Note: all subplan nodes will have "flat" (var-only) tlists. This implies
+ * that all expression evaluations are done at the root of the plan tree.
+ * Once upon a time there was code to try to push expensive function calls
+ * down to lower plan nodes, but that's dead code and has been for a long
+ * time...
*/
build_base_rel_tlists(root, tlist);
(void) distribute_quals_to_rels(root, (Node *) parse->jointree, false);
/*
- * Use the completed lists of equijoined keys to deduce any implied
- * but unstated equalities (for example, A=B and B=C imply A=C).
+ * Use the completed lists of equijoined keys to deduce any implied but
+ * unstated equalities (for example, A=B and B=C imply A=C).
*/
generate_implied_equalities(root);
/*
- * We should now have all the pathkey equivalence sets built, so it's
- * now possible to convert the requested query_pathkeys to canonical
- * form. Also canonicalize the groupClause and sortClause pathkeys
- * for use later.
+ * We should now have all the pathkey equivalence sets built, so it's now
+ * possible to convert the requested query_pathkeys to canonical form.
+ * Also canonicalize the groupClause and sortClause pathkeys for use
+ * later.
*/
root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys);
root->group_pathkeys = canonicalize_pathkeys(root, root->group_pathkeys);
@@ -182,13 +181,13 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
elog(ERROR, "failed to construct the join relation");
/*
- * If there's grouping going on, estimate the number of result groups.
- * We couldn't do this any earlier because it depends on relation size
+ * If there's grouping going on, estimate the number of result groups. We
+ * couldn't do this any earlier because it depends on relation size
* estimates that were set up above.
*
- * Then convert tuple_fraction to fractional form if it is absolute,
- * and adjust it based on the knowledge that grouping_planner will be
- * doing grouping or aggregation work with our result.
+ * Then convert tuple_fraction to fractional form if it is absolute, and
+ * adjust it based on the knowledge that grouping_planner will be doing
+ * grouping or aggregation work with our result.
*
* This introduces some undesirable coupling between this code and
* grouping_planner, but the alternatives seem even uglier; we couldn't
@@ -205,18 +204,18 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
final_rel->rows);
/*
- * In GROUP BY mode, an absolute LIMIT is relative to the number
- * of groups not the number of tuples. If the caller gave us
- * a fraction, keep it as-is. (In both cases, we are effectively
- * assuming that all the groups are about the same size.)
+ * In GROUP BY mode, an absolute LIMIT is relative to the number of
+ * groups not the number of tuples. If the caller gave us a fraction,
+ * keep it as-is. (In both cases, we are effectively assuming that
+ * all the groups are about the same size.)
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= *num_groups;
/*
* If both GROUP BY and ORDER BY are specified, we will need two
- * levels of sort --- and, therefore, certainly need to read all
- * the tuples --- unless ORDER BY is a subset of GROUP BY.
+ * levels of sort --- and, therefore, certainly need to read all the
+ * tuples --- unless ORDER BY is a subset of GROUP BY.
*/
if (parse->groupClause && parse->sortClause &&
!pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys))
@@ -225,8 +224,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
else if (parse->hasAggs || root->hasHavingQual)
{
/*
- * Ungrouped aggregate will certainly want to read all the tuples,
- * and it will deliver a single result row (so leave *num_groups 1).
+ * Ungrouped aggregate will certainly want to read all the tuples, and
+ * it will deliver a single result row (so leave *num_groups 1).
*/
tuple_fraction = 0.0;
}
@@ -234,11 +233,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
{
/*
* Since there was no grouping or aggregation, it's reasonable to
- * assume the UNIQUE filter has effects comparable to GROUP BY.
- * Return the estimated number of output rows for use by caller.
- * (If DISTINCT is used with grouping, we ignore its effects for
- * rowcount estimation purposes; this amounts to assuming the grouped
- * rows are distinct already.)
+ * assume the UNIQUE filter has effects comparable to GROUP BY. Return
+ * the estimated number of output rows for use by caller. (If DISTINCT
+ * is used with grouping, we ignore its effects for rowcount
+ * estimation purposes; this amounts to assuming the grouped rows are
+ * distinct already.)
*/
List *distinctExprs;
@@ -257,26 +256,26 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
else
{
/*
- * Plain non-grouped, non-aggregated query: an absolute tuple
- * fraction can be divided by the number of tuples.
+ * Plain non-grouped, non-aggregated query: an absolute tuple fraction
+ * can be divided by the number of tuples.
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= final_rel->rows;
}
/*
- * Pick out the cheapest-total path and the cheapest presorted path
- * for the requested pathkeys (if there is one). We should take the
- * tuple fraction into account when selecting the cheapest presorted
- * path, but not when selecting the cheapest-total path, since if we
- * have to sort then we'll have to fetch all the tuples. (But there's
- * a special case: if query_pathkeys is NIL, meaning order doesn't
- * matter, then the "cheapest presorted" path will be the cheapest
- * overall for the tuple fraction.)
+ * Pick out the cheapest-total path and the cheapest presorted path for
+ * the requested pathkeys (if there is one). We should take the tuple
+ * fraction into account when selecting the cheapest presorted path, but
+ * not when selecting the cheapest-total path, since if we have to sort
+ * then we'll have to fetch all the tuples. (But there's a special case:
+ * if query_pathkeys is NIL, meaning order doesn't matter, then the
+ * "cheapest presorted" path will be the cheapest overall for the tuple
+ * fraction.)
*
- * The cheapest-total path is also the one to use if grouping_planner
- * decides to use hashed aggregation, so we return it separately even
- * if this routine thinks the presorted path is the winner.
+ * The cheapest-total path is also the one to use if grouping_planner decides
+ * to use hashed aggregation, so we return it separately even if this
+ * routine thinks the presorted path is the winner.
*/
cheapestpath = final_rel->cheapest_total_path;
@@ -291,8 +290,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
/*
* Forget about the presorted path if it would be cheaper to sort the
- * cheapest-total path. Here we need consider only the behavior at
- * the tuple fraction point.
+ * cheapest-total path. Here we need to consider only the behavior at
+ * the tuple fraction point.
*/
if (sortedpath)
{
@@ -323,8 +322,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
}
/*
- * If we have constant quals, add a toplevel Result step to process
- * them.
+ * If we have constant quals, add a toplevel Result step to process them.
*/
if (constant_quals)
{
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index ace53d692fb..762dfb4b641 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.193 2005/09/24 22:54:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
static Plan *inheritance_planner(PlannerInfo *root, List *inheritlist);
static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
static double preprocess_limit(PlannerInfo *root,
- double tuple_fraction,
- int *offset_est, int *count_est);
+ double tuple_fraction,
+ int *offset_est, int *count_est);
static bool choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
Path *cheapest_path, Path *sorted_path,
double dNumGroups, AggClauseCounts *agg_counts);
@@ -95,14 +95,13 @@ planner(Query *parse, bool isCursor, int cursorOptions,
* these global state variables must be saved and restored.
*
* Query level and the param list cannot be moved into the per-query
- * PlannerInfo structure since their whole purpose is communication
- * across multiple sub-queries. Also, boundParams is explicitly info
- * from outside the query, and so is likewise better handled as a global
- * variable.
+ * PlannerInfo structure since their whole purpose is communication across
+ * multiple sub-queries. Also, boundParams is explicitly info from outside
+ * the query, and so is likewise better handled as a global variable.
*
- * Note we do NOT save and restore PlannerPlanId: it exists to assign
- * unique IDs to SubPlan nodes, and we want those IDs to be unique for
- * the life of a backend. Also, PlannerInitPlan is saved/restored in
+ * Note we do NOT save and restore PlannerPlanId: it exists to assign unique
+ * IDs to SubPlan nodes, and we want those IDs to be unique for the life
+ * of a backend. Also, PlannerInitPlan is saved/restored in
* subquery_planner, not here.
*/
save_PlannerQueryLevel = PlannerQueryLevel;
@@ -118,10 +117,10 @@ planner(Query *parse, bool isCursor, int cursorOptions,
if (isCursor)
{
/*
- * We have no real idea how many tuples the user will ultimately
- * FETCH from a cursor, but it seems a good bet that he doesn't
- * want 'em all. Optimize for 10% retrieval (you gotta better
- * number? Should this be a SETtable parameter?)
+ * We have no real idea how many tuples the user will ultimately FETCH
+ * from a cursor, but it seems a good bet that he doesn't want 'em
+ * all. Optimize for 10% retrieval (you gotta better number? Should
+ * this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
@@ -207,10 +206,10 @@ subquery_planner(Query *parse, double tuple_fraction,
root->parse = parse;
/*
- * Look for IN clauses at the top level of WHERE, and transform them
- * into joins. Note that this step only handles IN clauses originally
- * at top level of WHERE; if we pull up any subqueries in the next
- * step, their INs are processed just before pulling them up.
+ * Look for IN clauses at the top level of WHERE, and transform them into
+ * joins. Note that this step only handles IN clauses originally at top
+ * level of WHERE; if we pull up any subqueries in the next step, their
+ * INs are processed just before pulling them up.
*/
root->in_info_list = NIL;
if (parse->hasSubLinks)
@@ -225,14 +224,14 @@ subquery_planner(Query *parse, double tuple_fraction,
pull_up_subqueries(root, (Node *) parse->jointree, false);
/*
- * Detect whether any rangetable entries are RTE_JOIN kind; if not, we
- * can avoid the expense of doing flatten_join_alias_vars(). Also
- * check for outer joins --- if none, we can skip reduce_outer_joins()
- * and some other processing. This must be done after we have done
+ * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
+ * avoid the expense of doing flatten_join_alias_vars(). Also check for
+ * outer joins --- if none, we can skip reduce_outer_joins() and some
+ * other processing. This must be done after we have done
* pull_up_subqueries, of course.
*
* Note: if reduce_outer_joins manages to eliminate all outer joins,
- * root->hasOuterJoins is not reset currently. This is OK since its
+ * root->hasOuterJoins is not reset currently. This is OK since its
* purpose is merely to suppress unnecessary processing in simple cases.
*/
root->hasJoinRTEs = false;
@@ -255,8 +254,8 @@ subquery_planner(Query *parse, double tuple_fraction,
/*
* Set hasHavingQual to remember if HAVING clause is present. Needed
- * because preprocess_expression will reduce a constant-true condition
- * to an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
+ * because preprocess_expression will reduce a constant-true condition to
+ * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
*/
root->hasHavingQual = (parse->havingQual != NULL);
@@ -292,29 +291,29 @@ subquery_planner(Query *parse, double tuple_fraction,
}
/*
- * In some cases we may want to transfer a HAVING clause into WHERE.
- * We cannot do so if the HAVING clause contains aggregates (obviously)
- * or volatile functions (since a HAVING clause is supposed to be executed
+ * In some cases we may want to transfer a HAVING clause into WHERE. We
+ * cannot do so if the HAVING clause contains aggregates (obviously) or
+ * volatile functions (since a HAVING clause is supposed to be executed
* only once per group). Also, it may be that the clause is so expensive
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy
- * the HAVING clause into WHERE, in hopes of eliminating tuples before
+ * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
- * If the query has explicit grouping then we can simply move such a
- * clause into WHERE; any group that fails the clause will not be
- * in the output because none of its tuples will reach the grouping
- * or aggregation stage. Otherwise we must have a degenerate
- * (variable-free) HAVING clause, which we put in WHERE so that
- * query_planner() can use it in a gating Result node, but also keep
- * in HAVING to ensure that we don't emit a bogus aggregated row.
- * (This could be done better, but it seems not worth optimizing.)
+ * If the query has explicit grouping then we can simply move such a clause
+ * into WHERE; any group that fails the clause will not be in the output
+ * because none of its tuples will reach the grouping or aggregation
+ * stage. Otherwise we must have a degenerate (variable-free) HAVING
+ * clause, which we put in WHERE so that query_planner() can use it in a
+ * gating Result node, but also keep in HAVING to ensure that we don't
+ * emit a bogus aggregated row. (This could be done better, but it seems
+ * not worth optimizing.)
*
* Note that both havingQual and parse->jointree->quals are in
- * implicitly-ANDed-list form at this point, even though they are
- * declared as Node *.
+ * implicitly-ANDed-list form at this point, even though they are declared
+ * as Node *.
*/
newHaving = NIL;
foreach(l, (List *) parse->havingQual)
@@ -346,28 +345,27 @@ subquery_planner(Query *parse, double tuple_fraction,
parse->havingQual = (Node *) newHaving;
/*
- * If we have any outer joins, try to reduce them to plain inner
- * joins. This step is most easily done after we've done expression
+ * If we have any outer joins, try to reduce them to plain inner joins.
+ * This step is most easily done after we've done expression
* preprocessing.
*/
if (root->hasOuterJoins)
reduce_outer_joins(root);
/*
- * See if we can simplify the jointree; opportunities for this may
- * come from having pulled up subqueries, or from flattening explicit
- * JOIN syntax. We must do this after flattening JOIN alias
- * variables, since eliminating explicit JOIN nodes from the jointree
- * will cause get_relids_for_join() to fail. But it should happen
- * after reduce_outer_joins, anyway.
+ * See if we can simplify the jointree; opportunities for this may come
+ * from having pulled up subqueries, or from flattening explicit JOIN
+ * syntax. We must do this after flattening JOIN alias variables, since
+ * eliminating explicit JOIN nodes from the jointree will cause
+ * get_relids_for_join() to fail. But it should happen after
+ * reduce_outer_joins, anyway.
*/
parse->jointree = (FromExpr *)
simplify_jointree(root, (Node *) parse->jointree);
/*
- * Do the main planning. If we have an inherited target relation,
- * that needs special processing, else go straight to
- * grouping_planner.
+ * Do the main planning. If we have an inherited target relation, that
+ * needs special processing, else go straight to grouping_planner.
*/
if (parse->resultRelation &&
(lst = expand_inherited_rtentry(root, parse->resultRelation)) != NIL)
@@ -377,8 +375,8 @@ subquery_planner(Query *parse, double tuple_fraction,
/*
* If any subplans were generated, or if we're inside a subplan, build
- * initPlan list and extParam/allParam sets for plan nodes, and attach
- * the initPlans to the top plan node.
+ * initPlan list and extParam/allParam sets for plan nodes, and attach the
+ * initPlans to the top plan node.
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
SS_finalize_plan(plan, parse->rtable);
@@ -405,9 +403,9 @@ static Node *
preprocess_expression(PlannerInfo *root, Node *expr, int kind)
{
/*
- * Fall out quickly if expression is empty. This occurs often enough
- * to be worth checking. Note that null->null is the correct conversion
- * for implicit-AND result format, too.
+ * Fall out quickly if expression is empty. This occurs often enough to
+ * be worth checking. Note that null->null is the correct conversion for
+ * implicit-AND result format, too.
*/
if (expr == NULL)
return NULL;
@@ -415,8 +413,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get
- * processed.
+ * else sublinks expanded out from join aliases wouldn't get processed.
*/
if (root->hasJoinRTEs)
expr = flatten_join_alias_vars(root, expr);
@@ -429,13 +426,13 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* careful to maintain AND/OR flatness --- that is, do not generate a tree
* with AND directly under AND, nor OR directly under OR.
*
- * Because this is a relatively expensive process, we skip it when the
- * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()".
- * The expression will only be evaluated once anyway, so no point in
+ * Because this is a relatively expensive process, we skip it when the query
+ * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
+ * expression will only be evaluated once anyway, so no point in
* pre-simplifying; we can't execute it any faster than the executor can,
* and we will waste cycles copying the tree. Notice however that we
- * still must do it for quals (to get AND/OR flatness); and if we are
- * in a subquery we should not assume it will be done only once.
+ * still must do it for quals (to get AND/OR flatness); and if we are in a
+ * subquery we should not assume it will be done only once.
*/
if (root->parse->jointree->fromlist != NIL ||
kind == EXPRKIND_QUAL ||
@@ -460,8 +457,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));
/*
- * XXX do not insert anything here unless you have grokked the
- * comments in SS_replace_correlation_vars ...
+ * XXX do not insert anything here unless you have grokked the comments in
+ * SS_replace_correlation_vars ...
*/
/* Replace uplevel vars with Param nodes */
@@ -469,9 +466,9 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
expr = SS_replace_correlation_vars(expr);
/*
- * If it's a qual or havingQual, convert it to implicit-AND format.
- * (We don't want to do this before eval_const_expressions, since the
- * latter would be unable to simplify a top-level AND correctly. Also,
+ * If it's a qual or havingQual, convert it to implicit-AND format. (We
+ * don't want to do this before eval_const_expressions, since the latter
+ * would be unable to simplify a top-level AND correctly. Also,
* SS_process_sublinks expects explicit-AND format.)
*/
if (kind == EXPRKIND_QUAL)
@@ -557,9 +554,9 @@ inheritance_planner(PlannerInfo *root, List *inheritlist)
Plan *subplan;
/*
- * Generate modified query with this rel as target. We have to
- * be prepared to translate varnos in in_info_list as well as in
- * the Query proper.
+ * Generate modified query with this rel as target. We have to be
+ * prepared to translate varnos in in_info_list as well as in the
+ * Query proper.
*/
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = (Query *)
@@ -580,26 +577,26 @@ inheritance_planner(PlannerInfo *root, List *inheritlist)
* XXX my goodness this next bit is ugly. Really need to think about
* ways to rein in planner's habit of scribbling on its input.
*
- * Planning of the subquery might have modified the rangetable,
- * either by addition of RTEs due to expansion of inherited source
- * tables, or by changes of the Query structures inside subquery
- * RTEs. We have to ensure that this gets propagated back to the
- * master copy. However, if we aren't done planning yet, we also
- * need to ensure that subsequent calls to grouping_planner have
- * virgin sub-Queries to work from. So, if we are at the last
- * list entry, just copy the subquery rangetable back to the master
- * copy; if we are not, then extend the master copy by adding
- * whatever the subquery added. (We assume these added entries
- * will go untouched by the future grouping_planner calls. We are
- * also effectively assuming that sub-Queries will get planned
- * identically each time, or at least that the impacts on their
- * rangetables will be the same each time. Did I say this is ugly?)
+ * Planning of the subquery might have modified the rangetable, either by
+ * addition of RTEs due to expansion of inherited source tables, or by
+ * changes of the Query structures inside subquery RTEs. We have to
+ * ensure that this gets propagated back to the master copy. However,
+ * if we aren't done planning yet, we also need to ensure that
+ * subsequent calls to grouping_planner have virgin sub-Queries to
+ * work from. So, if we are at the last list entry, just copy the
+ * subquery rangetable back to the master copy; if we are not, then
+ * extend the master copy by adding whatever the subquery added. (We
+ * assume these added entries will go untouched by the future
+ * grouping_planner calls. We are also effectively assuming that
+ * sub-Queries will get planned identically each time, or at least
+ * that the impacts on their rangetables will be the same each time.
+ * Did I say this is ugly?)
*/
if (lnext(l) == NULL)
parse->rtable = subroot.parse->rtable;
else
{
- int subrtlength = list_length(subroot.parse->rtable);
+ int subrtlength = list_length(subroot.parse->rtable);
if (subrtlength > mainrtlength)
{
@@ -666,38 +663,37 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
List *set_sortclauses;
/*
- * If there's a top-level ORDER BY, assume we have to fetch all
- * the tuples. This might seem too simplistic given all the
- * hackery below to possibly avoid the sort ... but a nonzero
- * tuple_fraction is only of use to plan_set_operations() when
- * the setop is UNION ALL, and the result of UNION ALL is always
- * unsorted.
+ * If there's a top-level ORDER BY, assume we have to fetch all the
+ * tuples. This might seem too simplistic given all the hackery below
+ * to possibly avoid the sort ... but a nonzero tuple_fraction is only
+ * of use to plan_set_operations() when the setop is UNION ALL, and
+ * the result of UNION ALL is always unsorted.
*/
if (parse->sortClause)
tuple_fraction = 0.0;
/*
- * Construct the plan for set operations. The result will not
- * need any work except perhaps a top-level sort and/or LIMIT.
+ * Construct the plan for set operations. The result will not need
+ * any work except perhaps a top-level sort and/or LIMIT.
*/
result_plan = plan_set_operations(root, tuple_fraction,
&set_sortclauses);
/*
- * Calculate pathkeys representing the sort order (if any) of the
- * set operation's result. We have to do this before overwriting
- * the sort key information...
+ * Calculate pathkeys representing the sort order (if any) of the set
+ * operation's result. We have to do this before overwriting the sort
+ * key information...
*/
current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses,
- result_plan->targetlist);
+ result_plan->targetlist);
current_pathkeys = canonicalize_pathkeys(root, current_pathkeys);
/*
- * We should not need to call preprocess_targetlist, since we must
- * be in a SELECT query node. Instead, use the targetlist
- * returned by plan_set_operations (since this tells whether it
- * returned any resjunk columns!), and transfer any sort key
- * information from the original tlist.
+ * We should not need to call preprocess_targetlist, since we must be
+ * in a SELECT query node. Instead, use the targetlist returned by
+ * plan_set_operations (since this tells whether it returned any
+ * resjunk columns!), and transfer any sort key information from the
+ * original tlist.
*/
Assert(parse->commandType == CMD_SELECT);
@@ -741,11 +737,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
tlist = preprocess_targetlist(root, tlist);
/*
- * Generate appropriate target list for subplan; may be different
- * from tlist if grouping or aggregation is needed.
+ * Generate appropriate target list for subplan; may be different from
+ * tlist if grouping or aggregation is needed.
*/
sub_tlist = make_subplanTargetList(root, tlist,
- &groupColIdx, &need_tlist_eval);
+ &groupColIdx, &need_tlist_eval);
/*
* Calculate pathkeys that represent grouping/ordering requirements.
@@ -763,10 +759,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Note: we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated count is okay for our present purposes.
*
- * Note: think not that we can turn off hasAggs if we find no aggs.
- * It is possible for constant-expression simplification to remove
- * all explicit references to aggs, but we still have to follow
- * the aggregate semantics (eg, producing only one output row).
+ * Note: think not that we can turn off hasAggs if we find no aggs. It is
+ * possible for constant-expression simplification to remove all
+ * explicit references to aggs, but we still have to follow the
+ * aggregate semantics (eg, producing only one output row).
*/
if (parse->hasAggs)
{
@@ -777,13 +773,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Figure out whether we need a sorted result from query_planner.
*
- * If we have a GROUP BY clause, then we want a result sorted
- * properly for grouping. Otherwise, if there is an ORDER BY
- * clause, we want to sort by the ORDER BY clause. (Note: if we
- * have both, and ORDER BY is a superset of GROUP BY, it would be
- * tempting to request sort by ORDER BY --- but that might just
- * leave us failing to exploit an available sort order at all.
- * Needs more thought...)
+ * If we have a GROUP BY clause, then we want a result sorted properly
+ * for grouping. Otherwise, if there is an ORDER BY clause, we want
+ * to sort by the ORDER BY clause. (Note: if we have both, and ORDER
+ * BY is a superset of GROUP BY, it would be tempting to request sort
+ * by ORDER BY --- but that might just leave us failing to exploit an
+ * available sort order at all. Needs more thought...)
*/
if (parse->groupClause)
root->query_pathkeys = root->group_pathkeys;
@@ -793,10 +788,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
root->query_pathkeys = NIL;
/*
- * Generate the best unsorted and presorted paths for this Query
- * (but note there may not be any presorted path). query_planner
- * will also estimate the number of groups in the query, and
- * canonicalize all the pathkeys.
+ * Generate the best unsorted and presorted paths for this Query (but
+ * note there may not be any presorted path). query_planner will also
+ * estimate the number of groups in the query, and canonicalize all
+ * the pathkeys.
*/
query_planner(root, sub_tlist, tuple_fraction,
&cheapest_path, &sorted_path, &dNumGroups);
@@ -820,8 +815,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Select the best path. If we are doing hashed grouping, we will
- * always read all the input tuples, so use the cheapest-total
- * path. Otherwise, trust query_planner's decision about which to use.
+ * always read all the input tuples, so use the cheapest-total path.
+ * Otherwise, trust query_planner's decision about which to use.
*/
if (use_hashed_grouping || !sorted_path)
best_path = cheapest_path;
@@ -829,10 +824,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
best_path = sorted_path;
/*
- * Check to see if it's possible to optimize MIN/MAX aggregates.
- * If so, we will forget all the work we did so far to choose a
- * "regular" path ... but we had to do it anyway to be able to
- * tell which way is cheaper.
+ * Check to see if it's possible to optimize MIN/MAX aggregates. If
+ * so, we will forget all the work we did so far to choose a "regular"
+ * path ... but we had to do it anyway to be able to tell which way is
+ * cheaper.
*/
result_plan = optimize_minmax_aggregates(root,
tlist,
@@ -840,8 +835,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
if (result_plan != NULL)
{
/*
- * optimize_minmax_aggregates generated the full plan, with
- * the right tlist, and it has no sort order.
+ * optimize_minmax_aggregates generated the full plan, with the
+ * right tlist, and it has no sort order.
*/
current_pathkeys = NIL;
}
@@ -985,8 +980,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* GROUP BY without aggregation, so insert a group node (plus
* the appropriate sort node, if necessary).
*
- * Add an explicit sort if we couldn't make the path come
- * out the way the GROUP node needs it.
+ * Add an explicit sort if we couldn't make the path come out the
+ * way the GROUP node needs it.
*/
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@@ -1014,11 +1009,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* This is a degenerate case in which we are supposed to emit
* either 0 or 1 row depending on whether HAVING succeeds.
* Furthermore, there cannot be any variables in either HAVING
- * or the targetlist, so we actually do not need the FROM table
- * at all! We can just throw away the plan-so-far and generate
- * a Result node. This is a sufficiently unusual corner case
- * that it's not worth contorting the structure of this routine
- * to avoid having to generate the plan in the first place.
+ * or the targetlist, so we actually do not need the FROM
+ * table at all! We can just throw away the plan-so-far and
+ * generate a Result node. This is a sufficiently unusual
+ * corner case that it's not worth contorting the structure of
+ * this routine to avoid having to generate the plan in the
+ * first place.
*/
result_plan = (Plan *) make_result(tlist,
parse->havingQual,
@@ -1028,8 +1024,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
} /* end of if (setOperations) */
/*
- * If we were not able to make the plan come out in the right order,
- * add an explicit sort step.
+ * If we were not able to make the plan come out in the right order, add
+ * an explicit sort step.
*/
if (parse->sortClause)
{
@@ -1051,9 +1047,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan = (Plan *) make_unique(result_plan, parse->distinctClause);
/*
- * If there was grouping or aggregation, leave plan_rows as-is
- * (ie, assume the result was already mostly unique). If not,
- * use the number of distinct-groups calculated by query_planner.
+ * If there was grouping or aggregation, leave plan_rows as-is (ie,
+ * assume the result was already mostly unique). If not, use the
+ * number of distinct-groups calculated by query_planner.
*/
if (!parse->groupClause && !root->hasHavingQual && !parse->hasAggs)
result_plan->plan_rows = dNumGroups;
@@ -1072,8 +1068,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
/*
- * Return the actual output ordering in query_pathkeys for possible
- * use by an outer query level.
+ * Return the actual output ordering in query_pathkeys for possible use by
+ * an outer query level.
*/
root->query_pathkeys = current_pathkeys;
@@ -1084,7 +1080,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est. These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -1093,7 +1089,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query. This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
@@ -1120,7 +1116,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
if (((Const *) est)->constisnull)
{
/* NULL indicates LIMIT ALL, ie, no limit */
- *count_est = 0; /* treat as not present */
+ *count_est = 0; /* treat as not present */
}
else
{
@@ -1143,7 +1139,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
if (((Const *) est)->constisnull)
{
/* Treat NULL as no offset; the executor will too */
- *offset_est = 0; /* treat as not present */
+ *offset_est = 0; /* treat as not present */
}
else
{
@@ -1217,11 +1213,11 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
- * from the LIMIT case: here, we need to increase rather than
- * decrease the caller's tuple_fraction, because the OFFSET acts
- * to cause more tuples to be fetched instead of fewer. This only
- * matters if we got a tuple_fraction > 0, however.
+ * We have an OFFSET but no LIMIT. This acts entirely differently
+ * from the LIMIT case: here, we need to increase rather than decrease
+ * the caller's tuple_fraction, because the OFFSET acts to cause more
+ * tuples to be fetched instead of fewer. This only matters if we got
+ * a tuple_fraction > 0, however.
*
* As above, use 10% if OFFSET is present but unestimatable.
*/
@@ -1232,9 +1228,9 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
- * fractional and the other absolute, we want to take the larger,
- * and we heuristically assume that's the fractional one.
+ * together; likewise if they are both fractional. If one is
+ * fractional and the other absolute, we want to take the larger, and
+ * we heuristically assume that's the fractional one.
*/
if (tuple_fraction >= 1.0)
{
@@ -1260,7 +1256,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/* both fractional, so add them together */
tuple_fraction += limit_fraction;
if (tuple_fraction >= 1.0)
- tuple_fraction = 0.0; /* assume fetch all */
+ tuple_fraction = 0.0; /* assume fetch all */
}
}
}
@@ -1303,9 +1299,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
* Don't do it if it doesn't look like the hashtable will fit into
* work_mem.
*
- * Beware here of the possibility that cheapest_path->parent is NULL.
- * This could happen if user does something silly like
- * SELECT 'foo' GROUP BY 1;
+ * Beware here of the possibility that cheapest_path->parent is NULL. This
+ * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
*/
if (cheapest_path->parent)
{
@@ -1314,8 +1309,8 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
}
else
{
- cheapest_path_rows = 1; /* assume non-set result */
- cheapest_path_width = 100; /* arbitrary */
+ cheapest_path_rows = 1; /* assume non-set result */
+ cheapest_path_width = 100; /* arbitrary */
}
/* Estimate per-hash-entry space at tuple width... */
@@ -1329,23 +1324,19 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
return false;
/*
- * See if the estimated cost is no more than doing it the other way.
- * While avoiding the need for sorted input is usually a win, the fact
- * that the output won't be sorted may be a loss; so we need to do an
- * actual cost comparison.
+ * See if the estimated cost is no more than doing it the other way. While
+ * avoiding the need for sorted input is usually a win, the fact that the
+ * output won't be sorted may be a loss; so we need to do an actual cost
+ * comparison.
*
- * We need to consider
- * cheapest_path + hashagg [+ final sort]
- * versus either
- * cheapest_path [+ sort] + group or agg [+ final sort]
- * or
- * presorted_path + group or agg [+ final sort]
- * where brackets indicate a step that may not be needed. We assume
- * query_planner() will have returned a presorted path only if it's a
- * winner compared to cheapest_path for this purpose.
+ * We need to consider cheapest_path + hashagg [+ final sort] versus either
+ * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path
+ * + group or agg [+ final sort] where brackets indicate a step that may
+ * not be needed. We assume query_planner() will have returned a presorted
+ * path only if it's a winner compared to cheapest_path for this purpose.
*
- * These path variables are dummies that just hold cost fields; we don't
- * make actual Paths for these steps.
+ * These path variables are dummies that just hold cost fields; we don't make
+ * actual Paths for these steps.
*/
cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
numGroupCols, dNumGroups,
@@ -1502,8 +1493,8 @@ make_subplanTargetList(PlannerInfo *root,
/*
* Otherwise, start with a "flattened" tlist (having just the vars
- * mentioned in the targetlist and HAVING qual --- but not upper-
- * level Vars; they will be replaced by Params later on).
+ * mentioned in the targetlist and HAVING qual --- but not upper-level
+ * Vars; they will be replaced by Params later on).
*/
sub_tlist = flatten_tlist(tlist);
extravars = pull_var_clause(parse->havingQual, false);
@@ -1513,9 +1504,8 @@ make_subplanTargetList(PlannerInfo *root,
/*
* If grouping, create sub_tlist entries for all GROUP BY expressions
- * (GROUP BY items that are simple Vars should be in the list
- * already), and make an array showing where the group columns are in
- * the sub_tlist.
+ * (GROUP BY items that are simple Vars should be in the list already),
+ * and make an array showing where the group columns are in the sub_tlist.
*/
numCols = list_length(parse->groupClause);
if (numCols > 0)
@@ -1634,7 +1624,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Assert(orig_tlist_item != NULL);
orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
orig_tlist_item = lnext(orig_tlist_item);
- if (orig_tle->resjunk) /* should not happen */
+ if (orig_tle->resjunk) /* should not happen */
elog(ERROR, "resjunk output columns are not implemented");
Assert(new_tle->resno == orig_tle->resno);
new_tle->ressortgroupref = orig_tle->ressortgroupref;
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index fe01555a3c4..2ca616e118b 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.114 2005/09/05 18:59:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.115 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@ typedef struct
int num_vars; /* number of plain Var tlist entries */
bool has_non_vars; /* are there non-plain-Var entries? */
/* array of num_vars entries: */
- tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
+ tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
} indexed_tlist; /* VARIABLE LENGTH STRUCT */
typedef struct
@@ -64,28 +64,28 @@ static void fix_expr_references(Plan *plan, Node *node);
static bool fix_expr_references_walker(Node *node, void *context);
static void set_join_references(Join *join, List *rtable);
static void set_inner_join_references(Plan *inner_plan,
- List *rtable,
- indexed_tlist *outer_itlist);
+ List *rtable,
+ indexed_tlist *outer_itlist);
static void set_uppernode_references(Plan *plan, Index subvarno);
static indexed_tlist *build_tlist_index(List *tlist);
static Var *search_indexed_tlist_for_var(Var *var,
- indexed_tlist *itlist,
- Index newvarno);
+ indexed_tlist *itlist,
+ Index newvarno);
static Var *search_indexed_tlist_for_non_var(Node *node,
- indexed_tlist *itlist,
- Index newvarno);
+ indexed_tlist *itlist,
+ Index newvarno);
static List *join_references(List *clauses,
- List *rtable,
- indexed_tlist *outer_itlist,
- indexed_tlist *inner_itlist,
- Index acceptable_rel);
+ List *rtable,
+ indexed_tlist *outer_itlist,
+ indexed_tlist *inner_itlist,
+ Index acceptable_rel);
static Node *join_references_mutator(Node *node,
join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
- indexed_tlist *subplan_itlist,
- Index subvarno);
+ indexed_tlist *subplan_itlist,
+ Index subvarno);
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
- replace_vars_with_subplan_refs_context *context);
+ replace_vars_with_subplan_refs_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
@@ -99,7 +99,7 @@ static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor. We update Vars in upper plan nodes
* to refer to the outputs of their subplans, and we compute regproc OIDs
@@ -150,22 +150,22 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan,
(Node *) ((IndexScan *) plan)->indexqual);
fix_expr_references(plan,
- (Node *) ((IndexScan *) plan)->indexqualorig);
+ (Node *) ((IndexScan *) plan)->indexqualorig);
break;
case T_BitmapIndexScan:
/* no need to fix targetlist and qual */
Assert(plan->targetlist == NIL);
Assert(plan->qual == NIL);
fix_expr_references(plan,
- (Node *) ((BitmapIndexScan *) plan)->indexqual);
+ (Node *) ((BitmapIndexScan *) plan)->indexqual);
fix_expr_references(plan,
- (Node *) ((BitmapIndexScan *) plan)->indexqualorig);
+ (Node *) ((BitmapIndexScan *) plan)->indexqualorig);
break;
case T_BitmapHeapScan:
fix_expr_references(plan, (Node *) plan->targetlist);
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan,
- (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
+ (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
break;
case T_TidScan:
fix_expr_references(plan, (Node *) plan->targetlist);
@@ -200,7 +200,7 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((MergeJoin *) plan)->mergeclauses);
+ (Node *) ((MergeJoin *) plan)->mergeclauses);
break;
case T_HashJoin:
set_join_references((Join *) plan, rtable);
@@ -208,7 +208,7 @@ set_plan_references(Plan *plan, List *rtable)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((HashJoin *) plan)->hashclauses);
+ (Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Hash:
case T_Material:
@@ -218,24 +218,24 @@ set_plan_references(Plan *plan, List *rtable)
/*
* These plan types don't actually bother to evaluate their
- * targetlists (because they just return their unmodified
- * input tuples). The optimizer is lazy about creating really
- * valid targetlists for them --- it tends to just put in a
- * pointer to the child plan node's tlist. Hence, we leave
- * the tlist alone. In particular, we do not want to process
- * subplans in the tlist, since we will likely end up reprocessing
- * subplans that also appear in lower levels of the plan tree!
+ * targetlists (because they just return their unmodified input
+ * tuples). The optimizer is lazy about creating really valid
+ * targetlists for them --- it tends to just put in a pointer to
+ * the child plan node's tlist. Hence, we leave the tlist alone.
+ * In particular, we do not want to process subplans in the tlist,
+ * since we will likely end up reprocessing subplans that also
+ * appear in lower levels of the plan tree!
*
- * Since these plan types don't check quals either, we should
- * not find any qual expression attached to them.
+ * Since these plan types don't check quals either, we should not
+ * find any qual expression attached to them.
*/
Assert(plan->qual == NIL);
break;
case T_Limit:
/*
- * Like the plan types above, Limit doesn't evaluate its tlist
- * or quals. It does have live expressions for limit/offset,
+ * Like the plan types above, Limit doesn't evaluate its tlist or
+ * quals. It does have live expressions for limit/offset,
* however.
*/
Assert(plan->qual == NIL);
@@ -251,8 +251,8 @@ set_plan_references(Plan *plan, List *rtable)
case T_Result:
/*
- * Result may or may not have a subplan; no need to fix up
- * subplan references if it hasn't got one...
+ * Result may or may not have a subplan; no need to fix up subplan
+ * references if it hasn't got one...
*
* XXX why does Result use a different subvarno from Agg/Group?
*/
@@ -300,9 +300,9 @@ set_plan_references(Plan *plan, List *rtable)
* NOTE: it is essential that we recurse into child plans AFTER we set
* subplan references in this plan's tlist and quals. If we did the
* reference-adjustments bottom-up, then we would fail to match this
- * plan's var nodes against the already-modified nodes of the
- * children. Fortunately, that consideration doesn't apply to SubPlan
- * nodes; else we'd need two passes over the expression trees.
+ * plan's var nodes against the already-modified nodes of the children.
+ * Fortunately, that consideration doesn't apply to SubPlan nodes; else
+ * we'd need two passes over the expression trees.
*/
plan->lefttree = set_plan_references(plan->lefttree, rtable);
plan->righttree = set_plan_references(plan->righttree, rtable);
@@ -339,8 +339,8 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
rte->subquery->rtable);
/*
- * We have to process any initplans too; set_plan_references can't do
- * it for us because of the possibility of double-processing.
+ * We have to process any initplans too; set_plan_references can't do it
+ * for us because of the possibility of double-processing.
*/
foreach(l, plan->scan.plan.initPlan)
{
@@ -353,12 +353,12 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
if (trivial_subqueryscan(plan))
{
/*
- * We can omit the SubqueryScan node and just pull up the subplan.
- * We have to merge its rtable into the outer rtable, which means
+ * We can omit the SubqueryScan node and just pull up the subplan. We
+ * have to merge its rtable into the outer rtable, which means
* adjusting varnos throughout the subtree.
*/
- int rtoffset = list_length(rtable);
- List *sub_rtable;
+ int rtoffset = list_length(rtable);
+ List *sub_rtable;
sub_rtable = copyObject(rte->subquery->rtable);
range_table_walker(sub_rtable,
@@ -382,11 +382,11 @@ set_subqueryscan_references(SubqueryScan *plan, List *rtable)
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
- * set_plan_references would otherwise have done on it. Notice
- * we do not do set_uppernode_references() here, because a
- * SubqueryScan will always have been created with correct
- * references to its subplan's outputs to begin with.
+ * Keep the SubqueryScan node. We have to do the processing that
+ * set_plan_references would otherwise have done on it. Notice we do
+ * not do set_uppernode_references() here, because a SubqueryScan will
+ * always have been created with correct references to its subplan's
+ * outputs to begin with.
*/
result = (Plan *) plan;
@@ -532,9 +532,9 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
case T_SetOp:
/*
- * Even though the targetlist won't be used by the executor,
- * we fix it up for possible use by EXPLAIN (not to mention
- * ease of debugging --- wrong varnos are very confusing).
+ * Even though the targetlist won't be used by the executor, we
+ * fix it up for possible use by EXPLAIN (not to mention ease of
+ * debugging --- wrong varnos are very confusing).
*/
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
Assert(plan->qual == NIL);
@@ -542,8 +542,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
case T_Limit:
/*
- * Like the plan types above, Limit doesn't evaluate its tlist
- * or quals. It does have live expressions for limit/offset,
+ * Like the plan types above, Limit doesn't evaluate its tlist or
+ * quals. It does have live expressions for limit/offset,
* however.
*/
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
@@ -590,8 +590,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
/*
* Now recurse into child plans.
*
- * We don't need to (and in fact mustn't) recurse into subqueries,
- * so no need to examine initPlan list.
+ * We don't need to (and in fact mustn't) recurse into subqueries, so no need
+ * to examine initPlan list.
*/
adjust_plan_varnos(plan->lefttree, rtoffset);
adjust_plan_varnos(plan->righttree, rtoffset);
@@ -603,7 +603,7 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
*
* This is different from the rewriter's OffsetVarNodes in that it has to
* work on an already-planned expression tree; in particular, we should not
- * disturb INNER and OUTER references. On the other hand, we don't have to
+ * disturb INNER and OUTER references. On the other hand, we don't have to
* recurse into subqueries nor deal with outer-level Vars, so it's pretty
* simple.
*/
@@ -763,10 +763,10 @@ set_inner_join_references(Plan *inner_plan,
if (IsA(inner_plan, IndexScan))
{
/*
- * An index is being used to reduce the number of tuples
- * scanned in the inner relation. If there are join clauses
- * being used with the index, we must update their outer-rel
- * var nodes to refer to the outer side of the join.
+ * An index is being used to reduce the number of tuples scanned in
+ * the inner relation. If there are join clauses being used with the
+ * index, we must update their outer-rel var nodes to refer to the
+ * outer side of the join.
*/
IndexScan *innerscan = (IndexScan *) inner_plan;
List *indexqualorig = innerscan->indexqualorig;
@@ -789,9 +789,9 @@ set_inner_join_references(Plan *inner_plan,
innerrel);
/*
- * We must fix the inner qpqual too, if it has join
- * clauses (this could happen if special operators are
- * involved: some indexquals may get rechecked as qpquals).
+ * We must fix the inner qpqual too, if it has join clauses (this
+ * could happen if special operators are involved: some indexquals
+ * may get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@@ -832,11 +832,11 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapHeapScan))
{
/*
- * The inner side is a bitmap scan plan. Fix the top node,
- * and recurse to get the lower nodes.
+ * The inner side is a bitmap scan plan. Fix the top node, and
+ * recurse to get the lower nodes.
*
- * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig
- * if they are duplicated in qpqual, so must test these independently.
+ * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if
+ * they are duplicated in qpqual, so must test these independently.
*/
BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan;
Index innerrel = innerscan->scan.scanrelid;
@@ -851,9 +851,9 @@ set_inner_join_references(Plan *inner_plan,
innerrel);
/*
- * We must fix the inner qpqual too, if it has join
- * clauses (this could happen if special operators are
- * involved: some indexquals may get rechecked as qpquals).
+ * We must fix the inner qpqual too, if it has join clauses (this
+ * could happen if special operators are involved: some indexquals may
+ * get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@@ -870,8 +870,8 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapAnd))
{
/* All we need do here is recurse */
- BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
- ListCell *l;
+ BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
+ ListCell *l;
foreach(l, innerscan->bitmapplans)
{
@@ -883,8 +883,8 @@ set_inner_join_references(Plan *inner_plan,
else if (IsA(inner_plan, BitmapOr))
{
/* All we need do here is recurse */
- BitmapOr *innerscan = (BitmapOr *) inner_plan;
- ListCell *l;
+ BitmapOr *innerscan = (BitmapOr *) inner_plan;
+ ListCell *l;
foreach(l, innerscan->bitmapplans)
{
@@ -963,7 +963,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
@@ -994,7 +994,7 @@ build_tlist_index(List *tlist)
if (tle->expr && IsA(tle->expr, Var))
{
- Var *var = (Var *) tle->expr;
+ Var *var = (Var *) tle->expr;
vinfo->varno = var->varno;
vinfo->varattno = var->varattno;
@@ -1068,7 +1068,7 @@ search_indexed_tlist_for_non_var(Node *node,
exprType((Node *) tle->expr),
exprTypmod((Node *) tle->expr),
0);
- newvar->varnoold = 0; /* wasn't ever a plain Var */
+ newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varoattno = 0;
return newvar;
}
@@ -1213,7 +1213,7 @@ replace_vars_with_subplan_refs(Node *node,
static Node *
replace_vars_with_subplan_refs_mutator(Node *node,
- replace_vars_with_subplan_refs_context *context)
+ replace_vars_with_subplan_refs_context *context)
{
Var *newvar;
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index ec037db514c..b0dc9c5bf7f 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.99 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,19 +110,18 @@ replace_outer_var(Var *var)
abslevel = PlannerQueryLevel - var->varlevelsup;
/*
- * If there's already a PlannerParamList entry for this same Var, just
- * use it. NOTE: in sufficiently complex querytrees, it is possible
- * for the same varno/abslevel to refer to different RTEs in different
- * parts of the parsetree, so that different fields might end up
- * sharing the same Param number. As long as we check the vartype as
- * well, I believe that this sort of aliasing will cause no trouble.
- * The correct field should get stored into the Param slot at
- * execution in each part of the tree.
+ * If there's already a PlannerParamList entry for this same Var, just use
+ * it. NOTE: in sufficiently complex querytrees, it is possible for the
+ * same varno/abslevel to refer to different RTEs in different parts of
+ * the parsetree, so that different fields might end up sharing the same
+ * Param number. As long as we check the vartype as well, I believe that
+ * this sort of aliasing will cause no trouble. The correct field should
+ * get stored into the Param slot at execution in each part of the tree.
*
- * We also need to demand a match on vartypmod. This does not matter for
- * the Param itself, since those are not typmod-dependent, but it does
- * matter when make_subplan() instantiates a modified copy of the Var
- * for a subplan's args list.
+ * We also need to demand a match on vartypmod. This does not matter for the
+ * Param itself, since those are not typmod-dependent, but it does matter
+ * when make_subplan() instantiates a modified copy of the Var for a
+ * subplan's args list.
*/
i = 0;
foreach(ppl, PlannerParamList)
@@ -179,8 +178,8 @@ replace_outer_agg(Aggref *agg)
abslevel = PlannerQueryLevel - agg->agglevelsup;
/*
- * It does not seem worthwhile to try to match duplicate outer aggs.
- * Just make a new slot every time.
+ * It does not seem worthwhile to try to match duplicate outer aggs. Just
+ * make a new slot every time.
*/
agg = (Aggref *) copyObject(agg);
IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
@@ -253,33 +252,32 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to
- * resolve the fact that the parser can generate trees with multiple
- * links to the same sub-Query node, but the planner wants to scribble
- * on the Query. Try to clean this up when we do querytree redesign...
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * the fact that the parser can generate trees with multiple links to the
+ * same sub-Query node, but the planner wants to scribble on the Query.
+ * Try to clean this up when we do querytree redesign...
*/
subquery = (Query *) copyObject(subquery);
/*
- * For an EXISTS subplan, tell lower-level planner to expect that only
- * the first tuple will be retrieved. For ALL and ANY subplans, we
- * will be able to stop evaluating if the test condition fails, so
- * very often not all the tuples will be retrieved; for lack of a
- * better idea, specify 50% retrieval. For EXPR and MULTIEXPR
- * subplans, use default behavior (we're only expecting one row out,
- * anyway).
+ * For an EXISTS subplan, tell lower-level planner to expect that only the
+ * first tuple will be retrieved. For ALL and ANY subplans, we will be
+ * able to stop evaluating if the test condition fails, so very often not
+ * all the tuples will be retrieved; for lack of a better idea, specify
+ * 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior
+ * (we're only expecting one row out, anyway).
*
- * NOTE: if you change these numbers, also change cost_qual_eval_walker()
- * in path/costsize.c.
+ * NOTE: if you change these numbers, also change cost_qual_eval_walker() in
+ * path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
- * materialize its result below. In that case it would've been better
- * to specify full retrieval. At present, however, we can only detect
+ * materialize its result below. In that case it would've been better to
+ * specify full retrieval. At present, however, we can only detect
* correlation or lack of it after we've made the subplan :-(. Perhaps
- * detection of correlation should be done as a separate step.
- * Meanwhile, we don't want to be too optimistic about the percentage
- * of tuples retrieved, for fear of selecting a plan that's bad for
- * the materialization case.
+ * detection of correlation should be done as a separate step. Meanwhile,
+ * we don't want to be too optimistic about the percentage of tuples
+ * retrieved, for fear of selecting a plan that's bad for the
+ * materialization case.
*/
if (slink->subLinkType == EXISTS_SUBLINK)
tuple_fraction = 1.0; /* just like a LIMIT 1 */
@@ -294,8 +292,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
*/
node->plan = plan = subquery_planner(subquery, tuple_fraction, NULL);
- node->plan_id = PlannerPlanId++; /* Assign unique ID to this
- * SubPlan */
+ node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */
node->rtable = subquery->rtable;
@@ -314,8 +311,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
node->args = NIL;
/*
- * Make parParam list of params that current query level will pass to
- * this child plan.
+ * Make parParam list of params that current query level will pass to this
+ * child plan.
*/
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)
@@ -328,13 +325,12 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
bms_free(tmpset);
/*
- * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
- * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or
- * ARRAY, we just produce a Param referring to the result of
- * evaluating the initPlan. For MULTIEXPR, we must build an AND or
- * OR-clause of the individual comparison operators, using the
- * appropriate lefthand side expressions and Params for the initPlan's
- * target items.
+ * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or
+ * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY,
+ * we just produce a Param referring to the result of evaluating the
+ * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
+ * individual comparison operators, using the appropriate lefthand side
+ * expressions and Params for the initPlan's target items.
*/
if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK)
{
@@ -387,9 +383,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
PlannerInitPlan = lappend(PlannerInitPlan, node);
/*
- * The executable expressions are returned to become part of the
- * outer plan's expression tree; they are not kept in the initplan
- * node.
+ * The executable expressions are returned to become part of the outer
+ * plan's expression tree; they are not kept in the initplan node.
*/
if (list_length(exprs) > 1)
result = (Node *) (node->useOr ? make_orclause(exprs) :
@@ -403,22 +398,22 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
ListCell *l;
/*
- * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types
- * to initPlans, even when they are uncorrelated or undirect
- * correlated, because we need to scan the output of the subplan
- * for each outer tuple. But if it's an IN (= ANY) test, we might
- * be able to use a hashtable to avoid comparing all the tuples.
+ * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to
+ * initPlans, even when they are uncorrelated or undirect correlated,
+ * because we need to scan the output of the subplan for each outer
+ * tuple. But if it's an IN (= ANY) test, we might be able to use a
+ * hashtable to avoid comparing all the tuples.
*/
if (subplan_is_hashable(slink, node))
node->useHashTable = true;
/*
- * Otherwise, we have the option to tack a MATERIAL node onto the
- * top of the subplan, to reduce the cost of reading it
- * repeatedly. This is pointless for a direct-correlated subplan,
- * since we'd have to recompute its results each time anyway. For
- * uncorrelated/undirect correlated subplans, we add MATERIAL unless
- * the subplan's top plan node would materialize its output anyway.
+ * Otherwise, we have the option to tack a MATERIAL node onto the top
+ * of the subplan, to reduce the cost of reading it repeatedly. This
+ * is pointless for a direct-correlated subplan, since we'd have to
+ * recompute its results each time anyway. For uncorrelated/undirect
+ * correlated subplans, we add MATERIAL unless the subplan's top plan
+ * node would materialize its output anyway.
*/
else if (node->parParam == NIL)
{
@@ -455,9 +450,9 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
PlannerParamItem *pitem = list_nth(PlannerParamList, lfirst_int(l));
/*
- * The Var or Aggref has already been adjusted to have the
- * correct varlevelsup or agglevelsup. We probably don't even
- * need to copy it again, but be safe.
+ * The Var or Aggref has already been adjusted to have the correct
+ * varlevelsup or agglevelsup. We probably don't even need to
+ * copy it again, but be safe.
*/
args = lappend(args, copyObject(pitem->item));
}
@@ -545,8 +540,8 @@ convert_sublink_opers(List *lefthand, List *operOids,
*
* Note: we use make_op_expr in case runtime type conversion function
* calls must be inserted for this operator! (But we are not
- * expecting to have to resolve unknown Params, so it's okay to
- * pass a null pstate.)
+ * expecting to have to resolve unknown Params, so it's okay to pass a
+ * null pstate.)
*/
result = lappend(result,
make_op_expr(NULL,
@@ -580,8 +575,8 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
/*
* The sublink type must be "= ANY" --- that is, an IN operator. (We
* require the operator name to be unqualified, which may be overly
- * paranoid, or may not be.) XXX since we also check that the
- * operators are hashable, the test on operator name may be redundant?
+ * paranoid, or may not be.) XXX since we also check that the operators
+ * are hashable, the test on operator name may be redundant?
*/
if (slink->subLinkType != ANY_SUBLINK)
return false;
@@ -591,15 +586,15 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
/*
* The subplan must not have any direct correlation vars --- else we'd
- * have to recompute its output each time, so that the hashtable
- * wouldn't gain anything.
+ * have to recompute its output each time, so that the hashtable wouldn't
+ * gain anything.
*/
if (node->parParam != NIL)
return false;
/*
- * The estimated size of the subquery result must fit in work_mem.
- * (XXX what about hashtable overhead?)
+ * The estimated size of the subquery result must fit in work_mem. (XXX
+ * what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
@@ -607,18 +602,17 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
return false;
/*
- * The combining operators must be hashable, strict, and
- * self-commutative. The need for hashability is obvious, since we
- * want to use hashing. Without strictness, behavior in the presence
- * of nulls is too unpredictable. (We actually must assume even more
- * than plain strictness, see nodeSubplan.c for details.) And
- * commutativity ensures that the left and right datatypes are the
- * same; this allows us to assume that the combining operators are
- * equality for the righthand datatype, so that they can be used to
- * compare righthand tuples as well as comparing lefthand to righthand
- * tuples. (This last restriction could be relaxed by using two
- * different sets of operators with the hash table, but there is no
- * obvious usefulness to that at present.)
+ * The combining operators must be hashable, strict, and self-commutative.
+ * The need for hashability is obvious, since we want to use hashing.
+ * Without strictness, behavior in the presence of nulls is too
+ * unpredictable. (We actually must assume even more than plain
+ * strictness, see nodeSubplan.c for details.) And commutativity ensures
+ * that the left and right datatypes are the same; this allows us to
+ * assume that the combining operators are equality for the righthand
+ * datatype, so that they can be used to compare righthand tuples as well
+ * as comparing lefthand to righthand tuples. (This last restriction
+ * could be relaxed by using two different sets of operators with the hash
+ * table, but there is no obvious usefulness to that at present.)
*/
foreach(l, slink->operOids)
{
@@ -679,24 +673,24 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
return NULL;
/*
- * The sub-select must not refer to any Vars of the parent query.
- * (Vars of higher levels should be okay, though.)
+ * The sub-select must not refer to any Vars of the parent query. (Vars of
+ * higher levels should be okay, though.)
*/
if (contain_vars_of_level((Node *) subselect, 1))
return NULL;
/*
- * The left-hand expressions must contain some Vars of the current
- * query, else it's not gonna be a join.
+ * The left-hand expressions must contain some Vars of the current query,
+ * else it's not gonna be a join.
*/
left_varnos = pull_varnos((Node *) sublink->lefthand);
if (bms_is_empty(left_varnos))
return NULL;
/*
- * The left-hand expressions mustn't be volatile. (Perhaps we should
- * test the combining operators, too? We'd only need to point the
- * function directly at the sublink ...)
+ * The left-hand expressions mustn't be volatile. (Perhaps we should test
+ * the combining operators, too? We'd only need to point the function
+ * directly at the sublink ...)
*/
if (contain_volatile_functions((Node *) sublink->lefthand))
return NULL;
@@ -704,10 +698,10 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
/*
* Okay, pull up the sub-select into top range table and jointree.
*
- * We rely here on the assumption that the outer query has no references
- * to the inner (necessarily true, other than the Vars that we build
- * below). Therefore this is a lot easier than what
- * pull_up_subqueries has to go through.
+ * We rely here on the assumption that the outer query has no references to
+ * the inner (necessarily true, other than the Vars that we build below).
+ * Therefore this is a lot easier than what pull_up_subqueries has to go
+ * through.
*/
rte = addRangeTableEntryForSubquery(NULL,
subselect,
@@ -729,8 +723,8 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
/*
* Build the result qual expressions. As a side effect,
- * ininfo->sub_targetlist is filled with a list of Vars representing
- * the subselect outputs.
+ * ininfo->sub_targetlist is filled with a list of Vars representing the
+ * subselect outputs.
*/
exprs = convert_sublink_opers(sublink->lefthand,
sublink->operOids,
@@ -811,8 +805,7 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
List *lefthand;
/*
- * First, recursively process the lefthand-side expressions, if
- * any.
+ * First, recursively process the lefthand-side expressions, if any.
*/
locTopQual = false;
lefthand = (List *)
@@ -825,22 +818,22 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
}
/*
- * We should never see a SubPlan expression in the input (since this
- * is the very routine that creates 'em to begin with). We shouldn't
- * find ourselves invoked directly on a Query, either.
+ * We should never see a SubPlan expression in the input (since this is
+ * the very routine that creates 'em to begin with). We shouldn't find
+ * ourselves invoked directly on a Query, either.
*/
Assert(!is_subplan(node));
Assert(!IsA(node, Query));
/*
* Because make_subplan() could return an AND or OR clause, we have to
- * take steps to preserve AND/OR flatness of a qual. We assume the
- * input has been AND/OR flattened and so we need no recursion here.
+ * take steps to preserve AND/OR flatness of a qual. We assume the input
+ * has been AND/OR flattened and so we need no recursion here.
*
* If we recurse down through anything other than an AND node, we are
- * definitely not at top qual level anymore. (Due to the coding here,
- * we will not get called on the List subnodes of an AND, so no check
- * is needed for List.)
+ * definitely not at top qual level anymore. (Due to the coding here, we
+ * will not get called on the List subnodes of an AND, so no check is
+ * needed for List.)
*/
if (and_clause(node))
{
@@ -909,8 +902,8 @@ SS_finalize_plan(Plan *plan, List *rtable)
/*
* First, scan the param list to discover the sets of params that are
- * available from outer query levels and my own query level. We do
- * this once to save time in the per-plan recursion steps.
+ * available from outer query levels and my own query level. We do this
+ * once to save time in the per-plan recursion steps.
*/
paramid = 0;
foreach(l, PlannerParamList)
@@ -942,13 +935,12 @@ SS_finalize_plan(Plan *plan, List *rtable)
bms_free(valid_params);
/*
- * Finally, attach any initPlans to the topmost plan node,
- * and add their extParams to the topmost node's, too.
+ * Finally, attach any initPlans to the topmost plan node, and add their
+ * extParams to the topmost node's, too.
*
- * We also add the total_cost of each initPlan to the startup cost of
- * the top node. This is a conservative overestimate, since in
- * fact each initPlan might be executed later than plan startup,
- * or even not at all.
+ * We also add the total_cost of each initPlan to the startup cost of the top
+ * node. This is a conservative overestimate, since in fact each initPlan
+ * might be executed later than plan startup, or even not at all.
*/
plan->initPlan = PlannerInitPlan;
PlannerInitPlan = NIL; /* make sure they're not attached twice */
@@ -988,10 +980,10 @@ finalize_plan(Plan *plan, List *rtable,
context.outer_params = outer_params;
/*
- * When we call finalize_primnode, context.paramids sets are
- * automatically merged together. But when recursing to self, we have
- * to do it the hard way. We want the paramids set to include params
- * in subplans as well as at this level.
+ * When we call finalize_primnode, context.paramids sets are automatically
+ * merged together. But when recursing to self, we have to do it the hard
+ * way. We want the paramids set to include params in subplans as well as
+ * at this level.
*/
/* Find params in targetlist and qual */
@@ -1011,17 +1003,18 @@ finalize_plan(Plan *plan, List *rtable,
&context);
/*
- * we need not look at indexqualorig, since it will have the
- * same param references as indexqual.
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual.
*/
break;
case T_BitmapIndexScan:
finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual,
&context);
+
/*
- * we need not look at indexqualorig, since it will have the
- * same param references as indexqual.
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual.
*/
break;
@@ -1038,14 +1031,14 @@ finalize_plan(Plan *plan, List *rtable,
case T_SubqueryScan:
/*
- * In a SubqueryScan, SS_finalize_plan has already been run on
- * the subplan by the inner invocation of subquery_planner, so
- * there's no need to do it again. Instead, just pull out the
- * subplan's extParams list, which represents the params it
- * needs from my level and higher levels.
+ * In a SubqueryScan, SS_finalize_plan has already been run on the
+ * subplan by the inner invocation of subquery_planner, so there's
+ * no need to do it again. Instead, just pull out the subplan's
+ * extParams list, which represents the params it needs from my
+ * level and higher levels.
*/
context.paramids = bms_add_members(context.paramids,
- ((SubqueryScan *) plan)->subplan->extParam);
+ ((SubqueryScan *) plan)->subplan->extParam);
break;
case T_FunctionScan:
@@ -1170,8 +1163,8 @@ finalize_plan(Plan *plan, List *rtable,
plan->allParam = context.paramids;
/*
- * For speed at execution time, make sure extParam/allParam are
- * actually NULL if they are empty sets.
+ * For speed at execution time, make sure extParam/allParam are actually
+ * NULL if they are empty sets.
*/
if (bms_is_empty(plan->extParam))
{
@@ -1212,8 +1205,8 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/* Add outer-level params needed by the subplan to paramids */
context->paramids = bms_join(context->paramids,
- bms_intersect(subplan->plan->extParam,
- context->outer_params));
+ bms_intersect(subplan->plan->extParam,
+ context->outer_params));
/* fall through to recurse into subplan args */
}
return expression_tree_walker(node, finalize_primnode,
@@ -1241,7 +1234,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
int paramid;
/*
- * Set up for a new level of subquery. This is just to keep
+ * Set up for a new level of subquery. This is just to keep
* SS_finalize_plan from becoming confused.
*/
PlannerQueryLevel++;
@@ -1262,16 +1255,15 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
node = makeNode(SubPlan);
node->subLinkType = EXPR_SUBLINK;
node->plan = plan;
- node->plan_id = PlannerPlanId++; /* Assign unique ID to this
- * SubPlan */
+ node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */
node->rtable = root->parse->rtable;
PlannerInitPlan = lappend(PlannerInitPlan, node);
/*
- * Make parParam list of params that current query level will pass to
- * this child plan. (In current usage there probably aren't any.)
+ * Make parParam list of params that current query level will pass to this
+ * child plan. (In current usage there probably aren't any.)
*/
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 9624a4ad135..ece6133c144 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.30 2005/08/01 20:31:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -143,8 +143,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
Query *subquery = rte->subquery;
/*
- * Is this a subquery RTE, and if so, is the subquery simple
- * enough to pull up? (If not, do nothing at this node.)
+ * Is this a subquery RTE, and if so, is the subquery simple enough to
+ * pull up? (If not, do nothing at this node.)
*
* If we are inside an outer join, only pull up subqueries whose
* targetlists are nullable --- otherwise substituting their tlist
@@ -153,8 +153,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
*
* XXX This could be improved by generating pseudo-variables for such
* expressions; we'd have to figure out how to get the pseudo-
- * variables evaluated at the right place in the modified plan
- * tree. Fix it someday.
+ * variables evaluated at the right place in the modified plan tree.
+ * Fix it someday.
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_subquery(subquery) &&
@@ -166,53 +166,53 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
ListCell *rt;
/*
- * Need a modifiable copy of the subquery to hack on. Even if
- * we didn't sometimes choose not to pull up below, we must do
- * this to avoid problems if the same subquery is referenced
- * from multiple jointree items (which can't happen normally,
- * but might after rule rewriting).
+ * Need a modifiable copy of the subquery to hack on. Even if we
+ * didn't sometimes choose not to pull up below, we must do this
+ * to avoid problems if the same subquery is referenced from
+ * multiple jointree items (which can't happen normally, but might
+ * after rule rewriting).
*/
subquery = copyObject(subquery);
/*
* Create a PlannerInfo data structure for this subquery.
*
- * NOTE: the next few steps should match the first processing
- * in subquery_planner(). Can we refactor to avoid code
- * duplication, or would that just make things uglier?
+ * NOTE: the next few steps should match the first processing in
+ * subquery_planner(). Can we refactor to avoid code duplication,
+ * or would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
subroot->parse = subquery;
/*
- * Pull up any IN clauses within the subquery's WHERE, so that
- * we don't leave unoptimized INs behind.
+ * Pull up any IN clauses within the subquery's WHERE, so that we
+ * don't leave unoptimized INs behind.
*/
subroot->in_info_list = NIL;
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subroot,
- subquery->jointree->quals);
+ subquery->jointree->quals);
/*
* Recursively pull up the subquery's subqueries, so that this
* routine's processing is complete for its jointree and
* rangetable.
*
- * Note: 'false' is correct here even if we are within an outer
- * join in the upper query; the lower query starts with a
- * clean slate for outer-join semantics.
+ * Note: 'false' is correct here even if we are within an outer join
+ * in the upper query; the lower query starts with a clean slate
+ * for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree,
false);
/*
- * Now we must recheck whether the subquery is still simple
- * enough to pull up. If not, abandon processing it.
+ * Now we must recheck whether the subquery is still simple enough
+ * to pull up. If not, abandon processing it.
*
- * We don't really need to recheck all the conditions involved,
- * but it's easier just to keep this "if" looking the same as
- * the one above.
+ * We don't really need to recheck all the conditions involved, but
+ * it's easier just to keep this "if" looking the same as the one
+ * above.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)))
@@ -224,10 +224,10 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
/*
* Give up, return unmodified RangeTblRef.
*
- * Note: The work we just did will be redone when the
- * subquery gets planned on its own. Perhaps we could
- * avoid that by storing the modified subquery back into
- * the rangetable, but I'm not gonna risk it now.
+ * Note: The work we just did will be redone when the subquery
+ * gets planned on its own. Perhaps we could avoid that by
+ * storing the modified subquery back into the rangetable, but
+ * I'm not gonna risk it now.
*/
return jtnode;
}
@@ -242,8 +242,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
OffsetVarNodes((Node *) subroot->in_info_list, rtoffset, 0);
/*
- * Upper-level vars in subquery are now one level closer to
- * their parent than before.
+ * Upper-level vars in subquery are now one level closer to their
+ * parent than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->in_info_list, -1, 1);
@@ -251,9 +251,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
/*
* Replace all of the top query's references to the subquery's
* outputs with copies of the adjusted subtlist items, being
- * careful not to replace any of the jointree structure.
- * (This'd be a lot cleaner if we could use
- * query_tree_mutator.)
+ * careful not to replace any of the jointree structure. (This'd
+ * be a lot cleaner if we could use query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
@@ -284,9 +283,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
}
/*
- * Now append the adjusted rtable entries to upper query. (We
- * hold off until after fixing the upper rtable entries; no
- * point in running that code on the subquery ones too.)
+ * Now append the adjusted rtable entries to upper query. (We hold
+ * off until after fixing the upper rtable entries; no point in
+ * running that code on the subquery ones too.)
*/
parse->rtable = list_concat(parse->rtable, subquery->rtable);
@@ -295,8 +294,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
* already adjusted the marker values, so just list_concat the
* list.)
*
- * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags,
- * so complain if they are valid but different
+ * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags, so
+ * complain if they are valid but different
*/
if (parse->rowMarks && subquery->rowMarks)
{
@@ -307,7 +306,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
if (parse->rowNoWait != subquery->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both wait and NOWAIT in one query")));
+ errmsg("cannot use both wait and NOWAIT in one query")));
}
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
if (subquery->rowMarks)
@@ -317,10 +316,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
}
/*
- * We also have to fix the relid sets of any parent
- * InClauseInfo nodes. (This could perhaps be done by
- * ResolveNew, but it would clutter that routine's API
- * unreasonably.)
+ * We also have to fix the relid sets of any parent InClauseInfo
+ * nodes. (This could perhaps be done by ResolveNew, but it would
+ * clutter that routine's API unreasonably.)
*/
if (root->in_info_list)
{
@@ -392,8 +390,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
case JOIN_UNION:
/*
- * This is where we fail if upper levels of planner
- * haven't rewritten UNION JOIN as an Append ...
+ * This is where we fail if upper levels of planner haven't
+ * rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -436,8 +434,8 @@ is_simple_subquery(Query *subquery)
return false;
/*
- * Can't pull up a subquery involving grouping, aggregation, sorting,
- * or limiting.
+ * Can't pull up a subquery involving grouping, aggregation, sorting, or
+ * limiting.
*/
if (subquery->hasAggs ||
subquery->groupClause ||
@@ -449,21 +447,20 @@ is_simple_subquery(Query *subquery)
return false;
/*
- * Don't pull up a subquery that has any set-returning functions in
- * its targetlist. Otherwise we might well wind up inserting
- * set-returning functions into places where they mustn't go, such as
- * quals of higher queries.
+ * Don't pull up a subquery that has any set-returning functions in its
+ * targetlist. Otherwise we might well wind up inserting set-returning
+ * functions into places where they mustn't go, such as quals of higher
+ * queries.
*/
if (expression_returns_set((Node *) subquery->targetList))
return false;
/*
* Hack: don't try to pull up a subquery with an empty jointree.
- * query_planner() will correctly generate a Result plan for a
- * jointree that's totally empty, but I don't think the right things
- * happen if an empty FromExpr appears lower down in a jointree. Not
- * worth working hard on this, just to collapse SubqueryScan/Result
- * into Result...
+ * query_planner() will correctly generate a Result plan for a jointree
+ * that's totally empty, but I don't think the right things happen if an
+ * empty FromExpr appears lower down in a jointree. Not worth working hard
+ * on this, just to collapse SubqueryScan/Result into Result...
*/
if (subquery->jointree->fromlist == NIL)
return false;
@@ -545,8 +542,8 @@ resolvenew_in_jointree(Node *jtnode, int varno,
subtlist, CMD_SELECT, 0);
/*
- * We don't bother to update the colvars list, since it won't be
- * used again ...
+ * We don't bother to update the colvars list, since it won't be used
+ * again ...
*/
}
else
@@ -583,14 +580,13 @@ reduce_outer_joins(PlannerInfo *root)
reduce_outer_joins_state *state;
/*
- * To avoid doing strictness checks on more quals than necessary, we
- * want to stop descending the jointree as soon as there are no outer
- * joins below our current point. This consideration forces a
- * two-pass process. The first pass gathers information about which
- * base rels appear below each side of each join clause, and about
- * whether there are outer join(s) below each side of each join
- * clause. The second pass examines qual clauses and changes join
- * types as it descends the tree.
+ * To avoid doing strictness checks on more quals than necessary, we want
+ * to stop descending the jointree as soon as there are no outer joins
+ * below our current point. This consideration forces a two-pass process.
+ * The first pass gathers information about which base rels appear below
+ * each side of each join clause, and about whether there are outer
+ * join(s) below each side of each join clause. The second pass examines
+ * qual clauses and changes join types as it descends the tree.
*/
state = reduce_outer_joins_pass1((Node *) root->parse->jointree);
@@ -768,12 +764,11 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* If this join is (now) inner, we can add any nonnullability
- * constraints its quals provide to those we got from above.
- * But if it is outer, we can only pass down the local
- * constraints into the nullable side, because an outer join
- * never eliminates any rows from its non-nullable side. If
- * it's a FULL join then it doesn't eliminate anything from
- * either side.
+ * constraints its quals provide to those we got from above. But
+ * if it is outer, we can only pass down the local constraints
+ * into the nullable side, because an outer join never eliminates
+ * any rows from its non-nullable side. If it's a FULL join then
+ * it doesn't eliminate anything from either side.
*/
if (jointype != JOIN_FULL)
{
@@ -782,8 +777,7 @@ reduce_outer_joins_pass2(Node *jtnode,
nonnullable_rels);
}
else
- local_nonnullable = NULL; /* no use in calculating
- * it */
+ local_nonnullable = NULL; /* no use in calculating it */
if (left_state->contains_outer)
{
@@ -886,8 +880,8 @@ find_nonnullable_rels(Node *node, bool top_level)
NullTest *expr = (NullTest *) node;
/*
- * IS NOT NULL can be considered strict, but only at top level;
- * else we might have something like NOT (x IS NOT NULL).
+ * IS NOT NULL can be considered strict, but only at top level; else
+ * we might have something like NOT (x IS NOT NULL).
*/
if (top_level && expr->nulltesttype == IS_NOT_NULL)
result = find_nonnullable_rels((Node *) expr->arg, false);
@@ -960,10 +954,10 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
if (child && IsA(child, FromExpr))
{
/*
- * Yes, so do we want to merge it into parent? Always do
- * so if child has just one element (since that doesn't
- * make the parent's list any longer). Otherwise merge if
- * the resulting join list would be no longer than
+ * Yes, so do we want to merge it into parent? Always do so
+ * if child has just one element (since that doesn't make the
+ * parent's list any longer). Otherwise merge if the
+ * resulting join list would be no longer than
* from_collapse_limit.
*/
FromExpr *subf = (FromExpr *) child;
@@ -976,9 +970,9 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
newlist = list_concat(newlist, subf->fromlist);
/*
- * By now, the quals have been converted to
- * implicit-AND lists, so we just need to join the
- * lists. NOTE: we put the pulled-up quals first.
+ * By now, the quals have been converted to implicit-AND
+ * lists, so we just need to join the lists. NOTE: we put
+ * the pulled-up quals first.
*/
f->quals = (Node *) list_concat((List *) subf->quals,
(List *) f->quals);
@@ -1000,8 +994,8 @@ simplify_jointree(PlannerInfo *root, Node *jtnode)
j->rarg = simplify_jointree(root, j->rarg);
/*
- * If it is an outer join, we must not flatten it. An inner join
- * is semantically equivalent to a FromExpr; we convert it to one,
+ * If it is an outer join, we must not flatten it. An inner join is
+ * semantically equivalent to a FromExpr; we convert it to one,
* allowing it to be flattened into its parent, if the resulting
* FromExpr would have no more than join_collapse_limit members.
*/
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index 2c39859a811..9fad52acfe0 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -25,7 +25,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.50 2005/07/29 21:40:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.51 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,10 +73,10 @@ canonicalize_qual(Expr *qual)
return NULL;
/*
- * Push down NOTs. We do this only in the top-level boolean
- * expression, without examining arguments of operators/functions. The
- * main reason for doing this is to expose as much top-level AND/OR
- * structure as we can, so there's no point in descending further.
+ * Push down NOTs. We do this only in the top-level boolean expression,
+ * without examining arguments of operators/functions. The main reason for
+ * doing this is to expose as much top-level AND/OR structure as we can,
+ * so there's no point in descending further.
*/
newqual = find_nots(qual);
@@ -110,12 +110,12 @@ pull_ands(List *andlist)
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ands will have
- * built a new arglist not shared with any other expr. Otherwise
- * we'd need a list_copy here.
+ * built a new arglist not shared with any other expr. Otherwise we'd
+ * need a list_copy here.
*/
if (and_clause(subexpr))
out_list = list_concat(out_list,
- pull_ands(((BoolExpr *) subexpr)->args));
+ pull_ands(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
@@ -142,12 +142,12 @@ pull_ors(List *orlist)
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ors will have
- * built a new arglist not shared with any other expr. Otherwise
- * we'd need a list_copy here.
+ * built a new arglist not shared with any other expr. Otherwise we'd
+ * need a list_copy here.
*/
if (or_clause(subexpr))
out_list = list_concat(out_list,
- pull_ors(((BoolExpr *) subexpr)->args));
+ pull_ors(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
@@ -249,8 +249,8 @@ push_nots(Expr *qual)
{
/*
* Another NOT cancels this NOT, so eliminate the NOT and stop
- * negating this branch. But search the subexpression for more
- * NOTs to simplify.
+ * negating this branch. But search the subexpression for more NOTs
+ * to simplify.
*/
return find_nots(get_notclausearg(qual));
}
@@ -307,8 +307,8 @@ find_duplicate_ors(Expr *qual)
orlist = lappend(orlist, find_duplicate_ors(lfirst(temp)));
/*
- * Don't need pull_ors() since this routine will never introduce
- * an OR where there wasn't one before.
+ * Don't need pull_ors() since this routine will never introduce an OR
+ * where there wasn't one before.
*/
return process_duplicate_ors(orlist);
}
@@ -353,10 +353,10 @@ process_duplicate_ors(List *orlist)
return linitial(orlist);
/*
- * Choose the shortest AND clause as the reference list --- obviously,
- * any subclause not in this clause isn't in all the clauses. If we
- * find a clause that's not an AND, we can treat it as a one-element
- * AND clause, which necessarily wins as shortest.
+ * Choose the shortest AND clause as the reference list --- obviously, any
+ * subclause not in this clause isn't in all the clauses. If we find a
+ * clause that's not an AND, we can treat it as a one-element AND clause,
+ * which necessarily wins as shortest.
*/
foreach(temp, orlist)
{
@@ -386,8 +386,8 @@ process_duplicate_ors(List *orlist)
reference = list_union(NIL, reference);
/*
- * Check each element of the reference list to see if it's in all the
- * OR clauses. Build a new list of winning clauses.
+ * Check each element of the reference list to see if it's in all the OR
+ * clauses. Build a new list of winning clauses.
*/
winners = NIL;
foreach(temp, reference)
@@ -431,13 +431,12 @@ process_duplicate_ors(List *orlist)
/*
* Generate new OR list consisting of the remaining sub-clauses.
*
- * If any clause degenerates to empty, then we have a situation like (A
- * AND B) OR (A), which can be reduced to just A --- that is, the
- * additional conditions in other arms of the OR are irrelevant.
+ * If any clause degenerates to empty, then we have a situation like (A AND
+ * B) OR (A), which can be reduced to just A --- that is, the additional
+ * conditions in other arms of the OR are irrelevant.
*
- * Note that because we use list_difference, any multiple occurrences of
- * a winning clause in an AND sub-clause will be removed
- * automatically.
+ * Note that because we use list_difference, any multiple occurrences of a
+ * winning clause in an AND sub-clause will be removed automatically.
*/
neworlist = NIL;
foreach(temp, orlist)
@@ -475,10 +474,10 @@ process_duplicate_ors(List *orlist)
}
/*
- * Append reduced OR to the winners list, if it's not degenerate,
- * handling the special case of one element correctly (can that really
- * happen?). Also be careful to maintain AND/OR flatness in case we
- * pulled up a sub-sub-OR-clause.
+ * Append reduced OR to the winners list, if it's not degenerate, handling
+ * the special case of one element correctly (can that really happen?).
+ * Also be careful to maintain AND/OR flatness in case we pulled up a
+ * sub-sub-OR-clause.
*/
if (neworlist != NIL)
{
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index fa56c5fc29c..f23d0554e7c 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.77 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.78 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,10 +45,10 @@ static List *expand_targetlist(List *tlist, int command_type,
List *
preprocess_targetlist(PlannerInfo *root, List *tlist)
{
- Query *parse = root->parse;
- int result_relation = parse->resultRelation;
- List *range_table = parse->rtable;
- CmdType command_type = parse->commandType;
+ Query *parse = root->parse;
+ int result_relation = parse->resultRelation;
+ List *range_table = parse->rtable;
+ CmdType command_type = parse->commandType;
/*
* Sanity check: if there is a result relation, it'd better be a real
@@ -63,20 +63,20 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}
/*
- * for heap_formtuple to work, the targetlist must match the exact
- * order of the attributes. We also need to fill in any missing
- * attributes. -ay 10/94
+ * for heap_formtuple to work, the targetlist must match the exact order
+ * of the attributes. We also need to fill in any missing attributes.
+ * -ay 10/94
*/
if (command_type == CMD_INSERT || command_type == CMD_UPDATE)
tlist = expand_targetlist(tlist, command_type,
result_relation, range_table);
/*
- * for "update" and "delete" queries, add ctid of the result relation
- * into the target list so that the ctid will propagate through
- * execution and ExecutePlan() will be able to identify the right
- * tuple to replace or delete. This extra field is marked "junk" so
- * that it is not stored back into the tuple.
+ * for "update" and "delete" queries, add ctid of the result relation into
+ * the target list so that the ctid will propagate through execution and
+ * ExecutePlan() will be able to identify the right tuple to replace or
+ * delete. This extra field is marked "junk" so that it is not stored
+ * back into the tuple.
*/
if (command_type == CMD_UPDATE || command_type == CMD_DELETE)
{
@@ -92,9 +92,9 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
true);
/*
- * For an UPDATE, expand_targetlist already created a fresh tlist.
- * For DELETE, better do a listCopy so that we don't destructively
- * modify the original tlist (is this really necessary?).
+ * For an UPDATE, expand_targetlist already created a fresh tlist. For
+ * DELETE, better do a listCopy so that we don't destructively modify
+ * the original tlist (is this really necessary?).
*/
if (command_type == CMD_DELETE)
tlist = list_copy(tlist);
@@ -103,31 +103,28 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}
/*
- * Add TID targets for rels selected FOR UPDATE/SHARE. The executor
- * uses the TID to know which rows to lock, much as for UPDATE or
- * DELETE.
+ * Add TID targets for rels selected FOR UPDATE/SHARE. The executor uses
+ * the TID to know which rows to lock, much as for UPDATE or DELETE.
*/
if (parse->rowMarks)
{
ListCell *l;
/*
- * We've got trouble if the FOR UPDATE/SHARE appears inside
- * grouping, since grouping renders a reference to individual
- * tuple CTIDs invalid. This is also checked at parse time,
- * but that's insufficient because of rule substitution, query
- * pullup, etc.
+ * We've got trouble if the FOR UPDATE/SHARE appears inside grouping,
+ * since grouping renders a reference to individual tuple CTIDs
+ * invalid. This is also checked at parse time, but that's
+ * insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse, parse->forUpdate);
/*
- * Currently the executor only supports FOR UPDATE/SHARE at top
- * level
+ * Currently the executor only supports FOR UPDATE/SHARE at top level
*/
if (PlannerQueryLevel > 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));
+ errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));
foreach(l, parse->rowMarks)
{
@@ -185,14 +182,13 @@ expand_targetlist(List *tlist, int command_type,
tlist_item = list_head(tlist);
/*
- * The rewriter should have already ensured that the TLEs are in
- * correct order; but we have to insert TLEs for any missing
- * attributes.
+ * The rewriter should have already ensured that the TLEs are in correct
+ * order; but we have to insert TLEs for any missing attributes.
*
- * Scan the tuple description in the relation's relcache entry to make
- * sure we have all the user attributes in the right order. We assume
- * that the rewriter already acquired at least AccessShareLock on the
- * relation, so we need no lock here.
+ * Scan the tuple description in the relation's relcache entry to make sure
+ * we have all the user attributes in the right order. We assume that the
+ * rewriter already acquired at least AccessShareLock on the relation, so
+ * we need no lock here.
*/
rel = heap_open(getrelid(result_relation, range_table), NoLock);
@@ -220,23 +216,22 @@ expand_targetlist(List *tlist, int command_type,
* Didn't find a matching tlist entry, so make one.
*
* For INSERT, generate a NULL constant. (We assume the rewriter
- * would have inserted any available default value.) Also, if
- * the column isn't dropped, apply any domain constraints that
- * might exist --- this is to catch domain NOT NULL.
+ * would have inserted any available default value.) Also, if the
+ * column isn't dropped, apply any domain constraints that might
+ * exist --- this is to catch domain NOT NULL.
*
- * For UPDATE, generate a Var reference to the existing value of
- * the attribute, so that it gets copied to the new tuple. But
- * generate a NULL for dropped columns (we want to drop any
- * old values).
+ * For UPDATE, generate a Var reference to the existing value of the
+ * attribute, so that it gets copied to the new tuple. But
+ * generate a NULL for dropped columns (we want to drop any old
+ * values).
*
- * When generating a NULL constant for a dropped column, we label
- * it INT4 (any other guaranteed-to-exist datatype would do as
- * well). We can't label it with the dropped column's
- * datatype since that might not exist anymore. It does not
- * really matter what we claim the type is, since NULL is NULL
- * --- its representation is datatype-independent. This could
- * perhaps confuse code comparing the finished plan to the
- * target relation, however.
+ * When generating a NULL constant for a dropped column, we label it
+ * INT4 (any other guaranteed-to-exist datatype would do as well).
+ * We can't label it with the dropped column's datatype since that
+ * might not exist anymore. It does not really matter what we
+ * claim the type is, since NULL is NULL --- its representation is
+ * datatype-independent. This could perhaps confuse code
+ * comparing the finished plan to the target relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
@@ -305,12 +300,12 @@ expand_targetlist(List *tlist, int command_type,
}
/*
- * The remaining tlist entries should be resjunk; append them all to
- * the end of the new tlist, making sure they have resnos higher than
- * the last real attribute. (Note: although the rewriter already did
- * such renumbering, we have to do it again here in case we are doing
- * an UPDATE in a table with dropped columns, or an inheritance child
- * table with extra columns.)
+ * The remaining tlist entries should be resjunk; append them all to the
+ * end of the new tlist, making sure they have resnos higher than the last
+ * real attribute. (Note: although the rewriter already did such
+ * renumbering, we have to do it again here in case we are doing an UPDATE
+ * in a table with dropped columns, or an inheritance child table with
+ * extra columns.)
*/
while (tlist_item)
{
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index c8e9309354a..dc7d94e1c6c 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.126 2005/08/02 20:27:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.127 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,19 +51,19 @@ typedef struct
} adjust_inherited_attrs_context;
static Plan *recurse_set_operations(Node *setOp, PlannerInfo *root,
- double tuple_fraction,
- List *colTypes, bool junkOK,
- int flag, List *refnames_tlist,
- List **sortClauses);
+ double tuple_fraction,
+ List *colTypes, bool junkOK,
+ int flag, List *refnames_tlist,
+ List **sortClauses);
static Plan *generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
- double tuple_fraction,
- List *refnames_tlist, List **sortClauses);
+ double tuple_fraction,
+ List *refnames_tlist, List **sortClauses);
static Plan *generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
List *refnames_tlist, List **sortClauses);
static List *recurse_union_children(Node *setOp, PlannerInfo *root,
- double tuple_fraction,
- SetOperationStmt *top_union,
- List *refnames_tlist);
+ double tuple_fraction,
+ SetOperationStmt *top_union,
+ List *refnames_tlist);
static List *generate_setop_tlist(List *colTypes, int flag,
Index varno,
bool hack_constants,
@@ -117,8 +117,8 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(parse->distinctClause == NIL);
/*
- * Find the leftmost component Query. We need to use its column names
- * for all generated tlists (else SELECT INTO won't work right).
+ * Find the leftmost component Query. We need to use its column names for
+ * all generated tlists (else SELECT INTO won't work right).
*/
node = topop->larg;
while (node && IsA(node, SetOperationStmt))
@@ -129,10 +129,10 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(leftmostQuery != NULL);
/*
- * Recurse on setOperations tree to generate plans for set ops. The
- * final output plan should have just the column types shown as the
- * output from the top-level node, plus possibly resjunk working
- * columns (we can rely on upper-level nodes to deal with that).
+ * Recurse on setOperations tree to generate plans for set ops. The final
+ * output plan should have just the column types shown as the output from
+ * the top-level node, plus possibly resjunk working columns (we can rely
+ * on upper-level nodes to deal with that).
*/
return recurse_set_operations((Node *) topop, root, tuple_fraction,
topop->colTypes, true, -1,
@@ -187,8 +187,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
subplan);
/*
- * We don't bother to determine the subquery's output ordering
- * since it won't be reflected in the set-op result anyhow.
+ * We don't bother to determine the subquery's output ordering since
+ * it won't be reflected in the set-op result anyhow.
*/
*sortClauses = NIL;
@@ -214,13 +214,13 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* output columns.
*
* XXX you don't really want to know about this: setrefs.c will apply
- * replace_vars_with_subplan_refs() to the Result node's tlist.
- * This would fail if the Vars generated by generate_setop_tlist()
- * were not exactly equal() to the corresponding tlist entries of
- * the subplan. However, since the subplan was generated by
- * generate_union_plan() or generate_nonunion_plan(), and hence
- * its tlist was generated by generate_append_tlist(), this will
- * work. We just tell generate_setop_tlist() to use varno 0.
+ * replace_vars_with_subplan_refs() to the Result node's tlist. This
+ * would fail if the Vars generated by generate_setop_tlist() were not
+ * exactly equal() to the corresponding tlist entries of the subplan.
+ * However, since the subplan was generated by generate_union_plan()
+ * or generate_nonunion_plan(), and hence its tlist was generated by
+ * generate_append_tlist(), this will work. We just tell
+ * generate_setop_tlist() to use varno 0.
*/
if (flag >= 0 ||
!tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
@@ -260,22 +260,22 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
/*
* If plain UNION, tell children to fetch all tuples.
*
- * Note: in UNION ALL, we pass the top-level tuple_fraction unmodified
- * to each arm of the UNION ALL. One could make a case for reducing
- * the tuple fraction for later arms (discounting by the expected size
- * of the earlier arms' results) but it seems not worth the trouble.
- * The normal case where tuple_fraction isn't already zero is a LIMIT
- * at top level, and passing it down as-is is usually enough to get the
- * desired result of preferring fast-start plans.
+ * Note: in UNION ALL, we pass the top-level tuple_fraction unmodified to
+ * each arm of the UNION ALL. One could make a case for reducing the
+ * tuple fraction for later arms (discounting by the expected size of the
+ * earlier arms' results) but it seems not worth the trouble. The normal
+ * case where tuple_fraction isn't already zero is a LIMIT at top level,
+ * and passing it down as-is is usually enough to get the desired result
+ * of preferring fast-start plans.
*/
if (!op->all)
tuple_fraction = 0.0;
/*
- * If any of my children are identical UNION nodes (same op, all-flag,
- * and colTypes) then they can be merged into this node so that we
- * generate only one Append and Sort for the lot. Recurse to find
- * such nodes and compute their children's plans.
+ * If any of my children are identical UNION nodes (same op, all-flag, and
+ * colTypes) then they can be merged into this node so that we generate
+ * only one Append and Sort for the lot. Recurse to find such nodes and
+ * compute their children's plans.
*/
planlist = list_concat(recurse_union_children(op->larg, root,
tuple_fraction,
@@ -288,8 +288,8 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
- * concerned, but we must make it look real anyway for the benefit of
- * the next plan level up.
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up.
*/
tlist = generate_append_tlist(op->colTypes, false,
planlist, refnames_tlist);
@@ -300,8 +300,8 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
plan = (Plan *) make_append(planlist, false, tlist);
/*
- * For UNION ALL, we just need the Append plan. For UNION, need to
- * add Sort and Unique nodes to produce unique output.
+ * For UNION ALL, we just need the Append plan. For UNION, need to add
+ * Sort and Unique nodes to produce unique output.
*/
if (!op->all)
{
@@ -340,12 +340,12 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
/* Recurse on children, ensuring their outputs are marked */
lplan = recurse_set_operations(op->larg, root,
- 0.0 /* all tuples needed */,
+ 0.0 /* all tuples needed */ ,
op->colTypes, false, 0,
refnames_tlist,
&child_sortclauses);
rplan = recurse_set_operations(op->rarg, root,
- 0.0 /* all tuples needed */,
+ 0.0 /* all tuples needed */ ,
op->colTypes, false, 1,
refnames_tlist,
&child_sortclauses);
@@ -355,10 +355,10 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
- * concerned, but we must make it look real anyway for the benefit of
- * the next plan level up. In fact, it has to be real enough that the
- * flag column is shown as a variable not a constant, else setrefs.c
- * will get confused.
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up. In fact, it has to be real enough that the flag
+ * column is shown as a variable not a constant, else setrefs.c will get
+ * confused.
*/
tlist = generate_append_tlist(op->colTypes, true,
planlist, refnames_tlist);
@@ -439,12 +439,11 @@ recurse_union_children(Node *setOp, PlannerInfo *root,
/*
* Not same, so plan this child separately.
*
- * Note we disallow any resjunk columns in child results. This is
- * necessary since the Append node that implements the union won't do
- * any projection, and upper levels will get confused if some of our
- * output tuples have junk and some don't. This case only arises when
- * we have an EXCEPT or INTERSECT as child, else there won't be
- * resjunk anyway.
+ * Note we disallow any resjunk columns in child results. This is necessary
+ * since the Append node that implements the union won't do any
+ * projection, and upper levels will get confused if some of our output
+ * tuples have junk and some don't. This case only arises when we have an
+ * EXCEPT or INTERSECT as child, else there won't be resjunk anyway.
*/
return list_make1(recurse_set_operations(setOp, root,
tuple_fraction,
@@ -492,17 +491,17 @@ generate_setop_tlist(List *colTypes, int flag,
Assert(!reftle->resjunk);
/*
- * Generate columns referencing input columns and having
- * appropriate data types and column names. Insert datatype
- * coercions where necessary.
+ * Generate columns referencing input columns and having appropriate
+ * data types and column names. Insert datatype coercions where
+ * necessary.
*
- * HACK: constants in the input's targetlist are copied up as-is
- * rather than being referenced as subquery outputs. This is
- * mainly to ensure that when we try to coerce them to the output
- * column's datatype, the right things happen for UNKNOWN
- * constants. But do this only at the first level of
- * subquery-scan plans; we don't want phony constants appearing in
- * the output tlists of upper-level nodes!
+ * HACK: constants in the input's targetlist are copied up as-is rather
+ * than being referenced as subquery outputs. This is mainly to
+ * ensure that when we try to coerce them to the output column's
+ * datatype, the right things happen for UNKNOWN constants. But do
+ * this only at the first level of subquery-scan plans; we don't want
+ * phony constants appearing in the output tlists of upper-level
+ * nodes!
*/
if (hack_constants && inputtle->expr && IsA(inputtle->expr, Const))
expr = (Node *) inputtle->expr;
@@ -710,7 +709,7 @@ find_all_inheritors(Oid parentrel)
List *rels_list;
ListCell *l;
- /*
+ /*
* We build a list starting with the given rel and adding all direct and
* indirect children. We can use a single list as both the record of
* already-found rels and the agenda of rels yet to be scanned for more
@@ -728,11 +727,11 @@ find_all_inheritors(Oid parentrel)
currentchildren = find_inheritance_children(currentrel);
/*
- * Add to the queue only those children not already seen. This
- * avoids making duplicate entries in case of multiple inheritance
- * paths from the same parent. (It'll also keep us from getting
- * into an infinite loop, though theoretically there can't be any
- * cycles in the inheritance graph anyway.)
+ * Add to the queue only those children not already seen. This avoids
+ * making duplicate entries in case of multiple inheritance paths from
+ * the same parent. (It'll also keep us from getting into an infinite
+ * loop, though theoretically there can't be any cycles in the
+ * inheritance graph anyway.)
*/
rels_list = list_concat_unique_oid(rels_list, currentchildren);
}
@@ -790,8 +789,8 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
/*
* Check that there's at least one descendant, else treat as no-child
- * case. This could happen despite above has_subclass() check, if
- * table once had a child but no longer does.
+ * case. This could happen despite above has_subclass() check, if table
+ * once had a child but no longer does.
*/
if (list_length(inhOIDs) < 2)
{
@@ -809,19 +808,19 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
Index childRTindex;
/*
- * It is possible that the parent table has children that are
- * temp tables of other backends. We cannot safely access such
- * tables (because of buffering issues), and the best thing to do
- * seems to be to silently ignore them.
+ * It is possible that the parent table has children that are temp
+ * tables of other backends. We cannot safely access such tables
+ * (because of buffering issues), and the best thing to do seems to be
+ * to silently ignore them.
*/
if (childOID != parentOID &&
isOtherTempNamespace(get_rel_namespace(childOID)))
continue;
/*
- * Build an RTE for the child, and attach to query's rangetable
- * list. We copy most fields of the parent's RTE, but replace
- * relation OID, and set inh = false.
+ * Build an RTE for the child, and attach to query's rangetable list.
+ * We copy most fields of the parent's RTE, but replace relation OID,
+ * and set inh = false.
*/
childrte = copyObject(rte);
childrte->relid = childOID;
@@ -833,7 +832,8 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
/*
* If all the children were temp tables, pretend it's a non-inheritance
- * situation. The duplicate RTE we added for the parent table is harmless.
+ * situation. The duplicate RTE we added for the parent table is
+ * harmless.
*/
if (list_length(inhRTIs) < 2)
{
@@ -843,11 +843,11 @@ expand_inherited_rtentry(PlannerInfo *root, Index rti)
}
/*
- * The executor will check the parent table's access permissions when
- * it examines the parent's inheritlist entry. There's no need to
- * check twice, so turn off access check bits in the original RTE.
- * (If we are invoked more than once, extra copies of the child RTEs
- * will also not cause duplicate permission checks.)
+ * The executor will check the parent table's access permissions when it
+ * examines the parent's inheritlist entry. There's no need to check
+ * twice, so turn off access check bits in the original RTE. (If we are
+ * invoked more than once, extra copies of the child RTEs will also not
+ * cause duplicate permission checks.)
*/
rte->requiredPerms = 0;
@@ -882,9 +882,8 @@ adjust_inherited_attrs(Node *node,
}
/*
- * We assume that by now the planner has acquired at least
- * AccessShareLock on both rels, and so we need no additional lock
- * now.
+ * We assume that by now the planner has acquired at least AccessShareLock
+ * on both rels, and so we need no additional lock now.
*/
oldrelation = heap_open(old_relid, NoLock);
newrelation = heap_open(new_relid, NoLock);
@@ -1035,7 +1034,7 @@ adjust_inherited_attrs_mutator(Node *node,
JoinExpr *j;
j = (JoinExpr *) expression_tree_mutator(node,
- adjust_inherited_attrs_mutator,
+ adjust_inherited_attrs_mutator,
(void *) context);
/* now fix JoinExpr's rtindex */
if (j->rtindex == context->old_rt_index)
@@ -1048,8 +1047,8 @@ adjust_inherited_attrs_mutator(Node *node,
InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
- adjust_inherited_attrs_mutator,
- (void *) context);
+ adjust_inherited_attrs_mutator,
+ (void *) context);
/* now fix InClauseInfo's relid sets */
ininfo->lefthand = adjust_relid_set(ininfo->lefthand,
context->old_rt_index,
@@ -1119,10 +1118,10 @@ adjust_inherited_attrs_mutator(Node *node,
/*
* BUT: although we don't need to recurse into subplans, we do need to
* make sure that they are copied, not just referenced as
- * expression_tree_mutator will do by default. Otherwise we'll have
- * the same subplan node referenced from each arm of the inheritance
- * APPEND plan, which will cause trouble in the executor. This is a
- * kluge that should go away when we redesign querytrees.
+ * expression_tree_mutator will do by default. Otherwise we'll have the
+ * same subplan node referenced from each arm of the inheritance APPEND
+ * plan, which will cause trouble in the executor. This is a kluge that
+ * should go away when we redesign querytrees.
*/
if (is_subplan(node))
{
@@ -1205,8 +1204,8 @@ adjust_inherited_tlist(List *tlist,
/*
* If we changed anything, re-sort the tlist by resno, and make sure
* resjunk entries have resnos above the last real resno. The sort
- * algorithm is a bit stupid, but for such a seldom-taken path, small
- * is probably better than fast.
+ * algorithm is a bit stupid, but for such a seldom-taken path, small is
+ * probably better than fast.
*/
if (!changed_it)
return tlist;
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 496a4b03f44..5e2718dc635 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.200 2005/07/03 21:14:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201 2005/10/15 02:49:21 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -91,7 +91,7 @@ static Expr *inline_function(Oid funcid, Oid result_type, List *args,
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context);
+ substitute_actual_parameters_context *context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);
@@ -308,10 +308,10 @@ List *
make_ands_implicit(Expr *clause)
{
/*
- * NB: because the parser sets the qual field to NULL in a query that
- * has no WHERE clause, we must consider a NULL input clause as TRUE,
- * even though one might more reasonably think it FALSE. Grumble. If
- * this causes trouble, consider changing the parser's behavior.
+ * NB: because the parser sets the qual field to NULL in a query that has
+ * no WHERE clause, we must consider a NULL input clause as TRUE, even
+ * though one might more reasonably think it FALSE. Grumble. If this
+ * causes trouble, consider changing the parser's behavior.
*/
if (clause == NULL)
return NIL; /* NULL -> NIL list == TRUE */
@@ -357,8 +357,7 @@ contain_agg_clause_walker(Node *node, void *context)
if (IsA(node, Aggref))
{
Assert(((Aggref *) node)->agglevelsup == 0);
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
}
Assert(!IsA(node, SubLink));
return expression_tree_walker(node, contain_agg_clause_walker, context);
@@ -438,9 +437,9 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
- * pass-by-reference then we have to add the estimated size of
- * the value itself, plus palloc overhead.
+ * anything to the required size of the hashtable. If it is
+ * pass-by-reference then we have to add the estimated size of the
+ * value itself, plus palloc overhead.
*/
if (!get_typbyval(aggtranstype))
{
@@ -470,7 +469,7 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
if (contain_agg_clause((Node *) aggref->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
/*
* Having checked that, we need not recurse into the argument.
@@ -579,8 +578,7 @@ contain_subplans_walker(Node *node, void *context)
return false;
if (IsA(node, SubPlan) ||
IsA(node, SubLink))
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, contain_subplans_walker, context);
}
@@ -882,9 +880,9 @@ is_pseudo_constant_clause(Node *clause)
{
/*
* We could implement this check in one recursive scan. But since the
- * check for volatile functions is both moderately expensive and
- * unlikely to fail, it seems better to look for Vars first and only
- * check for volatile functions if we find no Vars.
+ * check for volatile functions is both moderately expensive and unlikely
+ * to fail, it seems better to look for Vars first and only check for
+ * volatile functions if we find no Vars.
*/
if (!contain_var_clause(clause) &&
!contain_volatile_functions(clause))
@@ -958,13 +956,12 @@ has_distinct_on_clause(Query *query)
/*
* If the DISTINCT list contains all the nonjunk targetlist items, and
- * nothing else (ie, no junk tlist items), then it's a simple
- * DISTINCT, else it's DISTINCT ON. We do not require the lists to be
- * in the same order (since the parser may have adjusted the DISTINCT
- * clause ordering to agree with ORDER BY). Furthermore, a
- * non-DISTINCT junk tlist item that is in the sortClause is also
- * evidence of DISTINCT ON, since we don't allow ORDER BY on junk
- * tlist items when plain DISTINCT is used.
+ * nothing else (ie, no junk tlist items), then it's a simple DISTINCT,
+ * else it's DISTINCT ON. We do not require the lists to be in the same
+ * order (since the parser may have adjusted the DISTINCT clause ordering
+ * to agree with ORDER BY). Furthermore, a non-DISTINCT junk tlist item
+ * that is in the sortClause is also evidence of DISTINCT ON, since we
+ * don't allow ORDER BY on junk tlist items when plain DISTINCT is used.
*
* This code assumes that the DISTINCT list is valid, ie, all its entries
* match some entry of the tlist.
@@ -1224,7 +1221,7 @@ eval_const_expressions(Node *node)
*
* Currently the extra steps that are taken in this mode are:
* 1. Substitute values for Params, where a bound Param value has been made
- * available by the caller of planner().
+ * available by the caller of planner().
* 2. Fold stable, as well as immutable, functions to constants.
*--------------------
*/
@@ -1264,11 +1261,11 @@ eval_const_expressions_mutator(Node *node,
if (paramInfo)
{
/*
- * Found it, so return a Const representing the param
- * value. Note that we don't copy pass-by-ref datatypes,
- * so the Const will only be valid as long as the bound
- * parameter list exists. This is okay for intended uses
- * of estimate_expression_value().
+ * Found it, so return a Const representing the param value.
+ * Note that we don't copy pass-by-ref datatypes, so the Const
+ * will only be valid as long as the bound parameter list
+ * exists. This is okay for intended uses of
+ * estimate_expression_value().
*/
int16 typLen;
bool typByVal;
@@ -1294,16 +1291,16 @@ eval_const_expressions_mutator(Node *node,
/*
* Reduce constants in the FuncExpr's arguments. We know args is
- * either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * either NIL or a List node, so we can call expression_tree_mutator
+ * directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
- * Code for op/func reduction is pretty bulky, so split it out as
- * a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as a
+ * separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, context);
@@ -1312,8 +1309,8 @@ eval_const_expressions_mutator(Node *node,
/*
* The expression cannot be simplified any further, so build and
- * return a replacement FuncExpr node using the
- * possibly-simplified arguments.
+ * return a replacement FuncExpr node using the possibly-simplified
+ * arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@@ -1331,23 +1328,23 @@ eval_const_expressions_mutator(Node *node,
OpExpr *newexpr;
/*
- * Reduce constants in the OpExpr's arguments. We know args is
- * either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * Reduce constants in the OpExpr's arguments. We know args is either
+ * NIL or a List node, so we can call expression_tree_mutator directly
+ * rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
- * Need to get OID of underlying function. Okay to scribble on
- * input to this extent.
+ * Need to get OID of underlying function. Okay to scribble on input
+ * to this extent.
*/
set_opfuncid(expr);
/*
- * Code for op/func reduction is pretty bulky, so split it out as
- * a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as a
+ * separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, context);
@@ -1355,8 +1352,8 @@ eval_const_expressions_mutator(Node *node,
return (Node *) simple;
/*
- * If the operator is boolean equality, we know how to simplify
- * cases involving one constant and one non-constant argument.
+ * If the operator is boolean equality, we know how to simplify cases
+ * involving one constant and one non-constant argument.
*/
if (expr->opno == BooleanEqualOperator)
{
@@ -1390,18 +1387,17 @@ eval_const_expressions_mutator(Node *node,
DistinctExpr *newexpr;
/*
- * Reduce constants in the DistinctExpr's arguments. We know args
- * is either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * Reduce constants in the DistinctExpr's arguments. We know args is
+ * either NIL or a List node, so we can call expression_tree_mutator
+ * directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
* We must do our own check for NULLs because DistinctExpr has
- * different results for NULL input than the underlying operator
- * does.
+ * different results for NULL input than the underlying operator does.
*/
foreach(arg, args)
{
@@ -1429,15 +1425,14 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to scribble
- * on input to this extent.
+ * Need to get OID of underlying function. Okay to scribble on
+ * input to this extent.
*/
- set_opfuncid((OpExpr *) expr); /* rely on struct
- * equivalence */
+ set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
/*
- * Code for op/func reduction is pretty bulky, so split it out
- * as a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as
+ * a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype,
args, false, context);
@@ -1482,7 +1477,7 @@ eval_const_expressions_mutator(Node *node,
bool forceTrue = false;
newargs = simplify_or_arguments(expr->args, context,
- &haveNull, &forceTrue);
+ &haveNull, &forceTrue);
if (forceTrue)
return makeBoolConst(true, false);
if (haveNull)
@@ -1503,7 +1498,7 @@ eval_const_expressions_mutator(Node *node,
bool forceFalse = false;
newargs = simplify_and_arguments(expr->args, context,
- &haveNull, &forceFalse);
+ &haveNull, &forceFalse);
if (forceFalse)
return makeBoolConst(false, false);
if (haveNull)
@@ -1554,17 +1549,17 @@ eval_const_expressions_mutator(Node *node,
/*
* Return a SubPlan unchanged --- too late to do anything with it.
*
- * XXX should we ereport() here instead? Probably this routine
- * should never be invoked after SubPlan creation.
+ * XXX should we ereport() here instead? Probably this routine should
+ * never be invoked after SubPlan creation.
*/
return node;
}
if (IsA(node, RelabelType))
{
/*
- * If we can simplify the input to a constant, then we don't need
- * the RelabelType node anymore: just change the type field of the
- * Const node. Otherwise, must copy the RelabelType node.
+ * If we can simplify the input to a constant, then we don't need the
+ * RelabelType node anymore: just change the type field of the Const
+ * node. Otherwise, must copy the RelabelType node.
*/
RelabelType *relabel = (RelabelType *) node;
Node *arg;
@@ -1573,8 +1568,8 @@ eval_const_expressions_mutator(Node *node,
context);
/*
- * If we find stacked RelabelTypes (eg, from foo :: int :: oid) we
- * can discard all but the top one.
+ * If we find stacked RelabelTypes (eg, from foo :: int :: oid) we can
+ * discard all but the top one.
*/
while (arg && IsA(arg, RelabelType))
arg = (Node *) ((RelabelType *) arg)->arg;
@@ -1586,10 +1581,9 @@ eval_const_expressions_mutator(Node *node,
con->consttype = relabel->resulttype;
/*
- * relabel's resulttypmod is discarded, which is OK for now;
- * if the type actually needs a runtime length coercion then
- * there should be a function call to do it just above this
- * node.
+ * relabel's resulttypmod is discarded, which is OK for now; if
+ * the type actually needs a runtime length coercion then there
+ * should be a function call to do it just above this node.
*/
return (Node *) con;
}
@@ -1692,7 +1686,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Found a TRUE condition, so none of the remaining alternatives
- * can be reached. We treat the result as the default result.
+ * can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
@@ -1720,9 +1714,9 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, CaseTestExpr))
{
/*
- * If we know a constant test value for the current CASE
- * construct, substitute it for the placeholder. Else just
- * return the placeholder as-is.
+ * If we know a constant test value for the current CASE construct,
+ * substitute it for the placeholder. Else just return the
+ * placeholder as-is.
*/
if (context->case_val)
return copyObject(context->case_val);
@@ -1803,15 +1797,15 @@ eval_const_expressions_mutator(Node *node,
if (IsA(node, FieldSelect))
{
/*
- * We can optimize field selection from a whole-row Var into a
- * simple Var. (This case won't be generated directly by the
- * parser, because ParseComplexProjection short-circuits it. But
- * it can arise while simplifying functions.) Also, we can
- * optimize field selection from a RowExpr construct.
+ * We can optimize field selection from a whole-row Var into a simple
+ * Var. (This case won't be generated directly by the parser, because
+ * ParseComplexProjection short-circuits it. But it can arise while
+ * simplifying functions.) Also, we can optimize field selection from
+ * a RowExpr construct.
*
- * We must however check that the declared type of the field is still
- * the same as when the FieldSelect was created --- this can
- * change if someone did ALTER COLUMN TYPE on the rowtype.
+ * We must however check that the declared type of the field is still the
+ * same as when the FieldSelect was created --- this can change if
+ * someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *newfselect;
@@ -1840,7 +1834,7 @@ eval_const_expressions_mutator(Node *node,
fselect->fieldnum <= list_length(rowexpr->args))
{
Node *fld = (Node *) list_nth(rowexpr->args,
- fselect->fieldnum - 1);
+ fselect->fieldnum - 1);
if (rowtype_field_matches(rowexpr->row_typeid,
fselect->fieldnum,
@@ -1861,10 +1855,10 @@ eval_const_expressions_mutator(Node *node,
/*
* For any node type not handled above, we recurse using
- * expression_tree_mutator, which will copy the node unchanged but try
- * to simplify its arguments (if any) using this routine. For example:
- * we cannot eliminate an ArrayRef node, but we might be able to
- * simplify constant expressions in its subscripts.
+ * expression_tree_mutator, which will copy the node unchanged but try to
+ * simplify its arguments (if any) using this routine. For example: we
+ * cannot eliminate an ArrayRef node, but we might be able to simplify
+ * constant expressions in its subscripts.
*/
return expression_tree_mutator(node, eval_const_expressions_mutator,
(void *) context);
@@ -1900,7 +1894,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
@@ -1915,14 +1909,14 @@ simplify_or_arguments(List *args,
/* flatten nested ORs as per above comment */
if (or_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
- List *oldhdr = unprocessed_args;
+ List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -1934,23 +1928,22 @@ simplify_or_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);
/*
- * It is unlikely but not impossible for simplification of a
- * non-OR clause to produce an OR. Recheck, but don't be
- * too tense about it since it's not a mainstream case.
- * In particular we don't worry about const-simplifying
- * the input twice.
+ * It is unlikely but not impossible for simplification of a non-OR
+ * clause to produce an OR. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice.
*/
if (or_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
- * OK, we have a const-simplified non-OR argument. Process it
- * per comments above.
+ * OK, we have a const-simplified non-OR argument. Process it per
+ * comments above.
*/
if (IsA(arg, Const))
{
@@ -2018,14 +2011,14 @@ simplify_and_arguments(List *args,
/* flatten nested ANDs as per above comment */
if (and_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
- List *oldhdr = unprocessed_args;
+ List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
@@ -2037,23 +2030,22 @@ simplify_and_arguments(List *args,
arg = eval_const_expressions_mutator(arg, context);
/*
- * It is unlikely but not impossible for simplification of a
- * non-AND clause to produce an AND. Recheck, but don't be
- * too tense about it since it's not a mainstream case.
- * In particular we don't worry about const-simplifying
- * the input twice.
+ * It is unlikely but not impossible for simplification of a non-AND
+ * clause to produce an AND. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice.
*/
if (and_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
- * OK, we have a const-simplified non-AND argument. Process it
- * per comments above.
+ * OK, we have a const-simplified non-AND argument. Process it per
+ * comments above.
*/
if (IsA(arg, Const))
{
@@ -2111,7 +2103,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) leftop)->constisnull);
if (DatumGetBool(((Const *) leftop)->constvalue))
- return rightop; /* true = foo */
+ return rightop; /* true = foo */
else
return make_notclause(rightop); /* false = foo */
}
@@ -2119,7 +2111,7 @@ simplify_boolean_equality(List *args)
{
Assert(!((Const *) rightop)->constisnull);
if (DatumGetBool(((Const *) rightop)->constvalue))
- return leftop; /* foo = true */
+ return leftop; /* foo = true */
else
return make_notclause(leftop); /* foo = false */
}
@@ -2146,12 +2138,12 @@ simplify_function(Oid funcid, Oid result_type, List *args,
Expr *newexpr;
/*
- * We have two strategies for simplification: either execute the
- * function to deliver a constant result, or expand in-line the body
- * of the function definition (which only works for simple
- * SQL-language functions, but that is a common case). In either case
- * we need access to the function's pg_proc tuple, so fetch it just
- * once to use in both attempts.
+ * We have two strategies for simplification: either execute the function
+ * to deliver a constant result, or expand in-line the body of the
+ * function definition (which only works for simple SQL-language
+ * functions, but that is a common case). In either case we need access
+ * to the function's pg_proc tuple, so fetch it just once to use in both
+ * attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@@ -2200,15 +2192,15 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
- * Can't simplify if it returns RECORD. The immediate problem is that
- * it will be needing an expected tupdesc which we can't supply here.
+ * Can't simplify if it returns RECORD. The immediate problem is that it
+ * will be needing an expected tupdesc which we can't supply here.
*
* In the case where it has OUT parameters, it could get by without an
* expected tupdesc, but we still have issues: get_expr_result_type()
- * doesn't know how to extract type info from a RECORD constant, and
- * in the case of a NULL function result there doesn't seem to be any
- * clean way to fix that. In view of the likelihood of there being
- * still other gotchas, seems best to leave the function call unreduced.
+ * doesn't know how to extract type info from a RECORD constant, and in
+ * the case of a NULL function result there doesn't seem to be any clean
+ * way to fix that. In view of the likelihood of there being still other
+ * gotchas, seems best to leave the function call unreduced.
*/
if (funcform->prorettype == RECORDOID)
return NULL;
@@ -2225,10 +2217,10 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
}
/*
- * If the function is strict and has a constant-NULL input, it will
- * never be called at all, so we can replace the call by a NULL
- * constant, even if there are other inputs that aren't constant, and
- * even if the function is not otherwise immutable.
+ * If the function is strict and has a constant-NULL input, it will never
+ * be called at all, so we can replace the call by a NULL constant, even
+ * if there are other inputs that aren't constant, and even if the
+ * function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);
@@ -2242,16 +2234,16 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
- * Ordinarily we are only allowed to simplify immutable functions.
- * But for purposes of estimation, we consider it okay to simplify
- * functions that are merely stable; the risk that the result might
- * change from planning time to execution time is worth taking in
- * preference to not being able to estimate the value at all.
+ * Ordinarily we are only allowed to simplify immutable functions. But for
+ * purposes of estimation, we consider it okay to simplify functions that
+ * are merely stable; the risk that the result might change from planning
+ * time to execution time is worth taking in preference to not being able
+ * to estimate the value at all.
*/
if (funcform->provolatile == PROVOLATILE_IMMUTABLE)
- /* okay */ ;
+ /* okay */ ;
else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE)
- /* okay */ ;
+ /* okay */ ;
else
return NULL;
@@ -2318,8 +2310,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
int i;
/*
- * Forget it if the function is not SQL-language or has other
- * showstopper properties. (The nargs check is just paranoia.)
+ * Forget it if the function is not SQL-language or has other showstopper
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -2336,8 +2328,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
return NULL;
/*
- * Setup error traceback support for ereport(). This is so that we
- * can finger the function that bad information came from.
+ * Setup error traceback support for ereport(). This is so that we can
+ * finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = func_tuple;
@@ -2345,8 +2337,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
error_context_stack = &sqlerrcontext;
/*
- * Make a temporary memory context, so that we don't leak all the
- * stuff that parsing might create.
+ * Make a temporary memory context, so that we don't leak all the stuff
+ * that parsing might create.
*/
mycxt = AllocSetContextCreate(CurrentMemoryContext,
"inline_function",
@@ -2383,10 +2375,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
src = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
- * We just do parsing and parse analysis, not rewriting, because
- * rewriting will not affect table-free-SELECT-only queries, which is
- * all that we care about. Also, we can punt as soon as we detect
- * more than one command in the function body.
+ * We just do parsing and parse analysis, not rewriting, because rewriting
+ * will not affect table-free-SELECT-only queries, which is all that we
+ * care about. Also, we can punt as soon as we detect more than one
+ * command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (list_length(raw_parsetree_list) != 1)
@@ -2425,24 +2417,24 @@ inline_function(Oid funcid, Oid result_type, List *args,
newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;
/*
- * If the function has any arguments declared as polymorphic types,
- * then it wasn't type-checked at definition time; must do so now.
- * (This will raise an error if wrong, but that's okay since the
- * function would fail at runtime anyway. Note we do not try this
- * until we have verified that no rewriting was needed; that's
- * probably not important, but let's be careful.)
+ * If the function has any arguments declared as polymorphic types, then
+ * it wasn't type-checked at definition time; must do so now. (This will
+ * raise an error if wrong, but that's okay since the function would fail
+ * at runtime anyway. Note we do not try this until we have verified that
+ * no rewriting was needed; that's probably not important, but let's be
+ * careful.)
*/
if (polymorphic)
(void) check_sql_fn_retval(funcid, result_type, querytree_list, NULL);
/*
- * Additional validity checks on the expression. It mustn't return a
- * set, and it mustn't be more volatile than the surrounding function
- * (this is to avoid breaking hacks that involve pretending a function
- * is immutable when it really ain't). If the surrounding function is
- * declared strict, then the expression must contain only strict
- * constructs and must use all of the function parameters (this is
- * overkill, but an exact analysis is hard).
+ * Additional validity checks on the expression. It mustn't return a set,
+ * and it mustn't be more volatile than the surrounding function (this is
+ * to avoid breaking hacks that involve pretending a function is immutable
+ * when it really ain't). If the surrounding function is declared strict,
+ * then the expression must contain only strict constructs and must use
+ * all of the function parameters (this is overkill, but an exact analysis
+ * is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
@@ -2459,10 +2451,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
goto fail;
/*
- * We may be able to do it; there are still checks on parameter usage
- * to make, but those are most easily done in combination with the
- * actual substitution of the inputs. So start building expression
- * with inputs substituted.
+ * We may be able to do it; there are still checks on parameter usage to
+ * make, but those are most easily done in combination with the actual
+ * substitution of the inputs. So start building expression with inputs
+ * substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
newexpr = substitute_actual_parameters(newexpr, funcform->pronargs,
@@ -2486,8 +2478,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
QualCost eval_cost;
/*
- * We define "expensive" as "contains any subplan or more than
- * 10 operators". Note that the subplan search has to be done
+ * We define "expensive" as "contains any subplan or more than 10
+ * operators". Note that the subplan search has to be done
* explicitly, since cost_qual_eval() will barf on unplanned
* subselects.
*/
@@ -2509,8 +2501,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}
/*
- * Whew --- we can make the substitution. Copy the modified
- * expression out of the temporary memory context, and clean up.
+ * Whew --- we can make the substitution. Copy the modified expression
+ * out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);
@@ -2519,8 +2511,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
MemoryContextDelete(mycxt);
/*
- * Recursively try to simplify the modified expression. Here we must
- * add the current function to the context list of active functions.
+ * Recursively try to simplify the modified expression. Here we must add
+ * the current function to the context list of active functions.
*/
context->active_fns = lcons_oid(funcid, context->active_fns);
newexpr = eval_const_expressions_mutator(newexpr, context);
@@ -2557,7 +2549,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,
static Node *
substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context)
+ substitute_actual_parameters_context *context)
{
if (node == NULL)
return NULL;
@@ -2646,10 +2638,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
- * It is OK to use a default econtext because none of the ExecEvalExpr()
- * code used in this situation will use econtext. That might seem
- * fortuitous, but it's not so unreasonable --- a constant expression
- * does not depend on context, by definition, n'est ce pas?
+ * It is OK to use a default econtext because none of the ExecEvalExpr() code
+ * used in this situation will use econtext. That might seem fortuitous,
+ * but it's not so unreasonable --- a constant expression does not depend
+ * on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@@ -2779,12 +2771,12 @@ expression_tree_walker(Node *node,
ListCell *temp;
/*
- * The walker has already visited the current node, and so we need
- * only recurse into any sub-nodes it has.
+ * The walker has already visited the current node, and so we need only
+ * recurse into any sub-nodes it has.
*
- * We assume that the walker is not interested in List nodes per se, so
- * when we expect a List we just recurse directly to self without
- * bothering to call the walker.
+ * We assume that the walker is not interested in List nodes per se, so when
+ * we expect a List we just recurse directly to self without bothering to
+ * call the walker.
*/
if (node == NULL)
return false;
@@ -2877,8 +2869,8 @@ expression_tree_walker(Node *node,
return true;
/*
- * Also invoke the walker on the sublink's Query node, so
- * it can recurse into the sub-query if it wants to.
+ * Also invoke the walker on the sublink's Query node, so it
+ * can recurse into the sub-query if it wants to.
*/
return walker(sublink->subselect, context);
}
@@ -3167,8 +3159,8 @@ expression_tree_mutator(Node *node,
void *context)
{
/*
- * The mutator has already decided not to modify the current node, but
- * we must call the mutator for any sub-nodes.
+ * The mutator has already decided not to modify the current node, but we
+ * must call the mutator for any sub-nodes.
*/
#define FLATCOPY(newnode, node, nodetype) \
@@ -3286,8 +3278,8 @@ expression_tree_mutator(Node *node,
MUTATE(newnode->lefthand, sublink->lefthand, List *);
/*
- * Also invoke the mutator on the sublink's Query node, so
- * it can recurse into the sub-query if it wants to.
+ * Also invoke the mutator on the sublink's Query node, so it
+ * can recurse into the sub-query if it wants to.
*/
MUTATE(newnode->subselect, sublink->subselect, Node *);
return (Node *) newnode;
@@ -3468,10 +3460,9 @@ expression_tree_mutator(Node *node,
case T_List:
{
/*
- * We assume the mutator isn't interested in the list
- * nodes per se, so just invoke it on each list element.
- * NOTE: this would fail badly on a list with integer
- * elements!
+ * We assume the mutator isn't interested in the list nodes
+ * per se, so just invoke it on each list element. NOTE: this
+ * would fail badly on a list with integer elements!
*/
List *resultlist;
ListCell *temp;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index fc76c89329c..934daf8b28f 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.124 2005/07/22 19:12:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.125 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
return +1;
/*
- * If paths have the same startup cost (not at all unlikely),
- * order them by total cost.
+ * If paths have the same startup cost (not at all unlikely), order
+ * them by total cost.
*/
if (path1->total_cost < path2->total_cost)
return -1;
@@ -111,8 +111,8 @@ compare_fuzzy_path_costs(Path *path1, Path *path2, CostSelector criterion)
return -1;
/*
- * If paths have the same startup cost (not at all unlikely),
- * order them by total cost.
+ * If paths have the same startup cost (not at all unlikely), order
+ * them by total cost.
*/
if (path1->total_cost > path2->total_cost * 1.01)
return +1;
@@ -253,22 +253,21 @@ set_cheapest(RelOptInfo *parent_rel)
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
- bool accept_new = true; /* unless we find a superior old
- * path */
+ bool accept_new = true; /* unless we find a superior old path */
ListCell *insert_after = NULL; /* where to insert new item */
ListCell *p1_prev = NULL;
ListCell *p1;
/*
- * This is a convenient place to check for query cancel --- no part
- * of the planner goes very long without calling add_path().
+ * This is a convenient place to check for query cancel --- no part of the
+ * planner goes very long without calling add_path().
*/
CHECK_FOR_INTERRUPTS();
/*
- * Loop to check proposed new path against old paths. Note it is
- * possible for more than one old path to be tossed out because
- * new_path dominates it.
+ * Loop to check proposed new path against old paths. Note it is possible
+ * for more than one old path to be tossed out because new_path dominates
+ * it.
*/
p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
while (p1 != NULL)
@@ -278,20 +277,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
int costcmp;
/*
- * As of Postgres 8.0, we use fuzzy cost comparison to avoid
- * wasting cycles keeping paths that are really not significantly
- * different in cost.
+ * As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
+ * cycles keeping paths that are really not significantly different in
+ * cost.
*/
costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
/*
- * If the two paths compare differently for startup and total
- * cost, then we want to keep both, and we can skip the (much
- * slower) comparison of pathkeys. If they compare the same,
- * proceed with the pathkeys comparison. Note: this test relies
- * on the fact that compare_fuzzy_path_costs will only return 0 if
- * both costs are effectively equal (and, therefore, there's no
- * need to call it twice in that case).
+ * If the two paths compare differently for startup and total cost,
+ * then we want to keep both, and we can skip the (much slower)
+ * comparison of pathkeys. If they compare the same, proceed with the
+ * pathkeys comparison. Note: this test relies on the fact that
+ * compare_fuzzy_path_costs will only return 0 if both costs are
+ * effectively equal (and, therefore, there's no need to call it twice
+ * in that case).
*/
if (costcmp == 0 ||
costcmp == compare_fuzzy_path_costs(new_path, old_path,
@@ -307,16 +306,15 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
else
{
/*
- * Same pathkeys, and fuzzily the same cost, so
- * keep just one --- but we'll do an exact cost
- * comparison to decide which.
+ * Same pathkeys, and fuzzily the same cost, so keep
+ * just one --- but we'll do an exact cost comparison
+ * to decide which.
*/
if (compare_path_costs(new_path, old_path,
TOTAL_COST) < 0)
remove_old = true; /* new dominates old */
else
- accept_new = false; /* old equals or dominates
- * new */
+ accept_new = false; /* old equals or dominates new */
}
break;
case PATHKEYS_BETTER1:
@@ -340,6 +338,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
{
parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
p1, p1_prev);
+
/*
* Delete the data pointed-to by the deleted cell, if possible
*/
@@ -442,10 +441,9 @@ create_index_path(PlannerInfo *root,
/*
* For a join inner scan, there's no point in marking the path with any
* pathkeys, since it will only ever be used as the inner path of a
- * nestloop, and so its ordering does not matter. For the same reason
- * we don't really care what order it's scanned in. (We could expect
- * the caller to supply the correct values, but it's easier to force
- * it here.)
+ * nestloop, and so its ordering does not matter. For the same reason we
+ * don't really care what order it's scanned in. (We could expect the
+ * caller to supply the correct values, but it's easier to force it here.)
*/
if (isjoininner)
{
@@ -476,15 +474,15 @@ create_index_path(PlannerInfo *root,
/*
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
- * selectivity of the join clauses. Since clause_groups may
- * contain both restriction and join clauses, we have to do a set
- * union to get the full set of clauses that must be considered to
- * compute the correct selectivity. (Without the union operation,
- * we might have some restriction clauses appearing twice, which'd
- * mislead clauselist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * selectivity of the join clauses. Since clause_groups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * clauselist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
*
* Always assume the join type is JOIN_INNER; even if some of the join
* clauses come from other contexts, that's not our problem.
@@ -493,7 +491,7 @@ create_index_path(PlannerInfo *root,
pathnode->rows = rel->tuples *
clauselist_selectivity(root,
allclauses,
- rel->relid, /* do not use 0! */
+ rel->relid, /* do not use 0! */
JOIN_INNER);
/* Like costsize.c, force estimate to be at least one row */
pathnode->rows = clamp_row_est(pathnode->rows);
@@ -501,8 +499,8 @@ create_index_path(PlannerInfo *root,
else
{
/*
- * The number of rows is the same as the parent rel's estimate,
- * since this isn't a join inner indexscan.
+ * The number of rows is the same as the parent rel's estimate, since
+ * this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -528,7 +526,7 @@ create_bitmap_heap_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapHeapScan;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapqual = bitmapqual;
pathnode->isjoininner = isjoininner;
@@ -539,9 +537,9 @@ create_bitmap_heap_path(PlannerInfo *root,
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. We make use of the selectivity
- * estimated for the bitmap to do this; this isn't really quite
- * right since there may be restriction conditions not included
- * in the bitmap ...
+ * estimated for the bitmap to do this; this isn't really quite right
+ * since there may be restriction conditions not included in the
+ * bitmap ...
*/
Cost indexTotalCost;
Selectivity indexSelectivity;
@@ -556,8 +554,8 @@ create_bitmap_heap_path(PlannerInfo *root,
else
{
/*
- * The number of rows is the same as the parent rel's estimate,
- * since this isn't a join inner indexscan.
+ * The number of rows is the same as the parent rel's estimate, since
+ * this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
@@ -580,7 +578,7 @@ create_bitmap_and_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapAnd;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
@@ -603,7 +601,7 @@ create_bitmap_or_path(PlannerInfo *root,
pathnode->path.pathtype = T_BitmapOr;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
@@ -759,8 +757,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
return (UniquePath *) rel->cheapest_unique_path;
/*
- * We must ensure path struct is allocated in same context as parent
- * rel; otherwise GEQO memory management causes trouble. (Compare
+ * We must ensure path struct is allocated in same context as parent rel;
+ * otherwise GEQO memory management causes trouble. (Compare
* best_inner_indexscan().)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
@@ -774,17 +772,17 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
pathnode->path.parent = rel;
/*
- * Treat the output as always unsorted, since we don't necessarily
- * have pathkeys to represent it.
+ * Treat the output as always unsorted, since we don't necessarily have
+ * pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;
pathnode->subpath = subpath;
/*
- * Try to identify the targetlist that will actually be unique-ified.
- * In current usage, this routine is only used for sub-selects of IN
- * clauses, so we should be able to find the tlist in in_info_list.
+ * Try to identify the targetlist that will actually be unique-ified. In
+ * current usage, this routine is only used for sub-selects of IN clauses,
+ * so we should be able to find the tlist in in_info_list.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@@ -799,19 +797,19 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
}
/*
- * If the input is a subquery whose output must be unique already,
- * then we don't need to do anything. The test for uniqueness has
- * to consider exactly which columns we are extracting; for example
- * "SELECT DISTINCT x,y" doesn't guarantee that x alone is distinct.
- * So we cannot check for this optimization unless we found our own
- * targetlist above, and it consists only of simple Vars referencing
- * subquery outputs. (Possibly we could do something with expressions
- * in the subquery outputs, too, but for now keep it simple.)
+ * If the input is a subquery whose output must be unique already, then we
+ * don't need to do anything. The test for uniqueness has to consider
+ * exactly which columns we are extracting; for example "SELECT DISTINCT
+ * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
+ * this optimization unless we found our own targetlist above, and it
+ * consists only of simple Vars referencing subquery outputs. (Possibly
+ * we could do something with expressions in the subquery outputs, too,
+ * but for now keep it simple.)
*/
if (sub_targetlist && rel->rtekind == RTE_SUBQUERY)
{
RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable);
- List *sub_tlist_colnos;
+ List *sub_tlist_colnos;
sub_tlist_colnos = translate_sub_tlist(sub_targetlist, rel->relid);
@@ -854,24 +852,23 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
rel->width);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX
- * probably this is an overestimate.) This should agree with
- * make_unique.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples. (XXX probably this is
+ * an overestimate.) This should agree with make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
/*
- * Is it safe to use a hashed implementation? If so, estimate and
- * compare costs. We only try this if we know the targetlist for sure
- * (else we can't be sure about the datatypes involved).
+ * Is it safe to use a hashed implementation? If so, estimate and compare
+ * costs. We only try this if we know the targetlist for sure (else we
+ * can't be sure about the datatypes involved).
*/
pathnode->umethod = UNIQUE_PATH_SORT;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
- * Estimate the overhead per hashtable entry at 64 bytes (same as
- * in planner.c).
+ * Estimate the overhead per hashtable entry at 64 bytes (same as in
+ * planner.c).
*/
int hashentrysize = rel->width + 64;
@@ -923,7 +920,7 @@ translate_sub_tlist(List *tlist, int relid)
foreach(l, tlist)
{
- Var *var = (Var *) lfirst(l);
+ Var *var = (Var *) lfirst(l);
if (!var || !IsA(var, Var) ||
var->varno != relid)
@@ -987,8 +984,8 @@ query_is_distinct_for(Query *query, List *colnos)
else
{
/*
- * If we have no GROUP BY, but do have aggregates or HAVING, then
- * the result is at most one row so it's surely unique.
+ * If we have no GROUP BY, but do have aggregates or HAVING, then the
+ * result is at most one row so it's surely unique.
*/
if (query->hasAggs || query->havingQual)
return true;
@@ -1167,8 +1164,8 @@ create_mergejoin_path(PlannerInfo *root,
MergePath *pathnode = makeNode(MergePath);
/*
- * If the given paths are already well enough ordered, we can skip
- * doing an explicit sort.
+ * If the given paths are already well enough ordered, we can skip doing
+ * an explicit sort.
*/
if (outersortkeys &&
pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
@@ -1178,15 +1175,15 @@ create_mergejoin_path(PlannerInfo *root,
innersortkeys = NIL;
/*
- * If we are not sorting the inner path, we may need a materialize
- * node to ensure it can be marked/restored. (Sort does support
- * mark/restore, so no materialize is needed in that case.)
+ * If we are not sorting the inner path, we may need a materialize node to
+ * ensure it can be marked/restored. (Sort does support mark/restore, so
+ * no materialize is needed in that case.)
*
- * Since the inner side must be ordered, and only Sorts and IndexScans
- * can create order to begin with, you might think there's no problem
- * --- but you'd be wrong. Nestloop and merge joins can *preserve*
- * the order of their inputs, so they can be selected as the input of
- * a mergejoin, and they don't support mark/restore at present.
+ * Since the inner side must be ordered, and only Sorts and IndexScans can
+ * create order to begin with, you might think there's no problem --- but
+ * you'd be wrong. Nestloop and merge joins can *preserve* the order of
+ * their inputs, so they can be selected as the input of a mergejoin, and
+ * they don't support mark/restore at present.
*/
if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index d1656350f2c..16868939405 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.113 2005/07/23 21:05:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
static void estimate_rel_size(Relation rel, int32 *attr_widths,
- BlockNumber *pages, double *tuples);
+ BlockNumber *pages, double *tuples);
/*
@@ -71,18 +71,18 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Normally, we can assume the rewriter already acquired at least
- * AccessShareLock on each relation used in the query. However this
- * will not be the case for relations added to the query because they
- * are inheritance children of some relation mentioned explicitly.
- * For them, this is the first access during the parse/rewrite/plan
- * pipeline, and so we need to obtain and keep a suitable lock.
+ * AccessShareLock on each relation used in the query. However this will
+ * not be the case for relations added to the query because they are
+ * inheritance children of some relation mentioned explicitly. For them,
+ * this is the first access during the parse/rewrite/plan pipeline, and so
+ * we need to obtain and keep a suitable lock.
*
- * XXX really, a suitable lock is RowShareLock if the relation is
- * an UPDATE/DELETE target, and AccessShareLock otherwise. However
- * we cannot easily tell here which to get, so for the moment just
- * get AccessShareLock always. The executor will get the right lock
- * when it runs, which means there is a very small chance of deadlock
- * trying to upgrade our lock.
+ * XXX really, a suitable lock is RowShareLock if the relation is an
+ * UPDATE/DELETE target, and AccessShareLock otherwise. However we cannot
+ * easily tell here which to get, so for the moment just get
+ * AccessShareLock always. The executor will get the right lock when it
+ * runs, which means there is a very small chance of deadlock trying to
+ * upgrade our lock.
*/
if (rel->reloptkind == RELOPT_BASEREL)
relation = heap_open(relationObjectId, NoLock);
@@ -105,8 +105,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
&rel->pages, &rel->tuples);
/*
- * Make list of indexes. Ignore indexes on system catalogs if told
- * to.
+ * Make list of indexes. Ignore indexes on system catalogs if told to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
@@ -133,10 +132,10 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Extract info from the relation descriptor for the index.
*
- * Note that we take no lock on the index; we assume our lock on
- * the parent table will protect the index's schema information.
- * When and if the executor actually uses the index, it will take
- * a lock as needed to protect the access to the index contents.
+ * Note that we take no lock on the index; we assume our lock on the
+ * parent table will protect the index's schema information. When
+ * and if the executor actually uses the index, it will take a
+ * lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
@@ -148,8 +147,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->ncolumns = ncolumns = index->indnatts;
/*
- * Need to make classlist and ordering arrays large enough to
- * put a terminating 0 at the end of each one.
+ * Need to make classlist and ordering arrays large enough to put
+ * a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
@@ -166,8 +165,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
/*
- * Fetch the ordering operators associated with the index, if
- * any.
+ * Fetch the ordering operators associated with the index, if any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
@@ -184,8 +182,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
- * correct varno for the parent relation, so that they match
- * up correctly against qual clauses.
+ * correct varno for the parent relation, so that they match up
+ * correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
@@ -197,11 +195,11 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->unique = index->indisunique;
/*
- * Estimate the index size. If it's not a partial index, we
- * lock the number-of-tuples estimate to equal the parent table;
- * if it is partial then we have to use the same methods as we
- * would for a table, except we can be sure that the index is
- * not larger than the table.
+ * Estimate the index size. If it's not a partial index, we lock
+ * the number-of-tuples estimate to equal the parent table; if it
+ * is partial then we have to use the same methods as we would for
+ * a table, except we can be sure that the index is not larger
+ * than the table.
*/
if (info->indpred == NIL)
{
@@ -241,8 +239,8 @@ static void
estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples)
{
- BlockNumber curpages;
- BlockNumber relpages;
+ BlockNumber curpages;
+ BlockNumber relpages;
double reltuples;
double density;
@@ -256,22 +254,22 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/*
* HACK: if the relation has never yet been vacuumed, use a
- * minimum estimate of 10 pages. This emulates a desirable
- * aspect of pre-8.0 behavior, which is that we wouldn't assume
- * a newly created relation is really small, which saves us from
- * making really bad plans during initial data loading. (The
- * plans are not wrong when they are made, but if they are cached
- * and used again after the table has grown a lot, they are bad.)
- * It would be better to force replanning if the table size has
- * changed a lot since the plan was made ... but we don't
- * currently have any infrastructure for redoing cached plans at
- * all, so we have to kluge things here instead.
+ * minimum estimate of 10 pages. This emulates a desirable aspect
+ * of pre-8.0 behavior, which is that we wouldn't assume a newly
+ * created relation is really small, which saves us from making
+ * really bad plans during initial data loading. (The plans are
+ * not wrong when they are made, but if they are cached and used
+ * again after the table has grown a lot, they are bad.) It would
+ * be better to force replanning if the table size has changed a
+ * lot since the plan was made ... but we don't currently have any
+ * infrastructure for redoing cached plans at all, so we have to
+ * kluge things here instead.
*
- * We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
- * great, but fortunately that's a seldom-seen case in the real
- * world, and it shouldn't degrade the quality of the plan too
- * much anyway to err in this direction.
+ * We approximate "never vacuumed" by "has relpages = 0", which means
+ * this will also fire on genuinely empty relations. Not great,
+ * but fortunately that's a seldom-seen case in the real world,
+ * and it shouldn't degrade the quality of the plan too much
+ * anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
@@ -287,6 +285,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
+
/*
* If it's an index, discount the metapage. This is a kluge
* because it assumes more than it ought to about index contents;
@@ -307,19 +306,19 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* When we have no data because the relation was truncated,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
- * tables (since they've presumably not been VACUUMed yet)
- * but is probably an overestimate for indexes. Fortunately
+ * tables (since they've presumably not been VACUUMed yet) but
+ * is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
- * considerations, because (a) that would be gilding the
- * lily considering how crude the estimate is, and (b)
- * it creates platform dependencies in the default plans
- * which are kind of a headache for regression testing.
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind
+ * of a headache for regression testing.
*/
- int32 tuple_width = 0;
- int i;
+ int32 tuple_width = 0;
+ int i;
for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
@@ -391,12 +390,12 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
constr = relation->rd_att->constr;
if (constr != NULL)
{
- int num_check = constr->num_check;
- int i;
+ int num_check = constr->num_check;
+ int i;
for (i = 0; i < num_check; i++)
{
- Node *cexpr;
+ Node *cexpr;
cexpr = stringToNode(constr->check[i].ccbin);
@@ -425,8 +424,8 @@ get_relation_constraints(Oid relationObjectId, RelOptInfo *rel)
ChangeVarNodes(cexpr, 1, varno, 0);
/*
- * Finally, convert to implicit-AND format (that is, a List)
- * and append the resulting item(s) to our output list.
+ * Finally, convert to implicit-AND format (that is, a List) and
+ * append the resulting item(s) to our output list.
*/
result = list_concat(result,
make_ands_implicit((Expr *) cexpr));
@@ -532,11 +531,12 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
break;
case RTE_FUNCTION:
- expandRTE(rte, varno, 0, true /* include dropped */,
+ expandRTE(rte, varno, 0, true /* include dropped */ ,
NULL, &colvars);
foreach(l, colvars)
{
var = (Var *) lfirst(l);
+
/*
* A non-Var in expandRTE's output means a dropped column;
* must punt.
@@ -727,11 +727,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
/*
- * Note: ignore partial indexes, since they don't allow us to
- * conclude that all attr values are distinct. We don't take any
- * interest in expressional indexes either. Also, a multicolumn
- * unique index doesn't allow us to conclude that just the
- * specified attr is unique.
+ * Note: ignore partial indexes, since they don't allow us to conclude
+ * that all attr values are distinct. We don't take any interest in
+ * expressional indexes either. Also, a multicolumn unique index
+ * doesn't allow us to conclude that just the specified attr is
+ * unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 2a0896fa63e..48ae77ac55e 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.3 2005/10/06 16:01:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.4 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@ static bool predicate_refuted_by_recurse(Node *clause, Node *predicate);
static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause);
static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause);
static bool btree_predicate_proof(Expr *predicate, Node *clause,
- bool refute_it);
+ bool refute_it);
/*
@@ -66,9 +66,9 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* In all cases where the predicate is an AND-clause,
* predicate_implied_by_recurse() will prefer to iterate over the
- * predicate's components. So we can just do that to start with here,
- * and eliminate the need for predicate_implied_by_recurse() to handle
- * a bare List on the predicate side.
+ * predicate's components. So we can just do that to start with here, and
+ * eliminate the need for predicate_implied_by_recurse() to handle a bare
+ * List on the predicate side.
*
* Logic is: restriction must imply each of the AND'ed predicate items.
*/
@@ -110,11 +110,11 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
return false; /* no restriction: refutation must fail */
/*
- * Unlike the implication case, predicate_refuted_by_recurse needs to
- * be able to see the top-level AND structure on both sides --- otherwise
- * it will fail to handle the case where one restriction clause is an OR
- * that can refute the predicate AND as a whole, but not each predicate
- * clause separately.
+ * Unlike the implication case, predicate_refuted_by_recurse needs to be
+ * able to see the top-level AND structure on both sides --- otherwise it
+ * will fail to handle the case where one restriction clause is an OR that
+ * can refute the predicate AND as a whole, but not each predicate clause
+ * separately.
*/
return predicate_refuted_by_recurse((Node *) restrictinfo_list,
(Node *) predicate_list);
@@ -137,7 +137,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -152,7 +152,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* under the assumption that both inputs have been AND/OR flattened.
*
* A bare List node on the restriction side is interpreted as an AND clause,
- * in order to handle the top-level restriction List properly. However we
+ * in order to handle the top-level restriction List properly. However we
* need not consider a List on the predicate side since predicate_implied_by()
* already expanded it.
*
@@ -228,8 +228,8 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
if (or_clause(predicate))
{
/*
- * OR-clause => OR-clause if each of A's items implies any of
- * B's items. Messy but can't do it any more simply.
+ * OR-clause => OR-clause if each of A's items implies any of B's
+ * items. Messy but can't do it any more simply.
*/
foreach(item, ((BoolExpr *) clause)->args)
{
@@ -242,7 +242,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
break;
}
if (item2 == NULL)
- return false; /* doesn't imply any of B's */
+ return false; /* doesn't imply any of B's */
}
return true;
}
@@ -520,7 +520,7 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause)
*
* When the predicate is of the form "foo IS NULL", we can conclude that
* the predicate is refuted if the clause is a strict operator or function
- * that has "foo" as an input. See notes for implication case.
+ * that has "foo" as an input. See notes for implication case.
*
* Finally, we may be able to deduce something using knowledge about btree
* operator classes; this is encapsulated in btree_predicate_proof().
@@ -602,28 +602,28 @@ static const StrategyNumber BT_implic_table[6][6] = {
/*
* The target operator:
*
- * LT LE EQ GE GT NE
+ * LT LE EQ GE GT NE
*/
- {BTGE, BTGE, 0 , 0 , 0 , BTGE}, /* LT */
- {BTGT, BTGE, 0 , 0 , 0 , BTGT}, /* LE */
- {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
- {0 , 0 , 0 , BTLE, BTLT, BTLT}, /* GE */
- {0 , 0 , 0 , BTLE, BTLE, BTLE}, /* GT */
- {0 , 0 , 0 , 0 , 0 , BTEQ} /* NE */
+ {BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
+ {BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
+ {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
+ {0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
+ {0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
+ {0, 0, 0, 0, 0, BTEQ} /* NE */
};
static const StrategyNumber BT_refute_table[6][6] = {
/*
* The target operator:
*
- * LT LE EQ GE GT NE
+ * LT LE EQ GE GT NE
*/
- {0 , 0 , BTGE, BTGE, BTGE, 0 }, /* LT */
- {0 , 0 , BTGT, BTGT, BTGE, 0 }, /* LE */
- {BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
- {BTLE, BTLT, BTLT, 0 , 0 , 0 }, /* GE */
- {BTLE, BTLE, BTLE, 0 , 0 , 0 }, /* GT */
- {0 , 0 , BTEQ, 0 , 0 , 0 } /* NE */
+ {0, 0, BTGE, BTGE, BTGE, 0}, /* LT */
+ {0, 0, BTGT, BTGT, BTGE, 0}, /* LE */
+ {BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
+ {BTLE, BTLT, BTLT, 0, 0, 0}, /* GE */
+ {BTLE, BTLE, BTLE, 0, 0, 0}, /* GT */
+ {0, 0, BTEQ, 0, 0, 0} /* NE */
};
@@ -683,13 +683,13 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
MemoryContext oldcontext;
/*
- * Both expressions must be binary opclauses with a
- * Const on one side, and identical subexpressions on the other sides.
- * Note we don't have to think about binary relabeling of the Const
- * node, since that would have been folded right into the Const.
+ * Both expressions must be binary opclauses with a Const on one side, and
+ * identical subexpressions on the other sides. Note we don't have to
+ * think about binary relabeling of the Const node, since that would have
+ * been folded right into the Const.
*
- * If either Const is null, we also fail right away; this assumes that
- * the test operator will always be strict.
+ * If either Const is null, we also fail right away; this assumes that the
+ * test operator will always be strict.
*/
if (!is_opclause(predicate))
return false;
@@ -738,11 +738,11 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
return false;
/*
- * Check for matching subexpressions on the non-Const sides. We used
- * to only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does
- * not contain any non-immutable functions, so identical expressions
- * should yield identical results.
+ * Check for matching subexpressions on the non-Const sides. We used to
+ * only allow a simple Var, but it's about as easy to allow any
+ * expression. Remember we already know that the pred expression does not
+ * contain any non-immutable functions, so identical expressions should
+ * yield identical results.
*/
if (!equal(pred_var, clause_var))
return false;
@@ -772,24 +772,24 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* We must find a btree opclass that contains both operators, else the
* implication can't be determined. Also, the pred_op has to be of
- * default subtype (implying left and right input datatypes are the
- * same); otherwise it's unsafe to put the pred_const on the left side
- * of the test. Also, the opclass must contain a suitable test
- * operator matching the clause_const's type (which we take to mean
- * that it has the same subtype as the original clause_operator).
+ * default subtype (implying left and right input datatypes are the same);
+ * otherwise it's unsafe to put the pred_const on the left side of the
+ * test. Also, the opclass must contain a suitable test operator matching
+ * the clause_const's type (which we take to mean that it has the same
+ * subtype as the original clause_operator).
*
* If there are multiple matching opclasses, assume we can use any one to
- * determine the logical relationship of the two operators and the
- * correct corresponding test operator. This should work for any
- * logically consistent opclasses.
+ * determine the logical relationship of the two operators and the correct
+ * corresponding test operator. This should work for any logically
+ * consistent opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
0, 0, 0);
/*
- * If we couldn't find any opclass containing the pred_op, perhaps it
- * is a <> operator. See if it has a negator that is in an opclass.
+ * If we couldn't find any opclass containing the pred_op, perhaps it is a
+ * <> operator. See if it has a negator that is in an opclass.
*/
pred_op_negated = false;
if (catlist->n_members == 0)
@@ -800,7 +800,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
pred_op_negated = true;
ReleaseSysCacheList(catlist);
catlist = SearchSysCacheList(AMOPOPID, 1,
- ObjectIdGetDatum(pred_op_negator),
+ ObjectIdGetDatum(pred_op_negator),
0, 0, 0);
}
}
@@ -837,8 +837,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}
/*
- * From the same opclass, find a strategy number for the
- * clause_op, if possible
+ * From the same opclass, find a strategy number for the clause_op, if
+ * possible
*/
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op),
@@ -857,7 +857,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache(AMOPOPID,
- ObjectIdGetDatum(clause_op_negator),
+ ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opclass_id),
0, 0);
if (HeapTupleIsValid(clause_tuple))
@@ -896,8 +896,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
}
/*
- * See if opclass has an operator for the test strategy and the
- * clause datatype.
+ * See if opclass has an operator for the test strategy and the clause
+ * datatype.
*/
if (test_strategy == BTNE)
{
@@ -918,9 +918,9 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* Note that we require only the test_op to be immutable, not the
* original clause_op. (pred_op is assumed to have been checked
- * immutable by the caller.) Essentially we are assuming that
- * the opclass is consistent even if it contains operators that
- * are merely stable.
+ * immutable by the caller.) Essentially we are assuming that the
+ * opclass is consistent even if it contains operators that are
+ * merely stable.
*/
if (op_volatile(test_op) == PROVOLATILE_IMMUTABLE)
{
@@ -958,7 +958,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index e595749c291..3ca43759e96 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.71 2005/07/28 22:27:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,9 +31,9 @@ typedef struct JoinHashEntry
} JoinHashEntry;
static RelOptInfo *make_reloptinfo(PlannerInfo *root, int relid,
- RelOptKind reloptkind);
+ RelOptKind reloptkind);
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
- RelOptInfo *input_rel);
+ RelOptInfo *input_rel);
static List *build_joinrel_restrictlist(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
@@ -165,8 +165,8 @@ make_reloptinfo(PlannerInfo *root, int relid, RelOptKind reloptkind)
/* Add the finished struct to the base_rel_array */
if (relid >= root->base_rel_array_size)
{
- int oldsize = root->base_rel_array_size;
- int newsize;
+ int oldsize = root->base_rel_array_size;
+ int newsize;
newsize = Max(oldsize * 2, relid + 1);
root->base_rel_array = (RelOptInfo **)
@@ -225,7 +225,7 @@ build_join_rel_hash(PlannerInfo *root)
hashtab = hash_create("JoinRelHashTable",
256L,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
/* Insert all the already-existing joinrels */
foreach(l, root->join_rel_list)
@@ -254,7 +254,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
@@ -263,10 +263,10 @@ find_join_rel(PlannerInfo *root, Relids relids)
/*
* Use either hashtable lookup or linear search, as appropriate.
*
- * Note: the seemingly redundant hashkey variable is used to avoid
- * taking the address of relids; unless the compiler is exceedingly
- * smart, doing so would force relids out of a register and thus
- * probably slow down the list-search case.
+ * Note: the seemingly redundant hashkey variable is used to avoid taking the
+ * address of relids; unless the compiler is exceedingly smart, doing so
+ * would force relids out of a register and thus probably slow down the
+ * list-search case.
*/
if (root->join_rel_hash)
{
@@ -331,8 +331,8 @@ build_join_rel(PlannerInfo *root,
if (joinrel)
{
/*
- * Yes, so we only need to figure the restrictlist for this
- * particular pair of component relations.
+ * Yes, so we only need to figure the restrictlist for this particular
+ * pair of component relations.
*/
if (restrictlist_ptr)
*restrictlist_ptr = build_joinrel_restrictlist(root,
@@ -375,21 +375,20 @@ build_join_rel(PlannerInfo *root,
joinrel->index_inner_paths = NIL;
/*
- * Create a new tlist containing just the vars that need to be output
- * from this join (ie, are needed for higher joinclauses or final
- * output).
+ * Create a new tlist containing just the vars that need to be output from
+ * this join (ie, are needed for higher joinclauses or final output).
*
- * NOTE: the tlist order for a join rel will depend on which pair of
- * outer and inner rels we first try to build it from. But the
- * contents should be the same regardless.
+ * NOTE: the tlist order for a join rel will depend on which pair of outer
+ * and inner rels we first try to build it from. But the contents should
+ * be the same regardless.
*/
build_joinrel_tlist(root, joinrel, outer_rel);
build_joinrel_tlist(root, joinrel, inner_rel);
/*
* Construct restrict and join clause lists for the new joinrel. (The
- * caller might or might not need the restrictlist, but I need it
- * anyway for set_joinrel_size_estimates().)
+ * caller might or might not need the restrictlist, but I need it anyway
+ * for set_joinrel_size_estimates().)
*/
restrictlist = build_joinrel_restrictlist(root,
joinrel,
@@ -407,9 +406,9 @@ build_join_rel(PlannerInfo *root,
jointype, restrictlist);
/*
- * Add the joinrel to the query's joinrel list, and store it into
- * the auxiliary hashtable if there is one. NB: GEQO requires us
- * to append the new joinrel to the end of the list!
+ * Add the joinrel to the query's joinrel list, and store it into the
+ * auxiliary hashtable if there is one. NB: GEQO requires us to append
+ * the new joinrel to the end of the list!
*/
root->join_rel_list = lappend(root->join_rel_list, joinrel);
@@ -527,18 +526,18 @@ build_joinrel_restrictlist(PlannerInfo *root,
* Collect all the clauses that syntactically belong at this level.
*/
rlist = list_concat(subbuild_joinrel_restrictlist(joinrel,
- outer_rel->joininfo),
+ outer_rel->joininfo),
subbuild_joinrel_restrictlist(joinrel,
- inner_rel->joininfo));
+ inner_rel->joininfo));
/*
* Eliminate duplicate and redundant clauses.
*
- * We must eliminate duplicates, since we will see many of the same
- * clauses arriving from both input relations. Also, if a clause is a
- * mergejoinable clause, it's possible that it is redundant with
- * previous clauses (see optimizer/README for discussion). We detect
- * that case and omit the redundant clause from the result list.
+ * We must eliminate duplicates, since we will see many of the same clauses
+ * arriving from both input relations. Also, if a clause is a
+ * mergejoinable clause, it's possible that it is redundant with previous
+ * clauses (see optimizer/README for discussion). We detect that case and
+ * omit the redundant clause from the result list.
*/
result = remove_redundant_join_clauses(root, rlist,
IS_OUTER_JOIN(jointype));
@@ -571,18 +570,17 @@ subbuild_joinrel_restrictlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
- * This clause becomes a restriction clause for the joinrel,
- * since it refers to no outside rels. We don't bother to
- * check for duplicates here --- build_joinrel_restrictlist
- * will do that.
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. We don't bother to check for
+ * duplicates here --- build_joinrel_restrictlist will do that.
*/
restrictlist = lappend(restrictlist, rinfo);
}
else
{
/*
- * This clause is still a join clause at this level, so we
- * ignore it in this routine.
+ * This clause is still a join clause at this level, so we ignore
+ * it in this routine.
*/
}
}
@@ -603,17 +601,17 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel,
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
- * This clause becomes a restriction clause for the joinrel,
- * since it refers to no outside rels. So we can ignore it
- * in this routine.
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. So we can ignore it in this
+ * routine.
*/
}
else
{
/*
- * This clause is still a join clause at this level, so add
- * it to the joininfo list for the joinrel, being careful to
- * eliminate duplicates. (Since RestrictInfo nodes are normally
+ * This clause is still a join clause at this level, so add it to
+ * the joininfo list for the joinrel, being careful to eliminate
+ * duplicates. (Since RestrictInfo nodes are normally
* multiply-linked rather than copied, pointer equality should be
* a sufficient test. If two equal() nodes should happen to sneak
* in, no great harm is done --- they'll be detected by
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 47b90aef46a..d277cac7351 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.40 2005/10/13 00:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,8 +51,8 @@ RestrictInfo *
make_restrictinfo(Expr *clause, bool is_pushed_down, Relids required_relids)
{
/*
- * If it's an OR clause, build a modified copy with RestrictInfos
- * inserted above each subclause of the top-level AND/OR structure.
+ * If it's an OR clause, build a modified copy with RestrictInfos inserted
+ * above each subclause of the top-level AND/OR structure.
*/
if (or_clause((Node *) clause))
return (RestrictInfo *) make_sub_restrictinfos(clause, is_pushed_down);
@@ -101,9 +101,9 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
- * different index quals. We don't try exceedingly hard to
- * eliminate redundancies, but we do eliminate obvious duplicates
- * by using list_concat_unique.
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
*/
result = NIL;
foreach(l, apath->bitmapquals)
@@ -125,7 +125,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -142,8 +142,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
{
/*
* If we find a qual-less subscan, it represents a constant
- * TRUE, and hence the OR result is also constant TRUE, so
- * we can stop here.
+ * TRUE, and hence the OR result is also constant TRUE, so we
+ * can stop here.
*/
return NIL;
}
@@ -157,8 +157,8 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
/*
- * Avoid generating one-element ORs, which could happen
- * due to redundancy elimination.
+ * Avoid generating one-element ORs, which could happen due to
+ * redundancy elimination.
*/
if (list_length(withris) <= 1)
result = withris;
@@ -174,20 +174,20 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
else if (IsA(bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexPath *ipath = (IndexPath *) bitmapqual;
result = list_copy(ipath->indexclauses);
if (include_predicates && ipath->indexinfo->indpred != NIL)
{
foreach(l, ipath->indexinfo->indpred)
{
- Expr *pred = (Expr *) lfirst(l);
+ Expr *pred = (Expr *) lfirst(l);
/*
- * We know that the index predicate must have been implied
- * by the query condition as a whole, but it may or may not
- * be implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * We know that the index predicate must have been implied by
+ * the query condition as a whole, but it may or may not be
+ * implied by the conditions that got pushed into the
+ * bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@@ -223,8 +223,8 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->can_join = false; /* may get set below */
/*
- * If it's a binary opclause, set up left/right relids info. In any
- * case set up the total clause relids info.
+ * If it's a binary opclause, set up left/right relids info. In any case
+ * set up the total clause relids info.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
@@ -232,13 +232,13 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->right_relids = pull_varnos(get_rightop(clause));
restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
- restrictinfo->right_relids);
+ restrictinfo->right_relids);
/*
* Does it look like a normal join clause, i.e., a binary operator
- * relating expressions that come from distinct relations? If so
- * we might be able to use it in a join algorithm. Note that this
- * is a purely syntactic test that is made regardless of context.
+ * relating expressions that come from distinct relations? If so we
+ * might be able to use it in a join algorithm. Note that this is a
+ * purely syntactic test that is made regardless of context.
*/
if (!bms_is_empty(restrictinfo->left_relids) &&
!bms_is_empty(restrictinfo->right_relids) &&
@@ -262,11 +262,11 @@ make_restrictinfo_internal(Expr *clause, Expr *orclause,
restrictinfo->required_relids = restrictinfo->clause_relids;
/*
- * Fill in all the cacheable fields with "not yet set" markers. None
- * of these will be computed until/unless needed. Note in particular
- * that we don't mark a binary opclause as mergejoinable or
- * hashjoinable here; that happens only if it appears in the right
- * context (top level of a joinclause list).
+ * Fill in all the cacheable fields with "not yet set" markers. None of
+ * these will be computed until/unless needed. Note in particular that we
+ * don't mark a binary opclause as mergejoinable or hashjoinable here;
+ * that happens only if it appears in the right context (top level of a
+ * joinclause list).
*/
restrictinfo->eval_cost.startup = -1;
restrictinfo->this_selec = -1;
@@ -420,17 +420,16 @@ remove_redundant_join_clauses(PlannerInfo *root, List *restrictinfo_list,
QualCost cost;
/*
- * If there are any redundant clauses, we want to eliminate the ones
- * that are more expensive in favor of the ones that are less so. Run
+ * If there are any redundant clauses, we want to eliminate the ones that
+ * are more expensive in favor of the ones that are less so. Run
* cost_qual_eval() to ensure the eval_cost fields are set up.
*/
cost_qual_eval(&cost, restrictinfo_list);
/*
- * We don't have enough knowledge yet to be able to estimate the
- * number of times a clause might be evaluated, so it's hard to weight
- * the startup and per-tuple costs appropriately. For now just weight
- * 'em the same.
+ * We don't have enough knowledge yet to be able to estimate the number of
+ * times a clause might be evaluated, so it's hard to weight the startup
+ * and per-tuple costs appropriately. For now just weight 'em the same.
*/
#define CLAUSECOST(r) ((r)->eval_cost.startup + (r)->eval_cost.per_tuple)
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 1672cda77c0..955aceeffff 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.69 2005/04/06 16:34:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.70 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ add_to_flat_tlist(List *tlist, List *vars)
{
TargetEntry *tle;
- tle = makeTargetEntry(copyObject(var), /* copy needed?? */
+ tle = makeTargetEntry(copyObject(var), /* copy needed?? */
next_resno++,
NULL,
false);
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index abd01ca157a..dc1004cbd0e 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.65 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.66 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ pull_varnos(Node *node)
context.sublevels_up = 0;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
pull_varnos_walker,
@@ -149,8 +149,8 @@ contain_var_reference(Node *node, int varno, int varattno, int levelsup)
context.sublevels_up = levelsup;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
contain_var_reference_walker,
@@ -215,8 +215,7 @@ contain_var_clause_walker(Node *node, void *context)
if (IsA(node, Var))
{
if (((Var *) node)->varlevelsup == 0)
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return false;
}
return expression_tree_walker(node, contain_var_clause_walker, context);
@@ -286,7 +285,7 @@ contain_vars_above_level(Node *node, int levelsup)
int sublevels_up = levelsup;
return query_or_expression_tree_walker(node,
- contain_vars_above_level_walker,
+ contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
@@ -370,8 +369,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = varlevelsup;
/*
- * As soon as we find a local variable, we can abort the
- * tree traversal, since min_varlevel is then certainly 0.
+ * As soon as we find a local variable, we can abort the tree
+ * traversal, since min_varlevel is then certainly 0.
*/
if (varlevelsup == 0)
return true;
@@ -380,10 +379,9 @@ find_minimum_var_level_walker(Node *node,
}
/*
- * An Aggref must be treated like a Var of its level. Normally we'd
- * get the same result from looking at the Vars in the aggregate's
- * argument, but this fails in the case of a Var-less aggregate call
- * (COUNT(*)).
+ * An Aggref must be treated like a Var of its level. Normally we'd get
+ * the same result from looking at the Vars in the aggregate's argument,
+ * but this fails in the case of a Var-less aggregate call (COUNT(*)).
*/
if (IsA(node, Aggref))
{
@@ -400,8 +398,8 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel = agglevelsup;
/*
- * As soon as we find a local aggregate, we can abort the
- * tree traversal, since min_varlevel is then certainly 0.
+ * As soon as we find a local aggregate, we can abort the tree
+ * traversal, since min_varlevel is then certainly 0.
*/
if (agglevelsup == 0)
return true;
@@ -553,8 +551,8 @@ flatten_join_alias_vars_mutator(Node *node,
newvar = (Node *) list_nth(rte->joinaliasvars, var->varattno - 1);
/*
- * If we are expanding an alias carried down from an upper query,
- * must adjust its varlevelsup fields.
+ * If we are expanding an alias carried down from an upper query, must
+ * adjust its varlevelsup fields.
*/
if (context->sublevels_up != 0)
{
@@ -570,8 +568,8 @@ flatten_join_alias_vars_mutator(Node *node,
InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
- flatten_join_alias_vars_mutator,
- (void *) context);
+ flatten_join_alias_vars_mutator,
+ (void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index be91872df55..46dbb3f1488 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.325 2005/10/02 23:50:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.326 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,14 +59,13 @@ typedef struct
List *indexes; /* CREATE INDEX items */
List *triggers; /* CREATE TRIGGER items */
List *grants; /* GRANT items */
- List *fwconstraints; /* Forward referencing FOREIGN KEY
- * constraints */
+ List *fwconstraints; /* Forward referencing FOREIGN KEY constraints */
List *alters; /* Generated ALTER items (from the above) */
List *ixconstraints; /* index-creating constraints */
List *blist; /* "before list" of things to do before
* creating the schema */
- List *alist; /* "after list" of things to do after
- * creating the schema */
+ List *alist; /* "after list" of things to do after creating
+ * the schema */
} CreateSchemaStmtContext;
/* State shared by transformCreateStmt and its subroutines */
@@ -83,8 +82,8 @@ typedef struct
List *ixconstraints; /* index-creating constraints */
List *blist; /* "before list" of things to do before
* creating the table */
- List *alist; /* "after list" of things to do after
- * creating the table */
+ List *alist; /* "after list" of things to do after creating
+ * the table */
IndexStmt *pkey; /* PRIMARY KEY index, if any */
} CreateStmtContext;
@@ -140,7 +139,7 @@ static void transformColumnType(ParseState *pstate, ColumnDef *column);
static void release_pstate_resources(ParseState *pstate);
static FromExpr *makeFromExpr(List *fromlist, Node *quals);
static bool check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context);
+ check_parameter_resolution_context *context);
/*
@@ -255,11 +254,10 @@ do_parse_analyze(Node *parseTree, ParseState *pstate)
result = list_concat(result, parse_sub_analyze(lfirst(l), pstate));
/*
- * Make sure that only the original query is marked original. We have
- * to do this explicitly since recursive calls of do_parse_analyze
- * will have marked some of the added-on queries as "original". Also
- * mark only the original query as allowed to set the command-result
- * tag.
+ * Make sure that only the original query is marked original. We have to
+ * do this explicitly since recursive calls of do_parse_analyze will have
+ * marked some of the added-on queries as "original". Also mark only the
+ * original query as allowed to set the command-result tag.
*/
foreach(l, result)
{
@@ -371,19 +369,19 @@ transformStmt(ParseState *pstate, Node *parseTree,
(SelectStmt *) parseTree);
else
result = transformSetOperationStmt(pstate,
- (SelectStmt *) parseTree);
+ (SelectStmt *) parseTree);
break;
case T_DeclareCursorStmt:
result = transformDeclareCursorStmt(pstate,
- (DeclareCursorStmt *) parseTree);
+ (DeclareCursorStmt *) parseTree);
break;
default:
/*
- * other statements don't require any transformation-- just
- * return the original parsetree, yea!
+ * other statements don't require any transformation-- just return
+ * the original parsetree, yea!
*/
result = makeNode(Query);
result->commandType = CMD_UTILITY;
@@ -396,10 +394,9 @@ transformStmt(ParseState *pstate, Node *parseTree,
result->canSetTag = true;
/*
- * Check that we did not produce too many resnos; at the very
- * least we cannot allow more than 2^16, since that would exceed
- * the range of a AttrNumber. It seems safest to use
- * MaxTupleAttributeNumber.
+ * Check that we did not produce too many resnos; at the very least we
+ * cannot allow more than 2^16, since that would exceed the range of a
+ * AttrNumber. It seems safest to use MaxTupleAttributeNumber.
*/
if (pstate->p_next_resno - 1 > MaxTupleAttributeNumber)
ereport(ERROR,
@@ -423,11 +420,11 @@ transformViewStmt(ParseState *pstate, ViewStmt *stmt,
extras_before, extras_after);
/*
- * If a list of column names was given, run through and insert these
- * into the actual query tree. - thomas 2000-03-08
+ * If a list of column names was given, run through and insert these into
+ * the actual query tree. - thomas 2000-03-08
*
- * Outer loop is over targetlist to make it easier to skip junk
- * targetlist entries.
+ * Outer loop is over targetlist to make it easier to skip junk targetlist
+ * entries.
*/
if (stmt->aliases != NIL)
{
@@ -472,17 +469,17 @@ transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt)
/* set up range table with just the result rel */
qry->resultRelation = setTargetTable(pstate, stmt->relation,
- interpretInhOption(stmt->relation->inhOpt),
+ interpretInhOption(stmt->relation->inhOpt),
true,
ACL_DELETE);
qry->distinctClause = NIL;
/*
- * The USING clause is non-standard SQL syntax, and is equivalent
- * in functionality to the FROM list that can be specified for
- * UPDATE. The USING keyword is used rather than FROM because FROM
- * is already a keyword in the DELETE syntax.
+ * The USING clause is non-standard SQL syntax, and is equivalent in
+ * functionality to the FROM list that can be specified for UPDATE. The
+ * USING keyword is used rather than FROM because FROM is already a
+ * keyword in the DELETE syntax.
*/
transformFromClause(pstate, stmt->usingClause);
@@ -526,11 +523,11 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and
- * in that case we want the rule's OLD and NEW rtable entries to
- * appear as part of the SELECT's rtable, not as outer references for
- * it. (Kluge!) The SELECT's joinlist is not affected however. We
- * must do this before adding the target table to the INSERT's rtable.
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * that case we want the rule's OLD and NEW rtable entries to appear as
+ * part of the SELECT's rtable, not as outer references for it. (Kluge!)
+ * The SELECT's joinlist is not affected however. We must do this before
+ * adding the target table to the INSERT's rtable.
*/
if (stmt->selectStmt)
{
@@ -549,10 +546,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
}
/*
- * Must get write lock on INSERT target table before scanning SELECT,
- * else we will grab the wrong kind of initial lock if the target
- * table is also mentioned in the SELECT part. Note that the target
- * table is not added to the joinlist or namespace.
+ * Must get write lock on INSERT target table before scanning SELECT, else
+ * we will grab the wrong kind of initial lock if the target table is also
+ * mentioned in the SELECT part. Note that the target table is not added
+ * to the joinlist or namespace.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
false, false, ACL_INSERT);
@@ -563,11 +560,11 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (stmt->selectStmt)
{
/*
- * We make the sub-pstate a child of the outer pstate so that it
- * can see any Param definitions supplied from above. Since the
- * outer pstate's rtable and namespace are presently empty, there
- * are no side-effects of exposing names the sub-SELECT shouldn't
- * be able to see.
+ * We make the sub-pstate a child of the outer pstate so that it can
+ * see any Param definitions supplied from above. Since the outer
+ * pstate's rtable and namespace are presently empty, there are no
+ * side-effects of exposing names the sub-SELECT shouldn't be able to
+ * see.
*/
ParseState *sub_pstate = make_parsestate(pstate);
RangeTblEntry *rte;
@@ -576,19 +573,18 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
/*
* Process the source SELECT.
*
- * It is important that this be handled just like a standalone
- * SELECT; otherwise the behavior of SELECT within INSERT might be
- * different from a stand-alone SELECT. (Indeed, Postgres up
- * through 6.5 had bugs of just that nature...)
+ * It is important that this be handled just like a standalone SELECT;
+ * otherwise the behavior of SELECT within INSERT might be different
+ * from a stand-alone SELECT. (Indeed, Postgres up through 6.5 had
+ * bugs of just that nature...)
*/
sub_pstate->p_rtable = sub_rtable;
sub_pstate->p_relnamespace = sub_relnamespace;
sub_pstate->p_varnamespace = sub_varnamespace;
/*
- * Note: we are not expecting that extras_before and extras_after
- * are going to be used by the transformation of the SELECT
- * statement.
+ * Note: we are not expecting that extras_before and extras_after are
+ * going to be used by the transformation of the SELECT statement.
*/
selectQuery = transformStmt(sub_pstate, stmt->selectStmt,
extras_before, extras_after);
@@ -604,8 +600,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
errmsg("INSERT ... SELECT may not specify INTO")));
/*
- * Make the source be a subquery in the INSERT's rangetable, and
- * add it to the INSERT's joinlist.
+ * Make the source be a subquery in the INSERT's rangetable, and add
+ * it to the INSERT's joinlist.
*/
rte = addRangeTableEntryForSubquery(pstate,
selectQuery,
@@ -640,7 +636,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (tle->resjunk)
continue;
if (tle->expr &&
- (IsA(tle->expr, Const) || IsA(tle->expr, Param)) &&
+ (IsA(tle->expr, Const) ||IsA(tle->expr, Param)) &&
exprType((Node *) tle->expr) == UNKNOWNOID)
expr = tle->expr;
else
@@ -659,8 +655,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
else
{
/*
- * For INSERT ... VALUES, transform the given list of values to
- * form a targetlist for the INSERT.
+ * For INSERT ... VALUES, transform the given list of values to form a
+ * targetlist for the INSERT.
*/
qry->targetList = transformTargetList(pstate, stmt->targetList);
}
@@ -690,7 +686,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (icols == NULL || attnos == NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more expressions than target columns")));
+ errmsg("INSERT has more expressions than target columns")));
col = (ResTarget *) lfirst(icols);
Assert(IsA(col, ResTarget));
@@ -711,7 +707,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (stmt->cols != NIL && (icols != NULL || attnos != NULL))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more target columns than expressions")));
+ errmsg("INSERT has more target columns than expressions")));
/* done building the range table and jointree */
qry->rtable = pstate->p_rtable;
@@ -756,8 +752,8 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt,
cxt.hasoids = interpretOidsOption(stmt->hasoids);
/*
- * Run through each primary element in the table creation clause.
- * Separate column defs from constraints, and do preliminary analysis.
+ * Run through each primary element in the table creation clause. Separate
+ * column defs from constraints, and do preliminary analysis.
*/
foreach(elements, stmt->tableElts)
{
@@ -870,11 +866,11 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
*
* Although we use ChooseRelationName, it's not guaranteed that the
* selected sequence name won't conflict; given sufficiently long
- * field names, two different serial columns in the same table
- * could be assigned the same sequence name, and we'd not notice
- * since we aren't creating the sequence quite yet. In practice
- * this seems quite unlikely to be a problem, especially since few
- * people would need two serial columns in one table.
+ * field names, two different serial columns in the same table could
+ * be assigned the same sequence name, and we'd not notice since we
+ * aren't creating the sequence quite yet. In practice this seems
+ * quite unlikely to be a problem, especially since few people would
+ * need two serial columns in one table.
*/
snamespaceid = RangeVarGetCreationNamespace(cxt->relation);
snamespace = get_namespace_name(snamespaceid);
@@ -889,9 +885,9 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
cxt->relation->relname, column->colname)));
/*
- * Build a CREATE SEQUENCE command to create the sequence object,
- * and add it to the list of things to be done before this
- * CREATE/ALTER TABLE.
+ * Build a CREATE SEQUENCE command to create the sequence object, and
+ * add it to the list of things to be done before this CREATE/ALTER
+ * TABLE.
*/
seqstmt = makeNode(CreateSeqStmt);
seqstmt->sequence = makeRangeVar(snamespace, sname);
@@ -907,14 +903,13 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
/*
* Create appropriate constraints for SERIAL. We do this in full,
- * rather than shortcutting, so that we will detect any
- * conflicting constraints the user wrote (like a different
- * DEFAULT).
+ * rather than shortcutting, so that we will detect any conflicting
+ * constraints the user wrote (like a different DEFAULT).
*
* Create an expression tree representing the function call
- * nextval('sequencename'). We cannot reduce the raw tree
- * to cooked form until after the sequence is created, but
- * there's no need to do so.
+ * nextval('sequencename'). We cannot reduce the raw tree to cooked
+ * form until after the sequence is created, but there's no need to do
+ * so.
*/
qstring = quote_qualified_identifier(snamespace, sname);
snamenode = makeNode(A_Const);
@@ -949,9 +944,9 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
constraint = lfirst(clist);
/*
- * If this column constraint is a FOREIGN KEY constraint, then we
- * fill in the current attribute's name and throw it into the list
- * of FK constraints to be processed later.
+ * If this column constraint is a FOREIGN KEY constraint, then we fill
+ * in the current attribute's name and throw it into the list of FK
+ * constraints to be processed later.
*/
if (IsA(constraint, FkConstraint))
{
@@ -971,7 +966,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = FALSE;
saw_nullable = true;
break;
@@ -981,7 +976,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = TRUE;
saw_nullable = true;
break;
@@ -991,7 +986,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("multiple default values specified for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->raw_default = constraint->raw_expr;
Assert(constraint->cooked_expr == NULL);
break;
@@ -1113,8 +1108,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
/*
* Create a new inherited column.
*
- * For constraints, ONLY the NOT NULL constraint is inherited by the
- * new column definition per SQL99.
+ * For constraints, ONLY the NOT NULL constraint is inherited by the new
+ * column definition per SQL99.
*/
def = makeNode(ColumnDef);
def->colname = pstrdup(attributeName);
@@ -1158,8 +1153,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
Assert(this_default != NULL);
/*
- * If default expr could contain any vars, we'd need to fix
- * 'em, but it can't; so default is ready to apply to child.
+ * If default expr could contain any vars, we'd need to fix 'em,
+ * but it can't; so default is ready to apply to child.
*/
def->cooked_default = pstrdup(this_default);
@@ -1168,8 +1163,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
- * the parent before the child is committed.
+ * commit. That will prevent someone else from deleting or ALTERing the
+ * parent before the child is committed.
*/
heap_close(relation, NoLock);
}
@@ -1183,10 +1178,9 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
ListCell *l;
/*
- * Run through the constraints that need to generate an index. For
- * PRIMARY KEY, mark each column as NOT NULL and create an index. For
- * UNIQUE, create an index as for PRIMARY KEY, but do not insist on
- * NOT NULL.
+ * Run through the constraints that need to generate an index. For PRIMARY
+ * KEY, mark each column as NOT NULL and create an index. For UNIQUE,
+ * create an index as for PRIMARY KEY, but do not insist on NOT NULL.
*/
foreach(listptr, cxt->ixconstraints)
{
@@ -1212,8 +1206,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
cxt->pkey = index;
/*
- * In ALTER TABLE case, a primary index might already exist,
- * but DefineIndex will check for it.
+ * In ALTER TABLE case, a primary index might already exist, but
+ * DefineIndex will check for it.
*/
}
index->isconstraint = true;
@@ -1230,10 +1224,10 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
index->whereClause = NULL;
/*
- * Make sure referenced keys exist. If we are making a PRIMARY
- * KEY index, also make sure they are NOT NULL, if possible.
- * (Although we could leave it to DefineIndex to mark the columns
- * NOT NULL, it's more efficient to get it right the first time.)
+ * Make sure referenced keys exist. If we are making a PRIMARY KEY
+ * index, also make sure they are NOT NULL, if possible. (Although we
+ * could leave it to DefineIndex to mark the columns NOT NULL, it's
+ * more efficient to get it right the first time.)
*/
foreach(keys, constraint->keys)
{
@@ -1261,9 +1255,9 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
else if (SystemAttributeByName(key, cxt->hasoids) != NULL)
{
/*
- * column will be a system column in the new table, so
- * accept it. System columns can't ever be null, so no
- * need to worry about PRIMARY/NOT NULL constraint.
+ * column will be a system column in the new table, so accept
+ * it. System columns can't ever be null, so no need to worry
+ * about PRIMARY/NOT NULL constraint.
*/
found = true;
}
@@ -1283,8 +1277,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (rel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("inherited relation \"%s\" is not a table",
- inh->relname)));
+ errmsg("inherited relation \"%s\" is not a table",
+ inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
@@ -1298,10 +1292,9 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
/*
* We currently have no easy way to force an
- * inherited column to be NOT NULL at
- * creation, if its parent wasn't so already.
- * We leave it to DefineIndex to fix things up
- * in this case.
+ * inherited column to be NOT NULL at creation, if
+ * its parent wasn't so already. We leave it to
+ * DefineIndex to fix things up in this case.
*/
break;
}
@@ -1313,16 +1306,16 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
}
/*
- * In the ALTER TABLE case, don't complain about index keys
- * not created in the command; they may well exist already.
- * DefineIndex will complain about them if not, and will also
- * take care of marking them NOT NULL.
+ * In the ALTER TABLE case, don't complain about index keys not
+ * created in the command; they may well exist already.
+ * DefineIndex will complain about them if not, and will also take
+ * care of marking them NOT NULL.
*/
if (!found && !cxt->isalter)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key)));
+ errmsg("column \"%s\" named in key does not exist",
+ key)));
/* Check for PRIMARY KEY(foo, foo) */
foreach(columns, index->indexParams)
@@ -1355,14 +1348,13 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
}
/*
- * Scan the index list and remove any redundant index specifications.
- * This can happen if, for instance, the user writes UNIQUE PRIMARY
- * KEY. A strict reading of SQL92 would suggest raising an error
- * instead, but that strikes me as too anal-retentive. - tgl
- * 2001-02-14
+ * Scan the index list and remove any redundant index specifications. This
+ * can happen if, for instance, the user writes UNIQUE PRIMARY KEY. A
+ * strict reading of SQL92 would suggest raising an error instead, but
+ * that strikes me as too anal-retentive. - tgl 2001-02-14
*
- * XXX in ALTER TABLE case, it'd be nice to look for duplicate
- * pre-existing indexes, too.
+ * XXX in ALTER TABLE case, it'd be nice to look for duplicate pre-existing
+ * indexes, too.
*/
cxt->alist = NIL;
if (cxt->pkey != NULL)
@@ -1430,10 +1422,10 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt,
}
/*
- * For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE
- * ADD CONSTRAINT command to execute after the basic command is
- * complete. (If called from ADD CONSTRAINT, that routine will add the
- * FK constraints to its own subcommand list.)
+ * For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE ADD
+ * CONSTRAINT command to execute after the basic command is complete. (If
+ * called from ADD CONSTRAINT, that routine will add the FK constraints to
+ * its own subcommand list.)
*
* Note: the ADD CONSTRAINT command must also execute after any index
* creation commands. Thus, this should run after
@@ -1481,11 +1473,11 @@ transformIndexStmt(ParseState *pstate, IndexStmt *stmt)
if (stmt->whereClause)
{
/*
- * Put the parent table into the rtable so that the WHERE clause
- * can refer to its fields without qualification. Note that this
- * only works if the parent table already exists --- so we can't
- * easily support predicates on indexes created implicitly by
- * CREATE TABLE. Fortunately, that's not necessary.
+ * Put the parent table into the rtable so that the WHERE clause can
+ * refer to its fields without qualification. Note that this only
+ * works if the parent table already exists --- so we can't easily
+ * support predicates on indexes created implicitly by CREATE TABLE.
+ * Fortunately, that's not necessary.
*/
rte = addRangeTableEntry(pstate, stmt->relation, NULL, false, true);
@@ -1514,14 +1506,14 @@ transformIndexStmt(ParseState *pstate, IndexStmt *stmt)
ielem->expr = transformExpr(pstate, ielem->expr);
/*
- * We check only that the result type is legitimate; this is
- * for consistency with what transformWhereClause() checks for
- * the predicate. DefineIndex() will make more checks.
+ * We check only that the result type is legitimate; this is for
+ * consistency with what transformWhereClause() checks for the
+ * predicate. DefineIndex() will make more checks.
*/
if (expression_returns_set(ielem->expr))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("index expression may not return a set")));
+ errmsg("index expression may not return a set")));
}
}
@@ -1560,9 +1552,9 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
/*
- * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW' equal to
- * 2. Set up their RTEs in the main pstate for use in parsing the
- * rule qualification.
+ * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW' equal to 2.
+ * Set up their RTEs in the main pstate for use in parsing the rule
+ * qualification.
*/
Assert(pstate->p_rtable == NIL);
oldrte = addRangeTableEntryForRelation(pstate, rel,
@@ -1576,11 +1568,11 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
newrte->requiredPerms = 0;
/*
- * They must be in the namespace too for lookup purposes, but only add
- * the one(s) that are relevant for the current kind of rule. In an
- * UPDATE rule, quals must refer to OLD.field or NEW.field to be
- * unambiguous, but there's no need to be so picky for INSERT &
- * DELETE. We do not add them to the joinlist.
+ * They must be in the namespace too for lookup purposes, but only add the
+ * one(s) that are relevant for the current kind of rule. In an UPDATE
+ * rule, quals must refer to OLD.field or NEW.field to be unambiguous, but
+ * there's no need to be so picky for INSERT & DELETE. We do not add them
+ * to the joinlist.
*/
switch (stmt->event)
{
@@ -1616,17 +1608,16 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("rule WHERE condition may not contain aggregate functions")));
+ errmsg("rule WHERE condition may not contain aggregate functions")));
/* save info about sublinks in where clause */
qry->hasSubLinks = pstate->p_hasSubLinks;
/*
- * 'instead nothing' rules with a qualification need a query
- * rangetable so the rewrite handler can add the negated rule
- * qualification to the original query. We create a query with the new
- * command type CMD_NOTHING here that is treated specially by the
- * rewrite system.
+ * 'instead nothing' rules with a qualification need a query rangetable so
+ * the rewrite handler can add the negated rule qualification to the
+ * original query. We create a query with the new command type CMD_NOTHING
+ * here that is treated specially by the rewrite system.
*/
if (stmt->actions == NIL)
{
@@ -1656,11 +1647,11 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
has_new;
/*
- * Set up OLD/NEW in the rtable for this statement. The
- * entries are added only to relnamespace, not varnamespace,
- * because we don't want them to be referred to by unqualified
- * field names nor "*" in the rule actions. We decide later
- * whether to put them in the joinlist.
+ * Set up OLD/NEW in the rtable for this statement. The entries
+ * are added only to relnamespace, not varnamespace, because we
+ * don't want them to be referred to by unqualified field names
+ * nor "*" in the rule actions. We decide later whether to put
+ * them in the joinlist.
*/
oldrte = addRangeTableEntryForRelation(sub_pstate, rel,
makeAlias("*OLD*", NIL),
@@ -1678,9 +1669,9 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
extras_before, extras_after);
/*
- * We cannot support utility-statement actions (eg NOTIFY)
- * with nonempty rule WHERE conditions, because there's no way
- * to make the utility action execute conditionally.
+ * We cannot support utility-statement actions (eg NOTIFY) with
+ * nonempty rule WHERE conditions, because there's no way to make
+ * the utility action execute conditionally.
*/
if (top_subqry->commandType == CMD_UTILITY &&
stmt->whereClause != NULL)
@@ -1689,18 +1680,17 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
errmsg("rules with WHERE conditions may only have SELECT, INSERT, UPDATE, or DELETE actions")));
/*
- * If the action is INSERT...SELECT, OLD/NEW have been pushed
- * down into the SELECT, and that's what we need to look at.
- * (Ugly kluge ... try to fix this when we redesign
- * querytrees.)
+ * If the action is INSERT...SELECT, OLD/NEW have been pushed down
+ * into the SELECT, and that's what we need to look at. (Ugly
+ * kluge ... try to fix this when we redesign querytrees.)
*/
sub_qry = getInsertSelectQuery(top_subqry, NULL);
/*
- * If the sub_qry is a setop, we cannot attach any
- * qualifications to it, because the planner won't notice
- * them. This could perhaps be relaxed someday, but for now,
- * we may as well reject such a rule immediately.
+ * If the sub_qry is a setop, we cannot attach any qualifications
+ * to it, because the planner won't notice them. This could
+ * perhaps be relaxed someday, but for now, we may as well reject
+ * such a rule immediately.
*/
if (sub_qry->setOperations != NULL && stmt->whereClause != NULL)
ereport(ERROR,
@@ -1722,12 +1712,12 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
case CMD_SELECT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use OLD")));
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use NEW")));
break;
case CMD_UPDATE:
/* both are OK */
@@ -1735,14 +1725,14 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
case CMD_INSERT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON INSERT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON INSERT rule may not use OLD")));
break;
case CMD_DELETE:
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON DELETE rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON DELETE rule may not use NEW")));
break;
default:
elog(ERROR, "unrecognized event type: %d",
@@ -1751,28 +1741,26 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
}
/*
- * For efficiency's sake, add OLD to the rule action's
- * jointree only if it was actually referenced in the
- * statement or qual.
+ * For efficiency's sake, add OLD to the rule action's jointree
+ * only if it was actually referenced in the statement or qual.
*
- * For INSERT, NEW is not really a relation (only a reference to
- * the to-be-inserted tuple) and should never be added to the
+ * For INSERT, NEW is not really a relation (only a reference to the
+ * to-be-inserted tuple) and should never be added to the
* jointree.
*
* For UPDATE, we treat NEW as being another kind of reference to
- * OLD, because it represents references to *transformed*
- * tuples of the existing relation. It would be wrong to
- * enter NEW separately in the jointree, since that would
- * cause a double join of the updated relation. It's also
- * wrong to fail to make a jointree entry if only NEW and not
- * OLD is mentioned.
+ * OLD, because it represents references to *transformed* tuples
+ * of the existing relation. It would be wrong to enter NEW
+ * separately in the jointree, since that would cause a double
+ * join of the updated relation. It's also wrong to fail to make
+ * a jointree entry if only NEW and not OLD is mentioned.
*/
if (has_old || (has_new && stmt->event == CMD_UPDATE))
{
/*
- * If sub_qry is a setop, manipulating its jointree will
- * do no good at all, because the jointree is dummy. (This
- * should be a can't-happen case because of prior tests.)
+ * If sub_qry is a setop, manipulating its jointree will do no
+ * good at all, because the jointree is dummy. (This should be
+ * a can't-happen case because of prior tests.)
*/
if (sub_qry->setOperations != NULL)
ereport(ERROR,
@@ -1919,8 +1907,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
qry->commandType = CMD_SELECT;
/*
- * Find leftmost leaf SelectStmt; extract the one-time-only items from
- * it and from the top-level node.
+ * Find leftmost leaf SelectStmt; extract the one-time-only items from it
+ * and from the top-level node.
*/
leftmostSelect = stmt->larg;
while (leftmostSelect && leftmostSelect->op != SETOP_NONE)
@@ -1935,9 +1923,9 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
leftmostSelect->intoColNames = NIL;
/*
- * These are not one-time, exactly, but we want to process them here
- * and not let transformSetOperationTree() see them --- else it'll
- * just recurse right back here!
+ * These are not one-time, exactly, but we want to process them here and
+ * not let transformSetOperationTree() see them --- else it'll just
+ * recurse right back here!
*/
sortClause = stmt->sortClause;
limitOffset = stmt->limitOffset;
@@ -1976,13 +1964,13 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* Generate dummy targetlist for outer query using column names of
* leftmost select and common datatypes of topmost set operation. Also
- * make lists of the dummy vars and their names for use in parsing
- * ORDER BY.
+ * make lists of the dummy vars and their names for use in parsing ORDER
+ * BY.
*
- * Note: we use leftmostRTI as the varno of the dummy variables. It
- * shouldn't matter too much which RT index they have, as long as they
- * have one that corresponds to a real RT entry; else funny things may
- * happen when the tree is mashed by rule rewriting.
+ * Note: we use leftmostRTI as the varno of the dummy variables. It shouldn't
+ * matter too much which RT index they have, as long as they have one that
+ * corresponds to a real RT entry; else funny things may happen when the
+ * tree is mashed by rule rewriting.
*/
qry->targetList = NIL;
targetvars = NIL;
@@ -2017,9 +2005,9 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* Handle SELECT INTO/CREATE TABLE AS.
*
- * Any column names from CREATE TABLE AS need to be attached to both the
- * top level and the leftmost subquery. We do not do this earlier
- * because we do *not* want the targetnames list to be affected.
+ * Any column names from CREATE TABLE AS need to be attached to both the top
+ * level and the leftmost subquery. We do not do this earlier because we
+ * do *not* want the targetnames list to be affected.
*/
qry->into = into;
if (intoColNames)
@@ -2029,15 +2017,14 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
}
/*
- * As a first step towards supporting sort clauses that are
- * expressions using the output columns, generate a varnamespace entry
- * that makes the output columns visible. A Join RTE node is handy
- * for this, since we can easily control the Vars generated upon
- * matches.
+ * As a first step towards supporting sort clauses that are expressions
+ * using the output columns, generate a varnamespace entry that makes the
+ * output columns visible. A Join RTE node is handy for this, since we
+ * can easily control the Vars generated upon matches.
*
- * Note: we don't yet do anything useful with such cases, but at least
- * "ORDER BY upper(foo)" will draw the right error message rather than
- * "foo not found".
+ * Note: we don't yet do anything useful with such cases, but at least "ORDER
+ * BY upper(foo)" will draw the right error message rather than "foo not
+ * found".
*/
jrte = addRangeTableEntryForJoin(NULL,
targetnames,
@@ -2050,7 +2037,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
pstate->p_rtable = list_make1(jrte);
sv_relnamespace = pstate->p_relnamespace;
- pstate->p_relnamespace = NIL; /* no qualified names allowed */
+ pstate->p_relnamespace = NIL; /* no qualified names allowed */
sv_varnamespace = pstate->p_varnamespace;
pstate->p_varnamespace = list_make1(jrte);
@@ -2058,15 +2045,15 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* For now, we don't support resjunk sort clauses on the output of a
* setOperation tree --- you can only use the SQL92-spec options of
- * selecting an output column by name or number. Enforce by checking
- * that transformSortClause doesn't add any items to tlist.
+ * selecting an output column by name or number. Enforce by checking that
+ * transformSortClause doesn't add any items to tlist.
*/
tllen = list_length(qry->targetList);
qry->sortClause = transformSortClause(pstate,
sortClause,
&qry->targetList,
- false /* no unknowns expected */ );
+ false /* no unknowns expected */ );
pstate->p_rtable = sv_rtable;
pstate->p_relnamespace = sv_relnamespace;
@@ -2122,9 +2109,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
/*
* If an internal node of a set-op tree has ORDER BY, UPDATE, or LIMIT
- * clauses attached, we need to treat it like a leaf node to generate
- * an independent sub-Query tree. Otherwise, it can be represented by
- * a SetOperationStmt node underneath the parent Query.
+ * clauses attached, we need to treat it like a leaf node to generate an
+ * independent sub-Query tree. Otherwise, it can be represented by a
+ * SetOperationStmt node underneath the parent Query.
*/
if (stmt->op == SETOP_NONE)
{
@@ -2153,9 +2140,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
/*
* Transform SelectStmt into a Query.
*
- * Note: previously transformed sub-queries don't affect the parsing
- * of this sub-query, because they are not in the toplevel
- * pstate's namespace list.
+ * Note: previously transformed sub-queries don't affect the parsing of
+ * this sub-query, because they are not in the toplevel pstate's
+ * namespace list.
*/
selectList = parse_sub_analyze((Node *) stmt, pstate);
@@ -2164,10 +2151,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
Assert(IsA(selectQuery, Query));
/*
- * Check for bogus references to Vars on the current query level
- * (but upper-level references are okay). Normally this can't
- * happen because the namespace will be empty, but it could happen
- * if we are inside a rule.
+ * Check for bogus references to Vars on the current query level (but
+ * upper-level references are okay). Normally this can't happen
+ * because the namespace will be empty, but it could happen if we are
+ * inside a rule.
*/
if (pstate->p_relnamespace || pstate->p_varnamespace)
{
@@ -2188,8 +2175,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
false);
/*
- * Return a RangeTblRef to replace the SelectStmt in the set-op
- * tree.
+ * Return a RangeTblRef to replace the SelectStmt in the set-op tree.
*/
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
@@ -2229,8 +2215,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
if (list_length(lcoltypes) != list_length(rcoltypes))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("each %s query must have the same number of columns",
- context)));
+ errmsg("each %s query must have the same number of columns",
+ context)));
op->colTypes = NIL;
forboth(l, lcoltypes, r, rcoltypes)
@@ -2300,7 +2286,7 @@ applyColumnNames(List *dst, List *src)
if (list_length(src) > list_length(dst))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("CREATE TABLE AS specifies too many column names")));
+ errmsg("CREATE TABLE AS specifies too many column names")));
forboth(dst_item, dst, src_item, src)
{
@@ -2329,13 +2315,13 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
pstate->p_is_update = true;
qry->resultRelation = setTargetTable(pstate, stmt->relation,
- interpretInhOption(stmt->relation->inhOpt),
+ interpretInhOption(stmt->relation->inhOpt),
true,
ACL_UPDATE);
/*
- * the FROM clause is non-standard SQL syntax. We used to be able to
- * do this with REPLACE in POSTQUEL so we keep the feature.
+ * the FROM clause is non-standard SQL syntax. We used to be able to do
+ * this with REPLACE in POSTQUEL so we keep the feature.
*/
transformFromClause(pstate, stmt->fromClause);
@@ -2371,10 +2357,10 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
if (tle->resjunk)
{
/*
- * Resjunk nodes need no additional processing, but be sure
- * they have resnos that do not match any target columns; else
- * rewriter or planner might get confused. They don't need a
- * resname either.
+ * Resjunk nodes need no additional processing, but be sure they
+ * have resnos that do not match any target columns; else rewriter
+ * or planner might get confused. They don't need a resname
+ * either.
*/
tle->resno = (AttrNumber) pstate->p_next_resno++;
tle->resname = NULL;
@@ -2428,9 +2414,9 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
cxt.pkey = NULL;
/*
- * The only subtypes that currently require parse transformation
- * handling are ADD COLUMN and ADD CONSTRAINT. These largely re-use
- * code from CREATE TABLE.
+ * The only subtypes that currently require parse transformation handling
+ * are ADD COLUMN and ADD CONSTRAINT. These largely re-use code from
+ * CREATE TABLE.
*/
foreach(lcmd, stmt->cmds)
{
@@ -2472,8 +2458,8 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
}
/*
- * All constraints are processed in other ways. Remove
- * the original list
+ * All constraints are processed in other ways. Remove the
+ * original list
*/
def->constraints = NIL;
@@ -2482,8 +2468,7 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
case AT_AddConstraint:
/*
- * The original AddConstraint cmd node doesn't go to
- * newcmds
+ * The original AddConstraint cmd node doesn't go to newcmds
*/
if (IsA(cmd->def, Constraint))
@@ -2502,8 +2487,8 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
case AT_ProcessedConstraint:
/*
- * Already-transformed ADD CONSTRAINT, so just make it
- * look like the standard case.
+ * Already-transformed ADD CONSTRAINT, so just make it look
+ * like the standard case.
*/
cmd->subtype = AT_AddConstraint;
newcmds = lappend(newcmds, cmd);
@@ -2521,8 +2506,8 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
transformFKConstraints(pstate, &cxt, skipValidation, true);
/*
- * Push any index-creation commands into the ALTER, so that they can
- * be scheduled nicely by tablecmds.c.
+ * Push any index-creation commands into the ALTER, so that they can be
+ * scheduled nicely by tablecmds.c.
*/
foreach(l, cxt.alist)
{
@@ -2669,8 +2654,8 @@ transformExecuteStmt(ParseState *pstate, ExecuteStmt *stmt)
if (nparams != nexpected)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("wrong number of parameters for prepared statement \"%s\"",
- stmt->name),
+ errmsg("wrong number of parameters for prepared statement \"%s\"",
+ stmt->name),
errdetail("Expected %d parameters but got %d.",
nexpected, nparams)));
@@ -2686,7 +2671,7 @@ transformExecuteStmt(ParseState *pstate, ExecuteStmt *stmt)
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in EXECUTE parameter")));
+ errmsg("cannot use subquery in EXECUTE parameter")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@@ -2706,7 +2691,7 @@ transformExecuteStmt(ParseState *pstate, ExecuteStmt *stmt)
i,
format_type_be(given_type_id),
format_type_be(expected_type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
lfirst(l) = expr;
i++;
@@ -2730,28 +2715,28 @@ CheckSelectLocking(Query *qry, bool forUpdate)
if (qry->setOperations)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", operation)));
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with DISTINCT clause", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with DISTINCT clause", operation)));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with GROUP BY clause", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with GROUP BY clause", operation)));
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
errmsg("%s is not allowed with HAVING clause", operation)));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with aggregate functions", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with aggregate functions", operation)));
}
/*
@@ -2775,7 +2760,7 @@ transformLockingClause(Query *qry, LockingClause *lc)
if (lc->forUpdate != qry->forUpdate)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
+ errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
if (lc->nowait != qry->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2788,7 +2773,7 @@ transformLockingClause(Query *qry, LockingClause *lc)
/* make a clause we can pass down to subqueries to select all rels */
allrels = makeNode(LockingClause);
- allrels->lockedRels = NIL; /* indicates all rels */
+ allrels->lockedRels = NIL; /* indicates all rels */
allrels->forUpdate = lc->forUpdate;
allrels->nowait = lc->nowait;
@@ -2813,8 +2798,8 @@ transformLockingClause(Query *qry, LockingClause *lc)
case RTE_SUBQUERY:
/*
- * FOR UPDATE/SHARE of subquery is propagated to all
- * of subquery's rels
+ * FOR UPDATE/SHARE of subquery is propagated to all of
+ * subquery's rels
*/
transformLockingClause(rte->subquery, allrels);
break;
@@ -2856,18 +2841,18 @@ transformLockingClause(Query *qry, LockingClause *lc)
break;
case RTE_JOIN:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a join")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a join")));
break;
case RTE_SPECIAL:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to NEW or OLD")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to NEW or OLD")));
break;
case RTE_FUNCTION:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a function")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a function")));
break;
default:
elog(ERROR, "unrecognized RTE type: %d",
@@ -2940,7 +2925,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced NOT DEFERRABLE clause")));
+ errmsg("misplaced NOT DEFERRABLE clause")));
if (saw_deferrability)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -2958,7 +2943,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY DEFERRED clause")));
+ errmsg("misplaced INITIALLY DEFERRED clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -2967,8 +2952,7 @@ transformConstraintAttrs(List *constraintList)
((FkConstraint *) lastprimarynode)->initdeferred = true;
/*
- * If only INITIALLY DEFERRED appears, assume
- * DEFERRABLE
+ * If only INITIALLY DEFERRED appears, assume DEFERRABLE
*/
if (!saw_deferrability)
((FkConstraint *) lastprimarynode)->deferrable = true;
@@ -2982,7 +2966,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY IMMEDIATE clause")));
+ errmsg("misplaced INITIALLY IMMEDIATE clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -3082,8 +3066,8 @@ analyzeCreateSchemaStmt(CreateSchemaStmt *stmt)
cxt.alist = NIL;
/*
- * Run through each schema element in the schema element list.
- * Separate statements by type, and do preliminary analysis.
+ * Run through each schema element in the schema element list. Separate
+ * statements by type, and do preliminary analysis.
*/
foreach(elements, stmt->schemaElts)
{
@@ -3173,7 +3157,7 @@ analyzeCreateSchemaStmt(CreateSchemaStmt *stmt)
*/
static bool
check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context)
+ check_parameter_resolution_context *context)
{
if (node == NULL)
return false;
@@ -3194,8 +3178,8 @@ check_parameter_resolution_walker(Node *node,
if (param->paramtype != context->paramTypes[paramno - 1])
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("could not determine data type of parameter $%d",
- paramno)));
+ errmsg("could not determine data type of parameter $%d",
+ paramno)));
}
return false;
}
diff --git a/src/backend/parser/keywords.c b/src/backend/parser/keywords.c
index 6733d1b1240..f80b655280b 100644
--- a/src/backend/parser/keywords.c
+++ b/src/backend/parser/keywords.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.165 2005/08/23 22:40:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.166 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -393,8 +393,8 @@ ScanKeywordLookup(const char *text)
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it
- * may produce the wrong translation in some locales (eg, Turkish).
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
{
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 799bacd233e..743442895a5 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.69 2005/06/05 22:32:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.70 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,22 +56,22 @@ transformAggregateCall(ParseState *pstate, Aggref *agg)
/*
* The aggregate's level is the same as the level of the lowest-level
- * variable or aggregate in its argument; or if it contains no
- * variables at all, we presume it to be local.
+ * variable or aggregate in its argument; or if it contains no variables
+ * at all, we presume it to be local.
*/
min_varlevel = find_minimum_var_level((Node *) agg->target);
/*
- * An aggregate can't directly contain another aggregate call of the
- * same level (though outer aggs are okay). We can skip this check if
- * we didn't find any local vars or aggs.
+ * An aggregate can't directly contain another aggregate call of the same
+ * level (though outer aggs are okay). We can skip this check if we
+ * didn't find any local vars or aggs.
*/
if (min_varlevel == 0)
{
if (checkExprHasAggs((Node *) agg->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
}
if (min_varlevel < 0)
@@ -127,8 +127,8 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* No aggregates allowed in GROUP BY clauses, either.
*
- * While we are at it, build a list of the acceptable GROUP BY
- * expressions for use by check_ungrouped_columns().
+ * While we are at it, build a list of the acceptable GROUP BY expressions
+ * for use by check_ungrouped_columns().
*/
foreach(l, qry->groupClause)
{
@@ -141,15 +141,15 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
if (checkExprHasAggs(expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregates not allowed in GROUP BY clause")));
+ errmsg("aggregates not allowed in GROUP BY clause")));
groupClauses = lcons(expr, groupClauses);
}
/*
- * If there are join alias vars involved, we have to flatten them to
- * the underlying vars, so that aliased and unaliased vars will be
- * correctly taken as equal. We can skip the expense of doing this if
- * no rangetable entries are RTE_JOIN kind.
+ * If there are join alias vars involved, we have to flatten them to the
+ * underlying vars, so that aliased and unaliased vars will be correctly
+ * taken as equal. We can skip the expense of doing this if no rangetable
+ * entries are RTE_JOIN kind.
*/
hasJoinRTEs = false;
foreach(l, pstate->p_rtable)
@@ -165,8 +165,8 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* We use the planner's flatten_join_alias_vars routine to do the
- * flattening; it wants a PlannerInfo root node, which fortunately
- * can be mostly dummy.
+ * flattening; it wants a PlannerInfo root node, which fortunately can be
+ * mostly dummy.
*/
if (hasJoinRTEs)
{
@@ -175,15 +175,15 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
root->hasJoinRTEs = true;
groupClauses = (List *) flatten_join_alias_vars(root,
- (Node *) groupClauses);
+ (Node *) groupClauses);
}
else
root = NULL; /* keep compiler quiet */
/*
- * Detect whether any of the grouping expressions aren't simple Vars;
- * if they're all Vars then we don't have to work so hard in the
- * recursive scans. (Note we have to flatten aliases before this.)
+ * Detect whether any of the grouping expressions aren't simple Vars; if
+ * they're all Vars then we don't have to work so hard in the recursive
+ * scans. (Note we have to flatten aliases before this.)
*/
have_non_var_grouping = false;
foreach(l, groupClauses)
@@ -259,23 +259,23 @@ check_ungrouped_columns_walker(Node *node,
return false; /* constants are always acceptable */
/*
- * If we find an aggregate call of the original level, do not recurse
- * into its arguments; ungrouped vars in the arguments are not an
- * error. We can also skip looking at the arguments of aggregates of
- * higher levels, since they could not possibly contain Vars that are
- * of concern to us (see transformAggregateCall). We do need to look
- * into the arguments of aggregates of lower levels, however.
+ * If we find an aggregate call of the original level, do not recurse into
+ * its arguments; ungrouped vars in the arguments are not an error. We can
+ * also skip looking at the arguments of aggregates of higher levels,
+ * since they could not possibly contain Vars that are of concern to us
+ * (see transformAggregateCall). We do need to look into the arguments of
+ * aggregates of lower levels, however.
*/
if (IsA(node, Aggref) &&
(int) ((Aggref *) node)->agglevelsup >= context->sublevels_up)
return false;
/*
- * If we have any GROUP BY items that are not simple Vars, check to
- * see if subexpression as a whole matches any GROUP BY item. We need
- * to do this at every recursion level so that we recognize GROUPed-BY
- * expressions before reaching variables within them. But this only
- * works at the outer query level, as noted above.
+ * If we have any GROUP BY items that are not simple Vars, check to see if
+ * subexpression as a whole matches any GROUP BY item. We need to do this
+ * at every recursion level so that we recognize GROUPed-BY expressions
+ * before reaching variables within them. But this only works at the outer
+ * query level, as noted above.
*/
if (context->have_non_var_grouping && context->sublevels_up == 0)
{
@@ -288,10 +288,9 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
- * failure. Vars below the original query level are not a problem,
- * and neither are Vars from above it. (If such Vars are ungrouped as
- * far as their own query level is concerned, that's someone else's
- * problem...)
+ * failure. Vars below the original query level are not a problem, and
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
{
@@ -321,7 +320,7 @@ check_ungrouped_columns_walker(Node *node,
/* Found an ungrouped local variable; generate error message */
Assert(var->varno > 0 &&
- (int) var->varno <= list_length(context->pstate->p_rtable));
+ (int) var->varno <= list_length(context->pstate->p_rtable));
rte = rt_fetch(var->varno, context->pstate->p_rtable);
attname = get_rte_attribute_name(rte, var->varattno);
if (context->sublevels_up == 0)
@@ -390,10 +389,10 @@ build_aggregate_fnexprs(Oid agg_input_type,
transfn_nargs = get_func_nargs(transfn_oid);
/*
- * Build arg list to use in the transfn FuncExpr node. We really only
- * care that transfn can discover the actual argument types at runtime
- * using get_fn_expr_argtype(), so it's okay to use Param nodes that
- * don't correspond to any real Param.
+ * Build arg list to use in the transfn FuncExpr node. We really only care
+ * that transfn can discover the actual argument types at runtime using
+ * get_fn_expr_argtype(), so it's okay to use Param nodes that don't
+ * correspond to any real Param.
*/
arg0 = makeNode(Param);
arg0->paramkind = PARAM_EXEC;
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 593f8f1f4b6..95e1045ba2d 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.142 2005/06/05 00:38:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.143 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,10 +87,10 @@ transformFromClause(ParseState *pstate, List *frmList)
ListCell *fl;
/*
- * The grammar will have produced a list of RangeVars,
- * RangeSubselects, RangeFunctions, and/or JoinExprs. Transform each
- * one (possibly adding entries to the rtable), check for duplicate
- * refnames, and then add it to the joinlist and namespaces.
+ * The grammar will have produced a list of RangeVars, RangeSubselects,
+ * RangeFunctions, and/or JoinExprs. Transform each one (possibly adding
+ * entries to the rtable), check for duplicate refnames, and then add it
+ * to the joinlist and namespaces.
*/
foreach(fl, frmList)
{
@@ -148,8 +148,8 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
heap_close(pstate->p_target_relation, NoLock);
/*
- * Open target rel and grab suitable lock (which we will hold till end
- * of transaction).
+ * Open target rel and grab suitable lock (which we will hold till end of
+ * transaction).
*
* analyze.c will eventually do the corresponding heap_close(), but *not*
* release the lock.
@@ -168,14 +168,13 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
Assert(rte == rt_fetch(rtindex, pstate->p_rtable));
/*
- * Override addRangeTableEntry's default ACL_SELECT permissions check,
- * and instead mark target table as requiring exactly the specified
+ * Override addRangeTableEntry's default ACL_SELECT permissions check, and
+ * instead mark target table as requiring exactly the specified
* permissions.
*
- * If we find an explicit reference to the rel later during parse
- * analysis, scanRTEForColumn will add the ACL_SELECT bit back again.
- * That can't happen for INSERT but it is possible for UPDATE and
- * DELETE.
+ * If we find an explicit reference to the rel later during parse analysis,
+ * scanRTEForColumn will add the ACL_SELECT bit back again. That can't
+ * happen for INSERT but it is possible for UPDATE and DELETE.
*/
rte->requiredPerms = requiredPerms;
@@ -294,10 +293,9 @@ transformJoinUsingClause(ParseState *pstate, List *leftVars, List *rightVars)
*rvars;
/*
- * We cheat a little bit here by building an untransformed operator
- * tree whose leaves are the already-transformed Vars. This is OK
- * because transformExpr() won't complain about already-transformed
- * subnodes.
+ * We cheat a little bit here by building an untransformed operator tree
+ * whose leaves are the already-transformed Vars. This is OK because
+ * transformExpr() won't complain about already-transformed subnodes.
*/
forboth(lvars, leftVars, rvars, rightVars)
{
@@ -319,10 +317,10 @@ transformJoinUsingClause(ParseState *pstate, List *leftVars, List *rightVars)
}
/*
- * Since the references are already Vars, and are certainly from the
- * input relations, we don't have to go through the same pushups that
- * transformJoinOnClause() does. Just invoke transformExpr() to fix
- * up the operators, and we're done.
+ * Since the references are already Vars, and are certainly from the input
+ * relations, we don't have to go through the same pushups that
+ * transformJoinOnClause() does. Just invoke transformExpr() to fix up
+ * the operators, and we're done.
*/
result = transformExpr(pstate, result);
@@ -349,14 +347,13 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
int varno;
/*
- * This is a tad tricky, for two reasons. First, the namespace that
- * the join expression should see is just the two subtrees of the JOIN
- * plus any outer references from upper pstate levels. So,
- * temporarily set this pstate's namespace accordingly. (We need not
- * check for refname conflicts, because transformFromClauseItem()
- * already did.) NOTE: this code is OK only because the ON clause
- * can't legally alter the namespace by causing implicit relation refs
- * to be added.
+ * This is a tad tricky, for two reasons. First, the namespace that the
+ * join expression should see is just the two subtrees of the JOIN plus
+ * any outer references from upper pstate levels. So, temporarily set
+ * this pstate's namespace accordingly. (We need not check for refname
+ * conflicts, because transformFromClauseItem() already did.) NOTE: this
+ * code is OK only because the ON clause can't legally alter the namespace
+ * by causing implicit relation refs to be added.
*/
save_relnamespace = pstate->p_relnamespace;
save_varnamespace = pstate->p_varnamespace;
@@ -371,11 +368,10 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
/*
* Second, we need to check that the ON condition doesn't refer to any
- * rels outside the input subtrees of the JOIN. It could do that
- * despite our hack on the namespace if it uses fully-qualified names.
- * So, grovel through the transformed clause and make sure there are
- * no bogus references. (Outer references are OK, and are ignored
- * here.)
+ * rels outside the input subtrees of the JOIN. It could do that despite
+ * our hack on the namespace if it uses fully-qualified names. So, grovel
+ * through the transformed clause and make sure there are no bogus
+ * references. (Outer references are OK, and are ignored here.)
*/
clause_varnos = pull_varnos(result);
clause_varnos = bms_del_members(clause_varnos, containedRels);
@@ -383,8 +379,8 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("JOIN/ON clause refers to \"%s\", which is not part of JOIN",
- rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
+ errmsg("JOIN/ON clause refers to \"%s\", which is not part of JOIN",
+ rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
}
bms_free(clause_varnos);
@@ -400,9 +396,9 @@ transformTableEntry(ParseState *pstate, RangeVar *r)
RangeTblEntry *rte;
/*
- * mark this entry to indicate it comes from the FROM clause. In SQL,
- * the target list can only refer to range variables specified in the
- * from clause but we follow the more powerful POSTQUEL semantics and
+ * mark this entry to indicate it comes from the FROM clause. In SQL, the
+ * target list can only refer to range variables specified in the from
+ * clause but we follow the more powerful POSTQUEL semantics and
* automatically generate the range variable if not specified. However
* there are times we need to know whether the entries are legitimate.
*/
@@ -424,9 +420,9 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
RangeTblEntry *rte;
/*
- * We require user to supply an alias for a subselect, per SQL92. To
- * relax this, we'd have to be prepared to gin up a unique alias for
- * an unlabeled subselect.
+ * We require user to supply an alias for a subselect, per SQL92. To relax
+ * this, we'd have to be prepared to gin up a unique alias for an
+ * unlabeled subselect.
*/
if (r->alias == NULL)
ereport(ERROR,
@@ -439,9 +435,9 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
parsetrees = parse_sub_analyze(r->subquery, pstate);
/*
- * Check that we got something reasonable. Most of these conditions
- * are probably impossible given restrictions of the grammar, but
- * check 'em anyway.
+ * Check that we got something reasonable. Most of these conditions are
+ * probably impossible given restrictions of the grammar, but check 'em
+ * anyway.
*/
if (list_length(parsetrees) != 1)
elog(ERROR, "unexpected parse analysis result for subquery in FROM");
@@ -457,19 +453,17 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
errmsg("subquery in FROM may not have SELECT INTO")));
/*
- * The subquery cannot make use of any variables from FROM items
- * created earlier in the current query. Per SQL92, the scope of a
- * FROM item does not include other FROM items. Formerly we hacked
- * the namespace so that the other variables weren't even visible, but
- * it seems more useful to leave them visible and give a specific
- * error message.
+ * The subquery cannot make use of any variables from FROM items created
+ * earlier in the current query. Per SQL92, the scope of a FROM item does
+ * not include other FROM items. Formerly we hacked the namespace so that
+ * the other variables weren't even visible, but it seems more useful to
+ * leave them visible and give a specific error message.
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
*
- * We can skip groveling through the subquery if there's not anything
- * visible in the current query. Also note that outer references are
- * OK.
+ * We can skip groveling through the subquery if there's not anything visible
+ * in the current query. Also note that outer references are OK.
*/
if (pstate->p_relnamespace || pstate->p_varnamespace)
{
@@ -500,9 +494,9 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* Get function name for possible use as alias. We use the same
- * transformation rules as for a SELECT output expression. For a
- * FuncCall node, the result will be the function name, but it is
- * possible for the grammar to hand back other node types.
+ * transformation rules as for a SELECT output expression. For a FuncCall
+ * node, the result will be the function name, but it is possible for the
+ * grammar to hand back other node types.
*/
funcname = FigureColname(r->funccallnode);
@@ -514,8 +508,8 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* The function parameters cannot make use of any variables from other
* FROM items. (Compare to transformRangeSubselect(); the coding is
- * different though because we didn't parse as a sub-select with its
- * own level of namespace.)
+ * different though because we didn't parse as a sub-select with its own
+ * level of namespace.)
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
@@ -529,8 +523,8 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
}
/*
- * Disallow aggregate functions in the expression. (No reason to
- * postpone this check until parseCheckAggregates.)
+ * Disallow aggregate functions in the expression. (No reason to postpone
+ * this check until parseCheckAggregates.)
*/
if (pstate->p_hasAggs)
{
@@ -541,8 +535,8 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
}
/*
- * If a coldeflist is supplied, ensure it defines a legal set of names
- * (no duplicates) and datatypes (no pseudo-types, for instance).
+ * If a coldeflist is supplied, ensure it defines a legal set of names (no
+ * duplicates) and datatypes (no pseudo-types, for instance).
*/
if (r->coldeflist)
{
@@ -576,7 +570,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *relnamespace: receives a List of the RTEs exposed as relation names
* by this item.
@@ -599,7 +593,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
/* Plain relation reference */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformTableEntry(pstate, (RangeVar *) n);
/* assume new rte is at end */
@@ -618,7 +612,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
/* sub-SELECT is like a plain relation */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformRangeSubselect(pstate, (RangeSubselect *) n);
/* assume new rte is at end */
@@ -637,7 +631,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
/* function is like a plain relation */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformRangeFunction(pstate, (RangeFunction *) n);
/* assume new rte is at end */
@@ -688,8 +682,8 @@ transformFromClauseItem(ParseState *pstate, Node *n,
&r_containedRels);
/*
- * Check for conflicting refnames in left and right subtrees. Must
- * do this because higher levels will assume I hand back a self-
+ * Check for conflicting refnames in left and right subtrees. Must do
+ * this because higher levels will assume I hand back a self-
* consistent namespace subtree.
*/
checkNameSpaceConflicts(pstate, l_relnamespace, r_relnamespace);
@@ -715,12 +709,12 @@ transformFromClauseItem(ParseState *pstate, Node *n,
/*
* Natural join does not explicitly specify columns; must generate
- * columns to join. Need to run through the list of columns from
- * each table or join result and match up the column names. Use
- * the first table, and check every column in the second table for
- * a match. (We'll check that the matches were unique later on.)
- * The result of this step is a list of column names just like an
- * explicitly-written USING list.
+ * columns to join. Need to run through the list of columns from each
+ * table or join result and match up the column names. Use the first
+ * table, and check every column in the second table for a match.
+ * (We'll check that the matches were unique later on.) The result of
+ * this step is a list of column names just like an explicitly-written
+ * USING list.
*/
if (j->isNatural)
{
@@ -763,9 +757,9 @@ transformFromClauseItem(ParseState *pstate, Node *n,
if (j->using)
{
/*
- * JOIN/USING (or NATURAL JOIN, as transformed above).
- * Transform the list into an explicit ON-condition, and
- * generate a list of merged result columns.
+ * JOIN/USING (or NATURAL JOIN, as transformed above). Transform
+ * the list into an explicit ON-condition, and generate a list of
+ * merged result columns.
*/
List *ucols = j->using;
List *l_usingvars = NIL;
@@ -917,10 +911,10 @@ transformFromClauseItem(ParseState *pstate, Node *n,
*top_rti = j->rtindex;
/*
- * Prepare returned namespace list. If the JOIN has an alias
- * then it hides the contained RTEs as far as the relnamespace
- * goes; otherwise, put the contained RTEs and *not* the JOIN
- * into relnamespace.
+ * Prepare returned namespace list. If the JOIN has an alias then it
+ * hides the contained RTEs as far as the relnamespace goes;
+ * otherwise, put the contained RTEs and *not* the JOIN into
+ * relnamespace.
*/
if (j->alias)
{
@@ -975,10 +969,10 @@ buildMergedJoinVar(ParseState *pstate, JoinType jointype,
}
/*
- * Insert coercion functions if needed. Note that a difference in
- * typmod can only happen if input has typmod but outcoltypmod is -1.
- * In that case we insert a RelabelType to clearly mark that result's
- * typmod is not same as input. We never need coerce_type_typmod.
+ * Insert coercion functions if needed. Note that a difference in typmod
+ * can only happen if input has typmod but outcoltypmod is -1. In that
+ * case we insert a RelabelType to clearly mark that result's typmod is
+ * not same as input. We never need coerce_type_typmod.
*/
if (l_colvar->vartype != outcoltype)
l_node = coerce_type(pstate, (Node *) l_colvar, l_colvar->vartype,
@@ -1030,8 +1024,8 @@ buildMergedJoinVar(ParseState *pstate, JoinType jointype,
case JOIN_FULL:
{
/*
- * Here we must build a COALESCE expression to ensure that
- * the join output is non-null if either input is.
+ * Here we must build a COALESCE expression to ensure that the
+ * join output is non-null if either input is.
*/
CoalesceExpr *c = makeNode(CoalesceExpr);
@@ -1095,9 +1089,9 @@ transformLimitClause(ParseState *pstate, Node *clause,
qual = coerce_to_integer(pstate, qual, constructName);
/*
- * LIMIT can't refer to any vars or aggregates of the current query;
- * we don't allow subselects either (though that case would at least
- * be sensible)
+ * LIMIT can't refer to any vars or aggregates of the current query; we
+ * don't allow subselects either (though that case would at least be
+ * sensible)
*/
if (contain_vars_of_level(qual, 0))
{
@@ -1193,20 +1187,19 @@ findTargetlistEntry(ParseState *pstate, Node *node, List **tlist, int clause)
{
/*
* In GROUP BY, we must prefer a match against a FROM-clause
- * column to one against the targetlist. Look to see if there
- * is a matching column. If so, fall through to let
- * transformExpr() do the rest. NOTE: if name could refer
- * ambiguously to more than one column name exposed by FROM,
- * colNameToVar will ereport(ERROR). That's just what we want
- * here.
+ * column to one against the targetlist. Look to see if there is
+ * a matching column. If so, fall through to let transformExpr()
+ * do the rest. NOTE: if name could refer ambiguously to more
+ * than one column name exposed by FROM, colNameToVar will
+ * ereport(ERROR). That's just what we want here.
*
- * Small tweak for 7.4.3: ignore matches in upper query levels.
- * This effectively changes the search order for bare names to
- * (1) local FROM variables, (2) local targetlist aliases, (3)
- * outer FROM variables, whereas before it was (1) (3) (2).
- * SQL92 and SQL99 do not allow GROUPing BY an outer
- * reference, so this breaks no cases that are legal per spec,
- * and it seems a more self-consistent behavior.
+ * Small tweak for 7.4.3: ignore matches in upper query levels. This
+ * effectively changes the search order for bare names to (1)
+ * local FROM variables, (2) local targetlist aliases, (3) outer
+ * FROM variables, whereas before it was (1) (3) (2). SQL92 and
+ * SQL99 do not allow GROUPing BY an outer reference, so this
+ * breaks no cases that are legal per spec, and it seems a more
+ * self-consistent behavior.
*/
if (colNameToVar(pstate, name, true) != NULL)
name = NULL;
@@ -1292,9 +1285,9 @@ findTargetlistEntry(ParseState *pstate, Node *node, List **tlist, int clause)
}
/*
- * If no matches, construct a new target entry which is appended to
- * the end of the target list. This target is given resjunk = TRUE so
- * that it will not be projected into the final tuple.
+ * If no matches, construct a new target entry which is appended to the
+ * end of the target list. This target is given resjunk = TRUE so that it
+ * will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, NULL, true);
@@ -1349,11 +1342,11 @@ transformGroupClause(ParseState *pstate, List *grouplist,
/*
* If the GROUP BY clause matches the ORDER BY clause, we want to
- * adopt the ordering operators from the latter rather than using
- * the default ops. This allows "GROUP BY foo ORDER BY foo DESC"
- * to be done with only one sort step. Note we are assuming that
- * any user-supplied ordering operator will bring equal values
- * together, which is all that GROUP BY needs.
+ * adopt the ordering operators from the latter rather than using the
+ * default ops. This allows "GROUP BY foo ORDER BY foo DESC" to be
+ * done with only one sort step. Note we are assuming that any
+ * user-supplied ordering operator will bring equal values together,
+ * which is all that GROUP BY needs.
*/
if (sortItem &&
((SortClause *) lfirst(sortItem))->tleSortGroupRef ==
@@ -1435,11 +1428,11 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
/* We had SELECT DISTINCT */
/*
- * All non-resjunk elements from target list that are not already
- * in the sort list should be added to it. (We don't really care
- * what order the DISTINCT fields are checked in, so we can leave
- * the user's ORDER BY spec alone, and just add additional sort
- * keys to it to ensure that all targetlist items get sorted.)
+ * All non-resjunk elements from target list that are not already in
+ * the sort list should be added to it. (We don't really care what
+ * order the DISTINCT fields are checked in, so we can leave the
+ * user's ORDER BY spec alone, and just add additional sort keys to it
+ * to ensure that all targetlist items get sorted.)
*/
*sortClause = addAllTargetsToSortList(pstate,
*sortClause,
@@ -1449,9 +1442,9 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
/*
* Now, DISTINCT list consists of all non-resjunk sortlist items.
* Actually, all the sortlist items had better be non-resjunk!
- * Otherwise, user wrote SELECT DISTINCT with an ORDER BY item
- * that does not appear anywhere in the SELECT targetlist, and we
- * can't implement that with only one sorting pass...
+ * Otherwise, user wrote SELECT DISTINCT with an ORDER BY item that
+ * does not appear anywhere in the SELECT targetlist, and we can't
+ * implement that with only one sorting pass...
*/
foreach(slitem, *sortClause)
{
@@ -1474,16 +1467,16 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
* If the user writes both DISTINCT ON and ORDER BY, then the two
* expression lists must match (until one or the other runs out).
* Otherwise the ORDER BY requires a different sort order than the
- * DISTINCT does, and we can't implement that with only one sort
- * pass (and if we do two passes, the results will be rather
+ * DISTINCT does, and we can't implement that with only one sort pass
+ * (and if we do two passes, the results will be rather
* unpredictable). However, it's OK to have more DISTINCT ON
- * expressions than ORDER BY expressions; we can just add the
- * extra DISTINCT values to the sort list, much as we did above
- * for ordinary DISTINCT fields.
+ * expressions than ORDER BY expressions; we can just add the extra
+ * DISTINCT values to the sort list, much as we did above for ordinary
+ * DISTINCT fields.
*
- * Actually, it'd be OK for the common prefixes of the two lists to
- * match in any order, but implementing that check seems like more
- * trouble than it's worth.
+ * Actually, it'd be OK for the common prefixes of the two lists to match
+ * in any order, but implementing that check seems like more trouble
+ * than it's worth.
*/
ListCell *nextsortlist = list_head(*sortClause);
@@ -1508,12 +1501,12 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
else
{
*sortClause = addTargetToSortList(pstate, tle,
- *sortClause, *targetlist,
+ *sortClause, *targetlist,
SORTBY_ASC, NIL, true);
/*
- * Probably, the tle should always have been added at the
- * end of the sort list ... but search to be safe.
+ * Probably, the tle should always have been added at the end
+ * of the sort list ... but search to be safe.
*/
foreach(slitem, *sortClause)
{
@@ -1638,7 +1631,7 @@ assignSortGroupRef(TargetEntry *tle, List *tlist)
Index maxRef;
ListCell *l;
- if (tle->ressortgroupref) /* already has one? */
+ if (tle->ressortgroupref) /* already has one? */
return tle->ressortgroupref;
/* easiest way to pick an unused refnumber: max used + 1 */
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 32a20fc3622..3bee3c31ad5 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.131 2005/06/04 19:19:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.132 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,9 +82,9 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
ccontext, cformat);
/*
- * If the target is a fixed-length type, it may need a length coercion
- * as well as a type coercion. If we find ourselves adding both,
- * force the inner coercion node to implicit display form.
+ * If the target is a fixed-length type, it may need a length coercion as
+ * well as a type coercion. If we find ourselves adding both, force the
+ * inner coercion node to implicit display form.
*/
result = coerce_type_typmod(result,
targettype, targettypmod,
@@ -140,9 +140,9 @@ coerce_type(ParseState *pstate, Node *node,
if (inputTypeId == UNKNOWNOID && IsA(node, Const))
{
/*
- * Input is a string constant with previously undetermined type.
- * Apply the target type's typinput function to it to produce a
- * constant of the target type.
+ * Input is a string constant with previously undetermined type. Apply
+ * the target type's typinput function to it to produce a constant of
+ * the target type.
*
* NOTE: this case cannot be folded together with the other
* constant-input case, since the typinput function does not
@@ -151,10 +151,10 @@ coerce_type(ParseState *pstate, Node *node,
* float-to-int type conversion will round to integer.
*
* XXX if the typinput function is not immutable, we really ought to
- * postpone evaluation of the function call until runtime. But
- * there is no way to represent a typinput function call as an
- * expression tree, because C-string values are not Datums. (XXX
- * This *is* possible as of 7.3, do we want to do it?)
+ * postpone evaluation of the function call until runtime. But there
+ * is no way to represent a typinput function call as an expression
+ * tree, because C-string values are not Datums. (XXX This *is*
+ * possible as of 7.3, do we want to do it?)
*/
Const *con = (Const *) node;
Const *newcon = makeNode(Const);
@@ -176,14 +176,13 @@ coerce_type(ParseState *pstate, Node *node,
/*
* We pass typmod -1 to the input routine, primarily because
- * existing input routines follow implicit-coercion semantics
- * for length checks, which is not always what we want here.
- * Any length constraint will be applied later by our caller.
+ * existing input routines follow implicit-coercion semantics for
+ * length checks, which is not always what we want here. Any
+ * length constraint will be applied later by our caller.
*
- * Note that we call stringTypeDatum using the domain's pg_type
- * row, if it's a domain. This works because the domain row
- * has the same typinput and typelem as the base type ---
- * ugly...
+ * Note that we call stringTypeDatum using the domain's pg_type row,
+ * if it's a domain. This works because the domain row has the
+ * same typinput and typelem as the base type --- ugly...
*/
newcon->constvalue = stringTypeDatum(targetType, val, -1);
}
@@ -204,8 +203,8 @@ coerce_type(ParseState *pstate, Node *node,
pstate != NULL && pstate->p_variableparams)
{
/*
- * Input is a Param of previously undetermined type, and we want
- * to update our knowledge of the Param's type. Find the topmost
+ * Input is a Param of previously undetermined type, and we want to
+ * update our knowledge of the Param's type. Find the topmost
* ParseState and update the state.
*/
Param *param = (Param *) node;
@@ -236,10 +235,10 @@ coerce_type(ParseState *pstate, Node *node,
/* Ooops */
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("inconsistent types deduced for parameter $%d",
- paramno),
+ errmsg("inconsistent types deduced for parameter $%d",
+ paramno),
errdetail("%s versus %s",
- format_type_be(toppstate->p_paramtypes[paramno - 1]),
+ format_type_be(toppstate->p_paramtypes[paramno - 1]),
format_type_be(targetTypeId))));
}
@@ -252,11 +251,11 @@ coerce_type(ParseState *pstate, Node *node,
if (OidIsValid(funcId))
{
/*
- * Generate an expression tree representing run-time
- * application of the conversion function. If we are dealing
- * with a domain target type, the conversion function will
- * yield the base type, and we need to extract the correct
- * typmod to use from the domain's typtypmod.
+ * Generate an expression tree representing run-time application
+ * of the conversion function. If we are dealing with a domain
+ * target type, the conversion function will yield the base type,
+ * and we need to extract the correct typmod to use from the
+ * domain's typtypmod.
*/
Oid baseTypeId = getBaseType(targetTypeId);
int32 baseTypeMod;
@@ -269,13 +268,12 @@ coerce_type(ParseState *pstate, Node *node,
result = build_coercion_expression(node, funcId,
baseTypeId, baseTypeMod,
cformat,
- (cformat != COERCE_IMPLICIT_CAST));
+ (cformat != COERCE_IMPLICIT_CAST));
/*
- * If domain, coerce to the domain type and relabel with
- * domain type ID. We can skip the internal length-coercion
- * step if the selected coercion function was a type-and-length
- * coercion.
+ * If domain, coerce to the domain type and relabel with domain
+ * type ID. We can skip the internal length-coercion step if the
+ * selected coercion function was a type-and-length coercion.
*/
if (targetTypeId != baseTypeId)
result = coerce_to_domain(result, baseTypeId, targetTypeId,
@@ -286,10 +284,9 @@ coerce_type(ParseState *pstate, Node *node,
else
{
/*
- * We don't need to do a physical conversion, but we do need
- * to attach a RelabelType node so that the expression will be
- * seen to have the intended type when inspected by
- * higher-level code.
+ * We don't need to do a physical conversion, but we do need to
+ * attach a RelabelType node so that the expression will be seen
+ * to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
* that must be accounted for. If the destination is a domain
@@ -300,11 +297,10 @@ coerce_type(ParseState *pstate, Node *node,
if (result == node)
{
/*
- * XXX could we label result with exprTypmod(node) instead
- * of default -1 typmod, to save a possible
- * length-coercion later? Would work if both types have
- * same interpretation of typmod, which is likely but not
- * certain.
+ * XXX could we label result with exprTypmod(node) instead of
+ * default -1 typmod, to save a possible length-coercion
+ * later? Would work if both types have same interpretation of
+ * typmod, which is likely but not certain.
*/
result = (Node *) makeRelabelType((Expr *) result,
targetTypeId, -1,
@@ -331,8 +327,8 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* Input class type is a subclass of target, so generate an
- * appropriate runtime conversion (removing unneeded columns
- * and possibly rearranging the ones that are wanted).
+ * appropriate runtime conversion (removing unneeded columns and
+ * possibly rearranging the ones that are wanted).
*/
ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr);
@@ -386,23 +382,23 @@ can_coerce_type(int nargs, Oid *input_typeids, Oid *target_typeids,
}
/*
- * If input is an untyped string constant, assume we can convert
- * it to anything.
+ * If input is an untyped string constant, assume we can convert it to
+ * anything.
*/
if (inputTypeId == UNKNOWNOID)
continue;
/*
- * If pg_cast shows that we can coerce, accept. This test now
- * covers both binary-compatible and coercion-function cases.
+ * If pg_cast shows that we can coerce, accept. This test now covers
+ * both binary-compatible and coercion-function cases.
*/
if (find_coercion_pathway(targetTypeId, inputTypeId, ccontext,
&funcId))
continue;
/*
- * If input is RECORD and target is a composite type, assume we
- * can coerce (may need tighter checking here)
+ * If input is RECORD and target is a composite type, assume we can
+ * coerce (may need tighter checking here)
*/
if (inputTypeId == RECORDOID &&
ISCOMPLEX(targetTypeId))
@@ -472,22 +468,21 @@ coerce_to_domain(Node *arg, Oid baseTypeId, Oid typeId,
hide_coercion_node(arg);
/*
- * If the domain applies a typmod to its base type, build the
- * appropriate coercion step. Mark it implicit for display purposes,
- * because we don't want it shown separately by ruleutils.c; but the
- * isExplicit flag passed to the conversion function depends on the
- * manner in which the domain coercion is invoked, so that the
- * semantics of implicit and explicit coercion differ. (Is that
- * really the behavior we want?)
+ * If the domain applies a typmod to its base type, build the appropriate
+ * coercion step. Mark it implicit for display purposes, because we don't
+ * want it shown separately by ruleutils.c; but the isExplicit flag passed
+ * to the conversion function depends on the manner in which the domain
+ * coercion is invoked, so that the semantics of implicit and explicit
+ * coercion differ. (Is that really the behavior we want?)
*
* NOTE: because we apply this as part of the fixed expression structure,
- * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that
- * that would be safe to do anyway, without lots of knowledge about
- * what the base type thinks the typmod means.
+ * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that that
+ * would be safe to do anyway, without lots of knowledge about what the
+ * base type thinks the typmod means.
*/
if (!lengthCoercionDone)
{
- int32 typmod = get_typtypmod(typeId);
+ int32 typmod = get_typtypmod(typeId);
if (typmod >= 0)
arg = coerce_type_typmod(arg, baseTypeId, typmod,
@@ -497,10 +492,9 @@ coerce_to_domain(Node *arg, Oid baseTypeId, Oid typeId,
}
/*
- * Now build the domain coercion node. This represents run-time
- * checking of any constraints currently attached to the domain. This
- * also ensures that the expression is properly labeled as to result
- * type.
+ * Now build the domain coercion node. This represents run-time checking
+ * of any constraints currently attached to the domain. This also ensures
+ * that the expression is properly labeled as to result type.
*/
result = makeNode(CoerceToDomain);
result->arg = (Expr *) arg;
@@ -541,8 +535,8 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod,
Oid funcId;
/*
- * A negative typmod is assumed to mean that no coercion is wanted.
- * Also, skip coercion if already done.
+ * A negative typmod is assumed to mean that no coercion is wanted. Also,
+ * skip coercion if already done.
*/
if (targetTypMod < 0 || targetTypMod == exprTypmod(node))
return node;
@@ -616,9 +610,9 @@ build_coercion_expression(Node *node, Oid funcId,
procstruct = (Form_pg_proc) GETSTRUCT(tp);
/*
- * Asserts essentially check that function is a legal coercion
- * function. We can't make the seemingly obvious tests on prorettype
- * and proargtypes[0], because of various binary-compatibility cases.
+ * Asserts essentially check that function is a legal coercion function.
+ * We can't make the seemingly obvious tests on prorettype and
+ * proargtypes[0], because of various binary-compatibility cases.
*/
/* Assert(targetTypeId == procstruct->prorettype); */
Assert(!procstruct->proretset);
@@ -685,8 +679,8 @@ coerce_record_to_complex(ParseState *pstate, Node *node,
if (node && IsA(node, RowExpr))
{
/*
- * Since the RowExpr must be of type RECORD, we needn't worry
- * about it containing any dropped columns.
+ * Since the RowExpr must be of type RECORD, we needn't worry about it
+ * containing any dropped columns.
*/
args = ((RowExpr *) node)->args;
}
@@ -721,8 +715,8 @@ coerce_record_to_complex(ParseState *pstate, Node *node,
if (tupdesc->attrs[i]->attisdropped)
{
/*
- * can't use atttypid here, but it doesn't really matter what
- * type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter what type
+ * the Const claims to be.
*/
newargs = lappend(newargs, makeNullConst(INT4OID));
continue;
@@ -752,7 +746,7 @@ coerce_record_to_complex(ParseState *pstate, Node *node,
format_type_be(targetTypeId)),
errdetail("Cannot cast type %s to %s in column %d.",
format_type_be(exprtype),
- format_type_be(tupdesc->attrs[i]->atttypid),
+ format_type_be(tupdesc->attrs[i]->atttypid),
ucolno)));
newargs = lappend(newargs, expr);
ucolno++;
@@ -798,8 +792,8 @@ coerce_to_boolean(ParseState *pstate, Node *node,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg WHERE */
- errmsg("argument of %s must be type boolean, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type boolean, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
@@ -837,8 +831,8 @@ coerce_to_integer(ParseState *pstate, Node *node,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type integer, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type integer, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
@@ -889,15 +883,13 @@ select_common_type(List *typeids, const char *context)
else if (TypeCategory(ntype) != pcategory)
{
/*
- * both types in different categories? then not much
- * hope...
+ * both types in different categories? then not much hope...
*/
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/*
- * translator: first %s is name of a SQL construct, eg
- * CASE
+ * translator: first %s is name of a SQL construct, eg CASE
*/
errmsg("%s types %s and %s cannot be matched",
context,
@@ -905,13 +897,12 @@ select_common_type(List *typeids, const char *context)
format_type_be(ntype))));
}
else if (!IsPreferredType(pcategory, ptype) &&
- can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
- !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
+ can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
+ !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
{
/*
- * take new type if can coerce to it implicitly but not
- * the other way; but if we have a preferred type, stay on
- * it.
+ * take new type if can coerce to it implicitly but not the
+ * other way; but if we have a preferred type, stay on it.
*/
ptype = ntype;
pcategory = TypeCategory(ptype);
@@ -920,15 +911,15 @@ select_common_type(List *typeids, const char *context)
}
/*
- * If all the inputs were UNKNOWN type --- ie, unknown-type literals
- * --- then resolve as type TEXT. This situation comes up with
- * constructs like SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END);
- * SELECT 'foo' UNION SELECT 'bar'; It might seem desirable to leave
- * the construct's output type as UNKNOWN, but that really doesn't
- * work, because we'd probably end up needing a runtime coercion from
- * UNKNOWN to something else, and we usually won't have it. We need
- * to coerce the unknown literals while they are still literals, so a
- * decision has to be made now.
+ * If all the inputs were UNKNOWN type --- ie, unknown-type literals ---
+ * then resolve as type TEXT. This situation comes up with constructs
+ * like SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END); SELECT 'foo'
+ * UNION SELECT 'bar'; It might seem desirable to leave the construct's
+ * output type as UNKNOWN, but that really doesn't work, because we'd
+ * probably end up needing a runtime coercion from UNKNOWN to something
+ * else, and we usually won't have it. We need to coerce the unknown
+ * literals while they are still literals, so a decision has to be made
+ * now.
*/
if (ptype == UNKNOWNOID)
ptype = TEXTOID;
@@ -1005,9 +996,8 @@ check_generic_type_consistency(Oid *actual_arg_types,
bool have_anyelement = false;
/*
- * Loop through the arguments to see if we have any that are ANYARRAY
- * or ANYELEMENT. If so, require the actual types to be
- * self-consistent
+ * Loop through the arguments to see if we have any that are ANYARRAY or
+ * ANYELEMENT. If so, require the actual types to be self-consistent
*/
for (j = 0; j < nargs; j++)
{
@@ -1050,8 +1040,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(elem_typeid))
{
/*
- * if we don't have an element type yet, use the one we just
- * got
+ * if we don't have an element type yet, use the one we just got
*/
elem_typeid = array_typelem;
}
@@ -1118,9 +1107,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
bool have_anyelement = (rettype == ANYELEMENTOID);
/*
- * Loop through the arguments to see if we have any that are ANYARRAY
- * or ANYELEMENT. If so, require the actual types to be
- * self-consistent
+ * Loop through the arguments to see if we have any that are ANYARRAY or
+ * ANYELEMENT. If so, require the actual types to be self-consistent
*/
for (j = 0; j < nargs; j++)
{
@@ -1137,7 +1125,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (OidIsValid(elem_typeid) && actual_type != elem_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyelement\" are not all alike"),
+ errmsg("arguments declared \"anyelement\" are not all alike"),
errdetail("%s versus %s",
format_type_be(elem_typeid),
format_type_be(actual_type))));
@@ -1154,7 +1142,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (OidIsValid(array_typeid) && actual_type != array_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyarray\" are not all alike"),
+ errmsg("arguments declared \"anyarray\" are not all alike"),
errdetail("%s versus %s",
format_type_be(array_typeid),
format_type_be(actual_type))));
@@ -1163,8 +1151,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
}
/*
- * Fast Track: if none of the arguments are ANYARRAY or ANYELEMENT,
- * return the unmodified rettype.
+ * Fast Track: if none of the arguments are ANYARRAY or ANYELEMENT, return
+ * the unmodified rettype.
*/
if (!have_generics)
return rettype;
@@ -1190,8 +1178,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(elem_typeid))
{
/*
- * if we don't have an element type yet, use the one we just
- * got
+ * if we don't have an element type yet, use the one we just got
*/
elem_typeid = array_typelem;
}
@@ -1236,8 +1223,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(elem_typeid))));
}
declared_arg_types[j] = array_typeid;
}
@@ -1253,8 +1240,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(elem_typeid))));
}
return array_typeid;
}
@@ -1307,8 +1294,8 @@ resolve_generic_type(Oid declared_type,
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(context_actual_type))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(context_actual_type))));
return array_typeid;
}
}
@@ -1471,8 +1458,8 @@ IsPreferredType(CATEGORY category, Oid type)
return false;
/*
- * This switch should agree with TypeCategory(), above. Note that at
- * this point, category certainly matches the type.
+ * This switch should agree with TypeCategory(), above. Note that at this
+ * point, category certainly matches the type.
*/
switch (category)
{
@@ -1679,17 +1666,16 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
else
{
/*
- * If there's no pg_cast entry, perhaps we are dealing with a pair
- * of array types. If so, and if the element types have a
- * suitable cast, use array_type_coerce() or
- * array_type_length_coerce().
+ * If there's no pg_cast entry, perhaps we are dealing with a pair of
+ * array types. If so, and if the element types have a suitable cast,
+ * use array_type_coerce() or array_type_length_coerce().
*
- * Hack: disallow coercions to oidvector and int2vector, which
- * otherwise tend to capture coercions that should go to "real" array
- * types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, array_type_coerce isn't
- * guaranteed to produce an output that meets the restrictions of
- * these datatypes, such as being 1-dimensional.)
+ * Hack: disallow coercions to oidvector and int2vector, which otherwise
+ * tend to capture coercions that should go to "real" array types. We
+ * want those types to be considered "real" arrays for many purposes,
+ * but not this one. (Also, array_type_coerce isn't guaranteed to
+ * produce an output that meets the restrictions of these datatypes,
+ * such as being 1-dimensional.)
*/
Oid targetElemType;
Oid sourceElemType;
@@ -1699,7 +1685,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
return false;
if ((targetElemType = get_element_type(targetTypeId)) != InvalidOid &&
- (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
+ (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
{
if (find_coercion_pathway(targetElemType, sourceElemType,
ccontext, &elemfuncid))
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index fdb4c4dcf25..ab9279abd30 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.184 2005/06/26 22:05:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.185 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -278,8 +278,8 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated as a
- * single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a single
+ * multidimensional subscript operation.
*/
foreach(i, indirection)
{
@@ -295,7 +295,7 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
if (subscripts)
result = (Node *) transformArraySubscripts(pstate,
result,
- exprType(result),
+ exprType(result),
InvalidOid,
-1,
subscripts,
@@ -365,10 +365,10 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
/*
* Not known as a column of any range-table entry.
*
- * Consider the possibility that it's VALUE in a domain
- * check expression. (We handle VALUE as a name, not
- * a keyword, to avoid breaking a lot of applications
- * that have used VALUE as a column name in the past.)
+ * Consider the possibility that it's VALUE in a domain check
+ * expression. (We handle VALUE as a name, not a keyword,
+ * to avoid breaking a lot of applications that have used
+ * VALUE as a column name in the past.)
*/
if (pstate->p_value_substitute != NULL &&
strcmp(name, "value") == 0)
@@ -379,12 +379,12 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
/*
* Try to find the name as a relation. Note that only
- * relations already entered into the rangetable will
- * be recognized.
+ * relations already entered into the rangetable will be
+ * recognized.
*
* This is a hack for backwards compatibility with
- * PostQUEL-inspired syntax. The preferred form now
- * is "rel.*".
+ * PostQUEL-inspired syntax. The preferred form now is
+ * "rel.*".
*/
if (refnameRangeTblEntry(pstate, NULL, name,
&levels_up) != NULL)
@@ -414,13 +414,13 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
if (node == NULL)
{
/*
- * Not known as a column of any range-table entry, so
- * try it as a function call. Here, we will create an
+ * Not known as a column of any range-table entry, so try
+ * it as a function call. Here, we will create an
* implicit RTE for tables not already entered.
*/
node = transformWholeRowRef(pstate, NULL, name1);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name2)),
+ list_make1(makeString(name2)),
list_make1(node),
false, false, true);
}
@@ -446,7 +446,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
/* Try it as a function call */
node = transformWholeRowRef(pstate, name1, name2);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name3)),
+ list_make1(makeString(name3)),
list_make1(node),
false, false, true);
}
@@ -482,7 +482,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
/* Try it as a function call */
node = transformWholeRowRef(pstate, name2, name3);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name4)),
+ list_make1(makeString(name4)),
list_make1(node),
false, false, true);
}
@@ -491,8 +491,8 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(cref->fields))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(cref->fields))));
node = NULL; /* keep compiler quiet */
break;
}
@@ -515,7 +515,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref)
toppstate = toppstate->parentParseState;
/* Check parameter number is in range */
- if (paramno <= 0) /* probably can't happen? */
+ if (paramno <= 0) /* probably can't happen? */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PARAMETER),
errmsg("there is no parameter $%d", paramno)));
@@ -563,9 +563,9 @@ transformAExprOp(ParseState *pstate, A_Expr *a)
Node *result;
/*
- * Special-case "foo = NULL" and "NULL = foo" for compatibility
- * with standards-broken products (like Microsoft's). Turn these
- * into IS NULL exprs.
+ * Special-case "foo = NULL" and "NULL = foo" for compatibility with
+ * standards-broken products (like Microsoft's). Turn these into IS NULL
+ * exprs.
*/
if (Transform_null_equals &&
list_length(a->name) == 1 &&
@@ -588,10 +588,9 @@ transformAExprOp(ParseState *pstate, A_Expr *a)
((SubLink *) rexpr)->subLinkType == EXPR_SUBLINK)
{
/*
- * Convert "row op subselect" into a MULTIEXPR sublink.
- * Formerly the grammar did this, but now that a row construct
- * is allowed anywhere in expressions, it's easier to do it
- * here.
+ * Convert "row op subselect" into a MULTIEXPR sublink. Formerly the
+ * grammar did this, but now that a row construct is allowed anywhere
+ * in expressions, it's easier to do it here.
*/
SubLink *s = (SubLink *) rexpr;
@@ -738,8 +737,8 @@ static Node *
transformAExprOf(ParseState *pstate, A_Expr *a)
{
/*
- * Checking an expression for match to type. Will result in a
- * boolean constant node.
+ * Checking an expression for match to type. Will result in a boolean
+ * constant node.
*/
ListCell *telem;
A_Const *n;
@@ -758,8 +757,8 @@ transformAExprOf(ParseState *pstate, A_Expr *a)
}
/*
- * Expect two forms: equals or not equals. Flip the sense of the
- * result for not equals.
+ * Expect two forms: equals or not equals. Flip the sense of the result
+ * for not equals.
*/
if (strcmp(strVal(linitial(a->name)), "!=") == 0)
matched = (!matched);
@@ -779,12 +778,11 @@ transformFuncCall(ParseState *pstate, FuncCall *fn)
ListCell *args;
/*
- * Transform the list of arguments. We use a shallow list copy
- * and then transform-in-place to avoid O(N^2) behavior from
- * repeated lappend's.
+ * Transform the list of arguments. We use a shallow list copy and then
+ * transform-in-place to avoid O(N^2) behavior from repeated lappend's.
*
- * XXX: repeated lappend() would no longer result in O(n^2)
- * behavior; worth reconsidering this design?
+ * XXX: repeated lappend() would no longer result in O(n^2) behavior; worth
+ * reconsidering this design?
*/
targs = list_copy(fn->args);
foreach(args, targs)
@@ -826,11 +824,11 @@ transformCaseExpr(ParseState *pstate, CaseExpr *c)
if (arg)
{
/*
- * If test expression is an untyped literal, force it to text.
- * We have to do something now because we won't be able to do
- * this coercion on the placeholder. This is not as flexible
- * as what was done in 7.4 and before, but it's good enough to
- * handle the sort of silly coding commonly seen.
+ * If test expression is an untyped literal, force it to text. We have
+ * to do something now because we won't be able to do this coercion on
+ * the placeholder. This is not as flexible as what was done in 7.4
+ * and before, but it's good enough to handle the sort of silly coding
+ * commonly seen.
*/
if (exprType(arg) == UNKNOWNOID)
arg = coerce_to_common_type(pstate, arg, TEXTOID, "CASE");
@@ -891,9 +889,8 @@ transformCaseExpr(ParseState *pstate, CaseExpr *c)
/*
* Note: default result is considered the most significant type in
- * determining preferred type. This is how the code worked before,
- * but it seems a little bogus to me
- * --- tgl
+ * determining preferred type. This is how the code worked before, but it
+ * seems a little bogus to me --- tgl
*/
typeids = lcons_oid(exprType((Node *) newc->defresult), typeids);
@@ -947,8 +944,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
if (sublink->subLinkType == EXISTS_SUBLINK)
{
/*
- * EXISTS needs no lefthand or combining operator. These
- * fields should be NIL already, but make sure.
+ * EXISTS needs no lefthand or combining operator. These fields
+ * should be NIL already, but make sure.
*/
sublink->lefthand = NIL;
sublink->operName = NIL;
@@ -961,8 +958,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
ListCell *tlist_item = list_head(qtree->targetList);
/*
- * Make sure the subselect delivers a single column (ignoring
- * resjunk targets).
+ * Make sure the subselect delivers a single column (ignoring resjunk
+ * targets).
*/
if (tlist_item == NULL ||
((TargetEntry *) lfirst(tlist_item))->resjunk)
@@ -978,9 +975,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
}
/*
- * EXPR and ARRAY need no lefthand or combining
- * operator. These fields should be NIL already, but make
- * sure.
+ * EXPR and ARRAY need no lefthand or combining operator. These fields
+ * should be NIL already, but make sure.
*/
sublink->lefthand = NIL;
sublink->operName = NIL;
@@ -1004,9 +1000,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
lfirst(l) = transformExpr(pstate, lfirst(l));
/*
- * If the expression is "<> ALL" (with unqualified opname)
- * then convert it to "NOT IN". This is a hack to improve
- * efficiency of expressions output by pre-7.4 Postgres.
+ * If the expression is "<> ALL" (with unqualified opname) then
+ * convert it to "NOT IN". This is a hack to improve efficiency of
+ * expressions output by pre-7.4 Postgres.
*/
if (sublink->subLinkType == ALL_SUBLINK &&
list_length(op) == 1 && strcmp(opname, "<>") == 0)
@@ -1035,10 +1031,10 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
/*
* To build the list of combining operator OIDs, we must scan
- * subquery's targetlist to find values that will be matched
- * against lefthand values. We need to ignore resjunk
- * targets, so doing the outer iteration over right_list is
- * easier than doing it over left_list.
+ * subquery's targetlist to find values that will be matched against
+ * lefthand values. We need to ignore resjunk targets, so doing the
+ * outer iteration over right_list is easier than doing it over
+ * left_list.
*/
sublink->operOids = NIL;
@@ -1061,9 +1057,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
ll_item = lnext(ll_item);
/*
- * It's OK to use oper() not compatible_oper() here,
- * because make_subplan() will insert type coercion calls
- * if needed.
+ * It's OK to use oper() not compatible_oper() here, because
+ * make_subplan() will insert type coercion calls if needed.
*/
optup = oper(op,
exprType(lexpr),
@@ -1074,9 +1069,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
if (opform->oprresult != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator %s must return type boolean, not type %s",
- opname,
- format_type_be(opform->oprresult)),
+ errmsg("operator %s must return type boolean, not type %s",
+ opname,
+ format_type_be(opform->oprresult)),
errhint("The operator of a quantified predicate subquery must return type boolean.")));
if (get_func_retset(opform->oprcode))
@@ -1300,7 +1295,7 @@ transformBooleanTest(ParseState *pstate, BooleanTest *b)
default:
elog(ERROR, "unrecognized booltesttype: %d",
(int) b->booltesttype);
- clausename = NULL; /* keep compiler quiet */
+ clausename = NULL; /* keep compiler quiet */
}
b->arg = (Expr *) transformExpr(pstate, (Node *) b->arg);
@@ -1385,10 +1380,10 @@ transformWholeRowRef(ParseState *pstate, char *schemaname, char *relname)
default:
/*
- * RTE is a join or subselect. We represent this as a
- * whole-row Var of RECORD type. (Note that in most cases the
- * Var will be expanded to a RowExpr during planning, but that
- * is not our concern here.)
+ * RTE is a join or subselect. We represent this as a whole-row
+ * Var of RECORD type. (Note that in most cases the Var will be
+ * expanded to a RowExpr during planning, but that is not our
+ * concern here.)
*/
result = (Node *) makeVar(vnum,
InvalidAttrNumber,
@@ -1469,7 +1464,7 @@ exprType(Node *expr)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for data type %s",
- format_type_be(exprType((Node *) tent->expr)))));
+ format_type_be(exprType((Node *) tent->expr)))));
}
}
else
@@ -1482,10 +1477,9 @@ exprType(Node *expr)
case T_SubPlan:
{
/*
- * Although the parser does not ever deal with
- * already-planned expression trees, we support SubPlan
- * nodes in this routine for the convenience of
- * ruleutils.c.
+ * Although the parser does not ever deal with already-planned
+ * expression trees, we support SubPlan nodes in this routine
+ * for the convenience of ruleutils.c.
*/
SubPlan *subplan = (SubPlan *) expr;
@@ -1506,7 +1500,7 @@ exprType(Node *expr)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for data type %s",
- format_type_be(exprType((Node *) tent->expr)))));
+ format_type_be(exprType((Node *) tent->expr)))));
}
}
else
@@ -1600,7 +1594,7 @@ exprTypmod(Node *expr)
case BPCHAROID:
if (!con->constisnull)
{
- int32 len = VARSIZE(DatumGetPointer(con->constvalue)) - VARHDRSZ;
+ int32 len = VARSIZE(DatumGetPointer(con->constvalue)) - VARHDRSZ;
/* if multi-byte, take len and find # characters */
if (pg_database_encoding_max_length() > 1)
@@ -1629,8 +1623,8 @@ exprTypmod(Node *expr)
case T_CaseExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
CaseExpr *cexpr = (CaseExpr *) expr;
Oid casetype = cexpr->casetype;
@@ -1662,8 +1656,8 @@ exprTypmod(Node *expr)
case T_CoalesceExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
CoalesceExpr *cexpr = (CoalesceExpr *) expr;
Oid coalescetype = cexpr->coalescetype;
@@ -1686,8 +1680,8 @@ exprTypmod(Node *expr)
case T_MinMaxExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
MinMaxExpr *mexpr = (MinMaxExpr *) expr;
Oid minmaxtype = mexpr->minmaxtype;
@@ -1760,9 +1754,9 @@ exprIsLengthCoercion(Node *expr, int32 *coercedTypmod)
return false;
/*
- * If it's not a two-argument or three-argument function with the
- * second argument being an int4 constant, it can't have been created
- * from a length coercion (it must be a type coercion, instead).
+ * If it's not a two-argument or three-argument function with the second
+ * argument being an int4 constant, it can't have been created from a
+ * length coercion (it must be a type coercion, instead).
*/
nargs = list_length(func->args);
if (nargs < 2 || nargs > 3)
@@ -1844,9 +1838,9 @@ make_row_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree)
errmsg("unequal number of entries in row expression")));
/*
- * XXX it's really wrong to generate a simple AND combination for < <=
- * > >=. We probably need to invent a new runtime node type to handle
- * those correctly. For the moment, though, keep on doing this ...
+ * XXX it's really wrong to generate a simple AND combination for < <= >
+ * >=. We probably need to invent a new runtime node type to handle those
+ * correctly. For the moment, though, keep on doing this ...
*/
oprname = strVal(llast(opname));
@@ -1862,8 +1856,8 @@ make_row_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree)
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("operator %s is not supported for row expressions",
- oprname)));
+ errmsg("operator %s is not supported for row expressions",
+ oprname)));
boolop = 0; /* keep compiler quiet */
}
@@ -1957,7 +1951,7 @@ make_distinct_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree)
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
/*
* We rely on DistinctExpr and OpExpr being same struct
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 88132bdbd58..a3a42326487 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.181 2005/06/22 15:19:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.182 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,10 +74,10 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
FuncDetailCode fdresult;
/*
- * Most of the rest of the parser just assumes that functions do not
- * have more than FUNC_MAX_ARGS parameters. We have to test here to
- * protect against array overruns, etc. Of course, this may not be a
- * function, but the test doesn't hurt.
+ * Most of the rest of the parser just assumes that functions do not have
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * against array overruns, etc. Of course, this may not be a function,
+ * but the test doesn't hurt.
*/
if (list_length(fargs) > FUNC_MAX_ARGS)
ereport(ERROR,
@@ -88,11 +88,11 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Extract arg type info in preparation for function lookup.
*
- * If any arguments are Param markers of type VOID, we discard them
- * from the parameter list. This is a hack to allow the JDBC driver
- * to not have to distinguish "input" and "output" parameter symbols
- * while parsing function-call constructs. We can't use foreach()
- * because we may modify the list ...
+ * If any arguments are Param markers of type VOID, we discard them from the
+ * parameter list. This is a hack to allow the JDBC driver to not have to
+ * distinguish "input" and "output" parameter symbols while parsing
+ * function-call constructs. We can't use foreach() because we may modify
+ * the list ...
*/
nargs = 0;
for (l = list_head(fargs); l != NULL; l = nextl)
@@ -102,7 +102,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
nextl = lnext(l);
- if (argtype == VOIDOID && IsA(arg, Param) && !is_column)
+ if (argtype == VOIDOID && IsA(arg, Param) &&!is_column)
{
fargs = list_delete_ptr(fargs, arg);
continue;
@@ -119,9 +119,9 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Check for column projection: if function has one argument, and that
- * argument is of complex type, and function name is not qualified,
- * then the "function call" could be a projection. We also check that
- * there wasn't any aggregate decoration.
+ * argument is of complex type, and function name is not qualified, then
+ * the "function call" could be a projection. We also check that there
+ * wasn't any aggregate decoration.
*/
if (nargs == 1 && !agg_star && !agg_distinct && list_length(funcname) == 1)
{
@@ -136,8 +136,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
return retval;
/*
- * If ParseComplexProjection doesn't recognize it as a
- * projection, just press on.
+ * If ParseComplexProjection doesn't recognize it as a projection,
+ * just press on.
*/
}
}
@@ -147,8 +147,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* func_get_detail looks up the function in the catalogs, does
* disambiguation for polymorphic functions, handles inheritance, and
* returns the funcid and type and set or singleton status of the
- * function's return value. it also returns the true argument types
- * to the function.
+ * function's return value. it also returns the true argument types to
+ * the function.
*/
fdresult = func_get_detail(funcname, fargs, nargs, actual_arg_types,
&funcid, &rettype, &retset,
@@ -156,8 +156,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
if (fdresult == FUNCDETAIL_COERCION)
{
/*
- * We can do it as a trivial coercion. coerce_type can handle
- * these cases, so why duplicate code...
+ * We can do it as a trivial coercion. coerce_type can handle these
+ * cases, so why duplicate code...
*/
return coerce_type(pstate, linitial(fargs),
actual_arg_types[0], rettype, -1,
@@ -166,28 +166,28 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
else if (fdresult == FUNCDETAIL_NORMAL)
{
/*
- * Normal function found; was there anything indicating it must be
- * an aggregate?
+ * Normal function found; was there anything indicating it must be an
+ * aggregate?
*/
if (agg_star)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("%s(*) specified, but %s is not an aggregate function",
- NameListToString(funcname),
- NameListToString(funcname))));
+ errmsg("%s(*) specified, but %s is not an aggregate function",
+ NameListToString(funcname),
+ NameListToString(funcname))));
if (agg_distinct)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("DISTINCT specified, but %s is not an aggregate function",
- NameListToString(funcname))));
+ errmsg("DISTINCT specified, but %s is not an aggregate function",
+ NameListToString(funcname))));
}
else if (fdresult != FUNCDETAIL_AGGREGATE)
{
/*
* Oops. Time to die.
*
- * If we are dealing with the attribute notation rel.function, give
- * an error message that is appropriate for that case.
+ * If we are dealing with the attribute notation rel.function, give an
+ * error message that is appropriate for that case.
*/
if (is_column)
{
@@ -205,22 +205,22 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
errmsg("function %s is not unique",
func_signature_string(funcname, nargs,
actual_arg_types)),
- errhint("Could not choose a best candidate function. "
- "You may need to add explicit type casts.")));
+ errhint("Could not choose a best candidate function. "
+ "You may need to add explicit type casts.")));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
func_signature_string(funcname, nargs,
actual_arg_types)),
- errhint("No function matches the given name and argument types. "
- "You may need to add explicit type casts.")));
+ errhint("No function matches the given name and argument types. "
+ "You may need to add explicit type casts.")));
}
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
@@ -394,15 +394,14 @@ func_select_candidate(int nargs,
FUNC_MAX_ARGS)));
/*
- * If any input types are domains, reduce them to their base types.
- * This ensures that we will consider functions on the base type to be
- * "exact matches" in the exact-match heuristic; it also makes it
- * possible to do something useful with the type-category heuristics.
- * Note that this makes it difficult, but not impossible, to use
- * functions declared to take a domain as an input datatype. Such a
- * function will be selected over the base-type function only if it is
- * an exact match at all argument positions, and so was already chosen
- * by our caller.
+ * If any input types are domains, reduce them to their base types. This
+ * ensures that we will consider functions on the base type to be "exact
+ * matches" in the exact-match heuristic; it also makes it possible to do
+ * something useful with the type-category heuristics. Note that this
+ * makes it difficult, but not impossible, to use functions declared to
+ * take a domain as an input datatype. Such a function will be selected
+ * over the base-type function only if it is an exact match at all
+ * argument positions, and so was already chosen by our caller.
*/
for (i = 0; i < nargs; i++)
input_base_typeids[i] = getBaseType(input_typeids[i]);
@@ -452,12 +451,11 @@ func_select_candidate(int nargs,
return candidates;
/*
- * Still too many candidates? Now look for candidates which have
- * either exact matches or preferred types at the args that will
- * require coercion. (Restriction added in 7.4: preferred type must be
- * of same category as input type; give no preference to
- * cross-category conversions to preferred types.) Keep all
- * candidates if none match.
+ * Still too many candidates? Now look for candidates which have either
+ * exact matches or preferred types at the args that will require
+ * coercion. (Restriction added in 7.4: preferred type must be of same
+ * category as input type; give no preference to cross-category
+ * conversions to preferred types.) Keep all candidates if none match.
*/
for (i = 0; i < nargs; i++) /* avoid multiple lookups */
slot_category[i] = TypeCategory(input_base_typeids[i]);
@@ -502,30 +500,28 @@ func_select_candidate(int nargs,
return candidates;
/*
- * Still too many candidates? Try assigning types for the unknown
- * columns.
+ * Still too many candidates? Try assigning types for the unknown columns.
*
- * NOTE: for a binary operator with one unknown and one non-unknown
- * input, we already tried the heuristic of looking for a candidate
- * with the known input type on both sides (see binary_oper_exact()).
- * That's essentially a special case of the general algorithm we try
- * next.
+ * NOTE: for a binary operator with one unknown and one non-unknown input, we
+ * already tried the heuristic of looking for a candidate with the known
+ * input type on both sides (see binary_oper_exact()). That's essentially
+ * a special case of the general algorithm we try next.
*
- * We do this by examining each unknown argument position to see if we
- * can determine a "type category" for it. If any candidate has an
- * input datatype of STRING category, use STRING category (this bias
- * towards STRING is appropriate since unknown-type literals look like
- * strings). Otherwise, if all the candidates agree on the type
- * category of this argument position, use that category. Otherwise,
- * fail because we cannot determine a category.
+ * We do this by examining each unknown argument position to see if we can
+ * determine a "type category" for it. If any candidate has an input
+ * datatype of STRING category, use STRING category (this bias towards
+ * STRING is appropriate since unknown-type literals look like strings).
+ * Otherwise, if all the candidates agree on the type category of this
+ * argument position, use that category. Otherwise, fail because we
+ * cannot determine a category.
*
- * If we are able to determine a type category, also notice whether any
- * of the candidates takes a preferred datatype within the category.
+ * If we are able to determine a type category, also notice whether any of
+ * the candidates takes a preferred datatype within the category.
*
- * Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
- * candidate accepted a preferred type at a position, remove
- * candidates that accept non-preferred types.
+ * Having completed this examination, remove candidates that accept the wrong
+ * category at any unknown position. Also, if at least one candidate
+ * accepted a preferred type at a position, remove candidates that accept
+ * non-preferred types.
*
* If we are down to one candidate at the end, we win.
*/
@@ -573,8 +569,7 @@ func_select_candidate(int nargs,
else
{
/*
- * Remember conflict, but keep going (might find
- * STRING)
+ * Remember conflict, but keep going (might find STRING)
*/
have_conflict = true;
}
@@ -687,8 +682,8 @@ func_get_detail(List *funcname,
raw_candidates = FuncnameGetCandidates(funcname, nargs);
/*
- * Quickly check if there is an exact match to the input datatypes
- * (there can be only one)
+ * Quickly check if there is an exact match to the input datatypes (there
+ * can be only one)
*/
for (best_candidate = raw_candidates;
best_candidate != NULL;
@@ -703,32 +698,30 @@ func_get_detail(List *funcname,
/*
* If we didn't find an exact match, next consider the possibility
* that this is really a type-coercion request: a single-argument
- * function call where the function name is a type name. If so,
- * and if we can do the coercion trivially (no run-time function
- * call needed), then go ahead and treat the "function call" as a
- * coercion. This interpretation needs to be given higher
- * priority than interpretations involving a type coercion
- * followed by a function call, otherwise we can produce
- * surprising results. For example, we want "text(varchar)" to be
- * interpreted as a trivial coercion, not as "text(name(varchar))"
- * which the code below this point is entirely capable of
- * selecting.
+ * function call where the function name is a type name. If so, and
+ * if we can do the coercion trivially (no run-time function call
+ * needed), then go ahead and treat the "function call" as a coercion.
+ * This interpretation needs to be given higher priority than
+ * interpretations involving a type coercion followed by a function
+ * call, otherwise we can produce surprising results. For example, we
+ * want "text(varchar)" to be interpreted as a trivial coercion, not
+ * as "text(name(varchar))" which the code below this point is
+ * entirely capable of selecting.
*
- * "Trivial" coercions are ones that involve binary-compatible types
- * and ones that are coercing a previously-unknown-type literal
- * constant to a specific type.
+ * "Trivial" coercions are ones that involve binary-compatible types and
+ * ones that are coercing a previously-unknown-type literal constant
+ * to a specific type.
*
- * The reason we can restrict our check to binary-compatible
- * coercions here is that we expect non-binary-compatible
- * coercions to have an implementation function named after the
- * target type. That function will be found by normal lookup if
- * appropriate.
+ * The reason we can restrict our check to binary-compatible coercions
+ * here is that we expect non-binary-compatible coercions to have an
+ * implementation function named after the target type. That function
+ * will be found by normal lookup if appropriate.
*
- * NB: it's important that this code stays in sync with what
- * coerce_type can do, because the caller will try to apply
- * coerce_type if we return FUNCDETAIL_COERCION. If we return
- * that result for something coerce_type can't handle, we'll cause
- * infinite recursion between this module and coerce_type!
+ * NB: it's important that this code stays in sync with what coerce_type
+ * can do, because the caller will try to apply coerce_type if we
+ * return FUNCDETAIL_COERCION. If we return that result for something
+ * coerce_type can't handle, we'll cause infinite recursion between
+ * this module and coerce_type!
*/
if (nargs == 1 && fargs != NIL)
{
@@ -761,8 +754,7 @@ func_get_detail(List *funcname,
}
/*
- * didn't find an exact match, so now try to match up
- * candidates...
+ * didn't find an exact match, so now try to match up candidates...
*/
if (raw_candidates != NULL)
{
@@ -788,8 +780,8 @@ func_get_detail(List *funcname,
current_candidates);
/*
- * If we were able to choose a best candidate, we're
- * done. Otherwise, ambiguous function call.
+ * If we were able to choose a best candidate, we're done.
+ * Otherwise, ambiguous function call.
*/
if (!best_candidate)
return FUNCDETAIL_MULTIPLE;
@@ -853,11 +845,10 @@ typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId)
inhrel = heap_open(InheritsRelationId, AccessShareLock);
/*
- * Use queue to do a breadth-first traversal of the inheritance graph
- * from the relid supplied up to the root. Notice that we append to
- * the queue inside the loop --- this is okay because the foreach()
- * macro doesn't advance queue_item until the next loop iteration
- * begins.
+ * Use queue to do a breadth-first traversal of the inheritance graph from
+ * the relid supplied up to the root. Notice that we append to the queue
+ * inside the loop --- this is okay because the foreach() macro doesn't
+ * advance queue_item until the next loop iteration begins.
*/
foreach(queue_item, queue)
{
@@ -872,9 +863,9 @@ typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId)
/*
* Okay, this is a not-yet-seen relid. Add it to the list of
- * already-visited OIDs, then find all the types this relid
- * inherits from and add them to the queue. The one exception is
- * we don't add the original relation to 'visited'.
+ * already-visited OIDs, then find all the types this relid inherits
+ * from and add them to the queue. The one exception is we don't add
+ * the original relation to 'visited'.
*/
if (queue_item != list_head(queue))
visited = lappend_oid(visited, this_relid);
@@ -889,7 +880,7 @@ typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId)
while ((inhtup = heap_getnext(inhscan, ForwardScanDirection)) != NULL)
{
Form_pg_inherits inh = (Form_pg_inherits) GETSTRUCT(inhtup);
- Oid inhparent = inh->inhparent;
+ Oid inhparent = inh->inhparent;
/* If this is the target superclass, we're done */
if (get_rel_type_id(inhparent) == superclassTypeId)
@@ -968,14 +959,14 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg)
int i;
/*
- * Special case for whole-row Vars so that we can resolve (foo.*).bar
- * even when foo is a reference to a subselect, join, or RECORD
- * function. A bonus is that we avoid generating an unnecessary
- * FieldSelect; our result can omit the whole-row Var and just be a
- * Var for the selected field.
+ * Special case for whole-row Vars so that we can resolve (foo.*).bar even
+ * when foo is a reference to a subselect, join, or RECORD function. A
+ * bonus is that we avoid generating an unnecessary FieldSelect; our
+ * result can omit the whole-row Var and just be a Var for the selected
+ * field.
*
- * This case could be handled by expandRecordVariable, but it's
- * more efficient to do it this way when possible.
+ * This case could be handled by expandRecordVariable, but it's more
+ * efficient to do it this way when possible.
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->varattno == InvalidAttrNumber)
@@ -992,9 +983,9 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg)
/*
* Else do it the hard way with get_expr_result_type().
*
- * If it's a Var of type RECORD, we have to work even harder: we have
- * to find what the Var refers to, and pass that to get_expr_result_type.
- * That task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to find
+ * what the Var refers to, and pass that to get_expr_result_type. That
+ * task is handled by expandRecordVariable().
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->vartype == RECORDOID)
@@ -1057,8 +1048,8 @@ unknown_attribute(ParseState *pstate, Node *relref, char *attname)
else if (relTypeId == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("could not identify column \"%s\" in record data type",
- attname)));
+ errmsg("could not identify column \"%s\" in record data type",
+ attname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -1161,7 +1152,7 @@ find_aggregate_func(List *aggname, Oid basetype, bool noError)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s(%s) is not an aggregate",
- NameListToString(aggname), format_type_be(basetype))));
+ NameListToString(aggname), format_type_be(basetype))));
}
ReleaseSysCache(ftup);
@@ -1198,7 +1189,7 @@ LookupFuncName(List *funcname, int nargs, const Oid *argtypes, bool noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(funcname, nargs, argtypes))));
+ func_signature_string(funcname, nargs, argtypes))));
return InvalidOid;
}
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 20999f81ffe..f0900ec99cf 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.89 2005/05/30 01:20:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.90 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,8 +92,8 @@ transformArrayType(Oid arrayType)
if (elementType == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot subscript type %s because it is not an array",
- format_type_be(arrayType))));
+ errmsg("cannot subscript type %s because it is not an array",
+ format_type_be(arrayType))));
ReleaseSysCache(type_tuple_array);
@@ -145,11 +145,11 @@ transformArraySubscripts(ParseState *pstate,
/*
* A list containing only single subscripts refers to a single array
- * element. If any of the items are double subscripts (lower:upper),
- * then the subscript expression means an array slice operation. In
- * this case, we supply a default lower bound of 1 for any items that
- * contain only a single subscript. We have to prescan the
- * indirection list to see if there are any double subscripts.
+ * element. If any of the items are double subscripts (lower:upper), then
+ * the subscript expression means an array slice operation. In this case,
+ * we supply a default lower bound of 1 for any items that contain only a
+ * single subscript. We have to prescan the indirection list to see if
+ * there are any double subscripts.
*/
foreach(idx, indirection)
{
@@ -163,9 +163,9 @@ transformArraySubscripts(ParseState *pstate,
}
/*
- * The type represented by the subscript expression is the element
- * type if we are fetching a single element, but it is the same as the
- * array type if we are fetching a slice or storing.
+ * The type represented by the subscript expression is the element type if
+ * we are fetching a single element, but it is the same as the array type
+ * if we are fetching a slice or storing.
*/
if (isSlice || assignFrom != NULL)
resultType = arrayType;
@@ -188,14 +188,14 @@ transformArraySubscripts(ParseState *pstate,
subexpr = transformExpr(pstate, ai->lidx);
/* If it's not int4 already, try to coerce */
subexpr = coerce_to_target_type(pstate,
- subexpr, exprType(subexpr),
+ subexpr, exprType(subexpr),
INT4OID, -1,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
if (subexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("array subscript must have type integer")));
+ errmsg("array subscript must have type integer")));
}
else
{
@@ -224,8 +224,7 @@ transformArraySubscripts(ParseState *pstate,
/*
* If doing an array store, coerce the source value to the right type.
- * (This should agree with the coercion done by
- * updateTargetListEntry.)
+ * (This should agree with the coercion done by updateTargetListEntry.)
*/
if (assignFrom != NULL)
{
@@ -244,7 +243,7 @@ transformArraySubscripts(ParseState *pstate,
" but expression is of type %s",
format_type_be(typeneeded),
format_type_be(typesource)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
@@ -308,7 +307,7 @@ make_const(Value *value)
* It might actually fit in int32. Probably only INT_MIN can
* occur, but we'll code the test generally just to be sure.
*/
- int32 val32 = (int32) val64;
+ int32 val32 = (int32) val64;
if (val64 == (int64) val32)
{
@@ -324,7 +323,7 @@ make_const(Value *value)
typeid = INT8OID;
typelen = sizeof(int64);
- typebyval = false; /* XXX might change someday */
+ typebyval = false; /* XXX might change someday */
}
}
else
@@ -341,6 +340,7 @@ make_const(Value *value)
break;
case T_String:
+
/*
* We assume here that UNKNOWN's internal representation is the
* same as CSTRING
@@ -348,7 +348,7 @@ make_const(Value *value)
val = CStringGetDatum(strVal(value));
typeid = UNKNOWNOID; /* will be coerced later */
- typelen = -2; /* cstring-style varwidth type */
+ typelen = -2; /* cstring-style varwidth type */
typebyval = false;
break;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 4b615542c81..764f729529f 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.81 2004/12/31 22:00:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.82 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -142,16 +142,16 @@ equality_oper(Oid argtype, bool noError)
/*
* Look for an "=" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * exact or binary-compatible match, since most callers are not prepared
+ * to cope with adding any run-time type coercion steps.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_EQ_OPR);
oproid = typentry->eq_opr;
/*
- * If the datatype is an array, then we can use array_eq ... but only
- * if there is a suitable equality operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_eq ... but only if
+ * there is a suitable equality operator for the element type. (This check
+ * is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_EQ_OP)
{
@@ -182,8 +182,8 @@ equality_oper(Oid argtype, bool noError)
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(argtype))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(argtype))));
return NULL;
}
@@ -200,22 +200,22 @@ ordering_oper(Oid argtype, bool noError)
Operator optup;
/*
- * Look for a "<" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * Look for a "<" operator for the datatype. We require it to be an exact
+ * or binary-compatible match, since most callers are not prepared to cope
+ * with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a "<"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and
- * grouping purposes.
+ * returned by equality_oper. This is critical for sorting and grouping
+ * purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_LT_OPR);
oproid = typentry->lt_opr;
/*
- * If the datatype is an array, then we can use array_lt ... but only
- * if there is a suitable less-than operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_lt ... but only if
+ * there is a suitable less-than operator for the element type. (This
+ * check is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_LT_OP)
{
@@ -246,9 +246,9 @@ ordering_oper(Oid argtype, bool noError)
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an ordering operator for type %s",
- format_type_be(argtype)),
- errhint("Use an explicit ordering operator or modify the query.")));
+ errmsg("could not identify an ordering operator for type %s",
+ format_type_be(argtype)),
+ errhint("Use an explicit ordering operator or modify the query.")));
return NULL;
}
@@ -265,22 +265,22 @@ reverse_ordering_oper(Oid argtype, bool noError)
Operator optup;
/*
- * Look for a ">" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * Look for a ">" operator for the datatype. We require it to be an exact
+ * or binary-compatible match, since most callers are not prepared to cope
+ * with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a ">"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and
- * grouping purposes.
+ * returned by equality_oper. This is critical for sorting and grouping
+ * purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_GT_OPR);
oproid = typentry->gt_opr;
/*
- * If the datatype is an array, then we can use array_gt ... but only
- * if there is a suitable greater-than operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_gt ... but only if
+ * there is a suitable greater-than operator for the element type. (This
+ * check is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_GT_OP)
{
@@ -311,9 +311,9 @@ reverse_ordering_oper(Oid argtype, bool noError)
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an ordering operator for type %s",
- format_type_be(argtype)),
- errhint("Use an explicit ordering operator or modify the query.")));
+ errmsg("could not identify an ordering operator for type %s",
+ format_type_be(argtype)),
+ errhint("Use an explicit ordering operator or modify the query.")));
return NULL;
}
@@ -528,8 +528,8 @@ oper(List *opname, Oid ltypeId, Oid rtypeId, bool noError)
*/
/*
- * Unspecified type for one of the arguments? then use the
- * other (XXX this is probably dead code?)
+ * Unspecified type for one of the arguments? then use the other
+ * (XXX this is probably dead code?)
*/
if (rtypeId == InvalidOid)
rtypeId = ltypeId;
@@ -654,9 +654,8 @@ right_oper(List *op, Oid arg, bool noError)
if (!OidIsValid(operOid))
{
/*
- * We must run oper_select_candidate even if only one
- * candidate, otherwise we may falsely return a
- * non-type-compatible operator.
+ * We must run oper_select_candidate even if only one candidate,
+ * otherwise we may falsely return a non-type-compatible operator.
*/
fdresult = oper_select_candidate(1, &arg, clist, &operOid);
}
@@ -703,9 +702,9 @@ left_oper(List *op, Oid arg, bool noError)
* First, quickly check to see if there is an exactly matching
* operator (there can be only one such entry in the list).
*
- * The returned list has args in the form (0, oprright). Move the
- * useful data into args[0] to keep oper_select_candidate simple.
- * XXX we are assuming here that we may scribble on the list!
+ * The returned list has args in the form (0, oprright). Move the useful
+ * data into args[0] to keep oper_select_candidate simple. XXX we are
+ * assuming here that we may scribble on the list!
*/
FuncCandidateList clisti;
@@ -722,9 +721,8 @@ left_oper(List *op, Oid arg, bool noError)
if (!OidIsValid(operOid))
{
/*
- * We must run oper_select_candidate even if only one
- * candidate, otherwise we may falsely return a
- * non-type-compatible operator.
+ * We must run oper_select_candidate even if only one candidate,
+ * otherwise we may falsely return a non-type-compatible operator.
*/
fdresult = oper_select_candidate(1, &arg, clist, &operOid);
}
@@ -784,8 +782,8 @@ op_error(List *op, char oprkind, Oid arg1, Oid arg2, FuncDetailCode fdresult)
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("operator does not exist: %s",
op_signature_string(op, oprkind, arg1, arg2)),
- errhint("No operator matches the given name and argument type(s). "
- "You may need to add explicit type casts.")));
+ errhint("No operator matches the given name and argument type(s). "
+ "You may need to add explicit type casts.")));
}
/*
@@ -862,9 +860,9 @@ make_scalar_array_op(ParseState *pstate, List *opname,
atypeId = exprType(rtree);
/*
- * The right-hand input of the operator will be the element type of
- * the array. However, if we currently have just an untyped literal
- * on the right, stay with that and hope we can resolve the operator.
+ * The right-hand input of the operator will be the element type of the
+ * array. However, if we currently have just an untyped literal on the
+ * right, stay with that and hope we can resolve the operator.
*/
if (atypeId == UNKNOWNOID)
rtypeId = UNKNOWNOID;
@@ -874,7 +872,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (!OidIsValid(rtypeId))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires array on right side")));
+ errmsg("op ANY/ALL (array) requires array on right side")));
}
/* Now resolve the operator */
@@ -888,9 +886,9 @@ make_scalar_array_op(ParseState *pstate, List *opname,
declared_arg_types[1] = opform->oprright;
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
@@ -903,11 +901,11 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (rettype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator to yield boolean")));
+ errmsg("op ANY/ALL (array) requires operator to yield boolean")));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator not to return a set")));
+ errmsg("op ANY/ALL (array) requires operator not to return a set")));
/*
* Now switch back to the array type on the right, arranging for any
@@ -985,9 +983,9 @@ make_op_expr(ParseState *pstate, Operator op,
}
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 196936bb094..61a0549ee0b 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.114 2005/10/06 19:51:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.115 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@
bool add_missing_from;
static RangeTblEntry *scanNameSpaceForRefname(ParseState *pstate,
- const char *refname);
+ const char *refname);
static RangeTblEntry *scanNameSpaceForRelid(ParseState *pstate, Oid relid);
static bool isLockedRel(ParseState *pstate, char *refname);
static void expandRelation(Oid relid, Alias *eref,
@@ -43,9 +43,9 @@ static void expandRelation(Oid relid, Alias *eref,
bool include_dropped,
List **colnames, List **colvars);
static void expandTupleDesc(TupleDesc tupdesc, Alias *eref,
- int rtindex, int sublevels_up,
- bool include_dropped,
- List **colnames, List **colvars);
+ int rtindex, int sublevels_up,
+ bool include_dropped,
+ List **colnames, List **colvars);
static int specialAttNum(const char *attname);
static void warnAutoRange(ParseState *pstate, RangeVar *relation);
@@ -297,15 +297,14 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname)
* Scan the user column names (or aliases) for a match. Complain if
* multiple matches.
*
- * Note: eref->colnames may include entries for dropped columns, but
- * those will be empty strings that cannot match any legal SQL
- * identifier, so we don't bother to test for that case here.
+ * Note: eref->colnames may include entries for dropped columns, but those
+ * will be empty strings that cannot match any legal SQL identifier, so we
+ * don't bother to test for that case here.
*
- * Should this somehow go wrong and we try to access a dropped column,
- * we'll still catch it by virtue of the checks in
- * get_rte_attribute_type(), which is called by make_var(). That
- * routine has to do a cache lookup anyway, so the check there is
- * cheap.
+ * Should this somehow go wrong and we try to access a dropped column, we'll
+ * still catch it by virtue of the checks in get_rte_attribute_type(),
+ * which is called by make_var(). That routine has to do a cache lookup
+ * anyway, so the check there is cheap.
*/
foreach(c, rte->eref->colnames)
{
@@ -385,8 +384,8 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly)
if (result)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_COLUMN),
- errmsg("column reference \"%s\" is ambiguous",
- colname)));
+ errmsg("column reference \"%s\" is ambiguous",
+ colname)));
result = newresult;
}
}
@@ -502,7 +501,7 @@ buildRelationAliases(TupleDesc tupdesc, Alias *alias, Alias *eref)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("table \"%s\" has %d columns available but %d columns specified",
- eref->aliasname, maxattrs - numdropped, numaliases)));
+ eref->aliasname, maxattrs - numdropped, numaliases)));
}
/*
@@ -531,8 +530,8 @@ buildScalarFunctionAlias(Node *funcexpr, char *funcname,
if (list_length(alias->colnames) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("too many column aliases specified for function %s",
- funcname)));
+ errmsg("too many column aliases specified for function %s",
+ funcname)));
eref->colnames = copyObject(alias->colnames);
return;
}
@@ -583,26 +582,26 @@ addRangeTableEntry(ParseState *pstate,
rte->alias = alias;
/*
- * Get the rel's OID. This access also ensures that we have an
- * up-to-date relcache entry for the rel. Since this is typically the
- * first access to a rel in a statement, be careful to get the right
- * access level depending on whether we're doing SELECT FOR UPDATE/SHARE.
+ * Get the rel's OID. This access also ensures that we have an up-to-date
+ * relcache entry for the rel. Since this is typically the first access
+ * to a rel in a statement, be careful to get the right access level
+ * depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
lockmode = isLockedRel(pstate, refname) ? RowShareLock : AccessShareLock;
rel = heap_openrv(relation, lockmode);
rte->relid = RelationGetRelid(rel);
/*
- * Build the list of effective column names using user-supplied
- * aliases and/or actual column names.
+ * Build the list of effective column names using user-supplied aliases
+ * and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
/*
- * Drop the rel refcount, but keep the access lock till end of
- * transaction so that the table can't be deleted or have its schema
- * modified underneath us.
+ * Drop the rel refcount, but keep the access lock till end of transaction
+ * so that the table can't be deleted or have its schema modified
+ * underneath us.
*/
heap_close(rel, NoLock);
@@ -623,8 +622,8 @@ addRangeTableEntry(ParseState *pstate,
rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -653,8 +652,8 @@ addRangeTableEntryForRelation(ParseState *pstate,
rte->relid = RelationGetRelid(rel);
/*
- * Build the list of effective column names using user-supplied
- * aliases and/or actual column names.
+ * Build the list of effective column names using user-supplied aliases
+ * and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
@@ -676,8 +675,8 @@ addRangeTableEntryForRelation(ParseState *pstate,
rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -754,8 +753,8 @@ addRangeTableEntryForSubquery(ParseState *pstate,
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -801,8 +800,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
&tupdesc);
/*
- * A coldeflist is required if the function returns RECORD and hasn't
- * got a predetermined record type, and is prohibited otherwise.
+ * A coldeflist is required if the function returns RECORD and hasn't got
+ * a predetermined record type, and is prohibited otherwise.
*/
if (coldeflist != NIL)
{
@@ -848,8 +847,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("function \"%s\" in FROM has unsupported return type %s",
- funcname, format_type_be(funcrettype))));
+ errmsg("function \"%s\" in FROM has unsupported return type %s",
+ funcname, format_type_be(funcrettype))));
/*----------
* Flags:
@@ -868,8 +867,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -907,7 +906,7 @@ addRangeTableEntryForJoin(ParseState *pstate,
/* fill in any unspecified alias columns */
if (numaliases < list_length(colnames))
eref->colnames = list_concat(eref->colnames,
- list_copy_tail(colnames, numaliases));
+ list_copy_tail(colnames, numaliases));
rte->eref = eref;
@@ -927,8 +926,8 @@ addRangeTableEntryForJoin(ParseState *pstate,
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -983,7 +982,7 @@ addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte,
{
if (addToJoinList)
{
- int rtindex = RTERangeTablePosn(pstate, rte, NULL);
+ int rtindex = RTERangeTablePosn(pstate, rte, NULL);
RangeTblRef *rtr = makeNode(RangeTblRef);
rtr->rtindex = rtindex;
@@ -1111,7 +1110,7 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
/* Base data type, i.e. scalar */
if (colnames)
*colnames = lappend(*colnames,
- linitial(rte->eref->colnames));
+ linitial(rte->eref->colnames));
if (colvars)
{
@@ -1184,11 +1183,11 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
/*
* During ordinary parsing, there will never be any
- * deleted columns in the join; but we have to check
- * since this routine is also used by the rewriter,
- * and joins found in stored rules might have join
- * columns for since-deleted columns. This will be
- * signaled by a NULL Const in the alias-vars list.
+ * deleted columns in the join; but we have to check since
+ * this routine is also used by the rewriter, and joins
+ * found in stored rules might have join columns for
+ * since-deleted columns. This will be signaled by a NULL
+ * Const in the alias-vars list.
*/
if (IsA(avar, Const))
{
@@ -1274,8 +1273,8 @@ expandTupleDesc(TupleDesc tupdesc, Alias *eref,
if (colvars)
{
/*
- * can't use atttypid here, but it doesn't really
- * matter what type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter
+ * what type the Const claims to be.
*/
*colvars = lappend(*colvars, makeNullConst(INT4OID));
}
@@ -1342,8 +1341,7 @@ expandRelAttrs(ParseState *pstate, RangeTblEntry *rte,
te_list = lappend(te_list, te);
}
- Assert(name == NULL && var == NULL); /* lists not the same
- * length? */
+ Assert(name == NULL && var == NULL); /* lists not the same length? */
return te_list;
}
@@ -1382,8 +1380,7 @@ get_rte_attribute_name(RangeTblEntry *rte, AttrNumber attnum)
return get_relid_attribute_name(rte->relid, attnum);
/*
- * Otherwise use the column name from eref. There should always be
- * one.
+ * Otherwise use the column name from eref. There should always be one.
*/
if (attnum > 0 && attnum <= list_length(rte->eref->colnames))
return strVal(list_nth(rte->eref->colnames, attnum - 1));
@@ -1420,15 +1417,15 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
/*
- * If dropped column, pretend it ain't there. See notes
- * in scanRTEForColumn.
+ * If dropped column, pretend it ain't there. See notes in
+ * scanRTEForColumn.
*/
if (att_tup->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- NameStr(att_tup->attname),
- get_rel_name(rte->relid))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ NameStr(att_tup->attname),
+ get_rel_name(rte->relid))));
*vartype = att_tup->atttypid;
*vartypmod = att_tup->atttypmod;
ReleaseSysCache(tp);
@@ -1468,15 +1465,15 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
if (attnum < 1 || attnum > tupdesc->natts)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column %d of relation \"%s\" does not exist",
- attnum,
- rte->eref->aliasname)));
+ errmsg("column %d of relation \"%s\" does not exist",
+ attnum,
+ rte->eref->aliasname)));
att_tup = tupdesc->attrs[attnum - 1];
/*
- * If dropped column, pretend it ain't there. See
- * notes in scanRTEForColumn.
+ * If dropped column, pretend it ain't there. See notes
+ * in scanRTEForColumn.
*/
if (att_tup->attisdropped)
ereport(ERROR,
@@ -1510,8 +1507,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
case RTE_JOIN:
{
/*
- * Join RTE --- get type info from join RTE's alias
- * variable
+ * Join RTE --- get type info from join RTE's alias variable
*/
Node *aliasvar;
@@ -1540,8 +1536,7 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum)
case RTE_RELATION:
{
/*
- * Plain relation RTE --- get the attribute's catalog
- * entry
+ * Plain relation RTE --- get the attribute's catalog entry
*/
HeapTuple tp;
Form_pg_attribute att_tup;
@@ -1565,12 +1560,11 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum)
case RTE_JOIN:
{
/*
- * A join RTE would not have dropped columns when
- * constructed, but one in a stored rule might contain
- * columns that were dropped from the underlying tables,
- * if said columns are nowhere explicitly referenced in
- * the rule. This will be signaled to us by a NULL Const
- * in the joinaliasvars list.
+ * A join RTE would not have dropped columns when constructed,
+ * but one in a stored rule might contain columns that were
+ * dropped from the underlying tables, if said columns are
+ * nowhere explicitly referenced in the rule. This will be
+ * signaled to us by a NULL Const in the joinaliasvars list.
*/
Var *aliasvar;
@@ -1766,8 +1760,8 @@ warnAutoRange(ParseState *pstate, RangeVar *relation)
if (pstate->parentParseState != NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("missing FROM-clause entry in subquery for table \"%s\"",
- relation->relname)));
+ errmsg("missing FROM-clause entry in subquery for table \"%s\"",
+ relation->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
@@ -1785,7 +1779,7 @@ warnAutoRange(ParseState *pstate, RangeVar *relation)
else
ereport(NOTICE,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("adding missing FROM-clause entry for table \"%s\"",
- relation->relname)));
+ errmsg("adding missing FROM-clause entry for table \"%s\"",
+ relation->relname)));
}
}
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 00185a05e12..88c29ebf1e4 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.137 2005/06/26 22:05:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.138 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@
static void markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
- Var *var, int levelsup);
+ Var *var, int levelsup);
static Node *transformAssignmentIndirection(ParseState *pstate,
Node *basenode,
const char *targetName,
@@ -73,8 +73,8 @@ transformTargetEntry(ParseState *pstate,
if (colname == NULL && !resjunk)
{
/*
- * Generate a suitable column name for a column without any
- * explicit 'AS ColumnName' clause.
+ * Generate a suitable column name for a column without any explicit
+ * 'AS ColumnName' clause.
*/
colname = FigureColname(node);
}
@@ -105,8 +105,8 @@ transformTargetList(ParseState *pstate, List *targetlist)
/*
* Check for "something.*". Depending on the complexity of the
- * "something", the star could appear as the last name in
- * ColumnRef, or as the last indirection item in A_Indirection.
+ * "something", the star could appear as the last name in ColumnRef,
+ * or as the last indirection item in A_Indirection.
*/
if (IsA(res->val, ColumnRef))
{
@@ -130,7 +130,7 @@ transformTargetList(ParseState *pstate, List *targetlist)
{
/* It is something.*, expand into multiple items */
p_target = list_concat(p_target,
- ExpandIndirectionStar(pstate, ind));
+ ExpandIndirectionStar(pstate, ind));
continue;
}
}
@@ -271,11 +271,11 @@ updateTargetListEntry(ParseState *pstate,
/*
* If the expression is a DEFAULT placeholder, insert the attribute's
- * type/typmod into it so that exprType will report the right things.
- * (We expect that the eventually substituted default expression will
- * in fact have this type and typmod.) Also, reject trying to update
- * a subfield or array element with DEFAULT, since there can't be any
- * default for portions of a column.
+ * type/typmod into it so that exprType will report the right things. (We
+ * expect that the eventually substituted default expression will in fact
+ * have this type and typmod.) Also, reject trying to update a subfield
+ * or array element with DEFAULT, since there can't be any default for
+ * portions of a column.
*/
if (tle->expr && IsA(tle->expr, SetToDefault))
{
@@ -288,7 +288,7 @@ updateTargetListEntry(ParseState *pstate,
if (IsA(linitial(indirection), A_Indices))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot set an array element to DEFAULT")));
+ errmsg("cannot set an array element to DEFAULT")));
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -301,9 +301,9 @@ updateTargetListEntry(ParseState *pstate,
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column
- * value that the source value has been inserted into, which can then
- * be placed in the new tuple constructed by INSERT or UPDATE.
+ * subfield assignment expression. This will generate a new column value
+ * that the source value has been inserted into, which can then be placed
+ * in the new tuple constructed by INSERT or UPDATE.
*/
if (indirection)
{
@@ -312,9 +312,9 @@ updateTargetListEntry(ParseState *pstate,
if (pstate->p_is_insert)
{
/*
- * The command is INSERT INTO table (col.something) ... so
- * there is not really a source value to work with. Insert a
- * NULL constant as the source value.
+ * The command is INSERT INTO table (col.something) ... so there
+ * is not really a source value to work with. Insert a NULL
+ * constant as the source value.
*/
colVar = (Node *) makeNullConst(attrtype);
}
@@ -358,15 +358,14 @@ updateTargetListEntry(ParseState *pstate,
colname,
format_type_be(attrtype),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the
- * target column, but this is only for debugging purposes; it should
- * not be relied on. (In particular, it might be out of date in a
- * stored rule.)
+ * planner depend on this. We also set the resname to identify the target
+ * column, but this is only for debugging purposes; it should not be
+ * relied on. (In particular, it might be out of date in a stored rule.)
*/
tle->resno = (AttrNumber) attrno;
tle->resname = colname;
@@ -424,8 +423,8 @@ transformAssignmentIndirection(ParseState *pstate,
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated as a
- * single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a single
+ * multidimensional subscript operation.
*/
for_each_cell(i, indirection)
{
@@ -561,7 +560,7 @@ transformAssignmentIndirection(ParseState *pstate,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -570,7 +569,7 @@ transformAssignmentIndirection(ParseState *pstate,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return result;
@@ -631,8 +630,8 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos)
attrno = attnameAttNum(pstate->p_target_relation, name, false);
/*
- * Check for duplicates, but only of whole columns --- we
- * allow INSERT INTO foo (col.subcol1, col.subcol2)
+ * Check for duplicates, but only of whole columns --- we allow
+ * INSERT INTO foo (col.subcol1, col.subcol2)
*/
if (col->indirection == NIL)
{
@@ -641,8 +640,8 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos)
bms_is_member(attrno, partialcols))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
wholecols = bms_add_member(wholecols, attrno);
}
else
@@ -651,8 +650,8 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos)
if (bms_is_member(attrno, wholecols))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
partialcols = bms_add_member(partialcols, attrno);
}
@@ -727,8 +726,8 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(fields))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(fields))));
schemaname = NULL; /* keep compiler quiet */
relname = NULL;
break;
@@ -765,12 +764,12 @@ ExpandAllTables(ParseState *pstate)
if (!pstate->p_varnamespace)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("SELECT * with no tables specified is not valid")));
+ errmsg("SELECT * with no tables specified is not valid")));
foreach(l, pstate->p_varnamespace)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
- int rtindex = RTERangeTablePosn(pstate, rte, NULL);
+ int rtindex = RTERangeTablePosn(pstate, rte, NULL);
target = list_concat(target,
expandRelAttrs(pstate, rte, rtindex, 0));
@@ -804,14 +803,14 @@ ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind)
/*
* Verify it's a composite type, and get the tupdesc. We use
- * get_expr_result_type() because that can handle references to
- * functions returning anonymous record types. If that fails,
- * use lookup_rowtype_tupdesc(), which will almost certainly fail
- * as well, but it will give an appropriate error message.
+ * get_expr_result_type() because that can handle references to functions
+ * returning anonymous record types. If that fails, use
+ * lookup_rowtype_tupdesc(), which will almost certainly fail as well, but
+ * it will give an appropriate error message.
*
- * If it's a Var of type RECORD, we have to work even harder: we have
- * to find what the Var refers to, and pass that to get_expr_result_type.
- * That task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to find
+ * what the Var refers to, and pass that to get_expr_result_type. That
+ * task is handled by expandRecordVariable().
*/
if (IsA(expr, Var) &&
((Var *) expr)->vartype == RECORDOID)
@@ -832,9 +831,9 @@ ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind)
continue;
/*
- * If we got a whole-row Var from the rowtype reference, we can
- * expand the fields as simple Vars. Otherwise we must generate
- * multiple copies of the rowtype reference and do FieldSelects.
+ * If we got a whole-row Var from the rowtype reference, we can expand
+ * the fields as simple Vars. Otherwise we must generate multiple
+ * copies of the rowtype reference and do FieldSelects.
*/
if (IsA(expr, Var) &&
((Var *) expr)->varattno == InvalidAttrNumber)
@@ -874,7 +873,7 @@ ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind)
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
@@ -934,6 +933,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
case RTE_RELATION:
case RTE_SPECIAL:
+
/*
* This case should not occur: a column of a table shouldn't have
* type RECORD. Fall through and fail (most likely) at the
@@ -954,7 +954,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
@@ -978,18 +978,19 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
+
/*
- * We couldn't get here unless a function is declared with one
- * of its result columns as RECORD, which is not allowed.
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
*/
break;
}
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass
- * to lookup_rowtype_tupdesc() which will probably fail, but will
- * give an appropriate error message while failing.
+ * get_expr_result_type() can do anything with it. If not, pass to
+ * lookup_rowtype_tupdesc() which will probably fail, but will give an
+ * appropriate error message while failing.
*/
if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
tupleDesc = lookup_rowtype_tupdesc(exprType(expr), exprTypmod(expr));
@@ -1125,7 +1126,7 @@ FigureColnameInternal(Node *node, char **name)
return 2;
case T_MinMaxExpr:
/* make greatest/least act like a regular function */
- switch (((MinMaxExpr*) node)->op)
+ switch (((MinMaxExpr *) node)->op)
{
case IS_GREATEST:
*name = "greatest";
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 008c1fe6a54..ec8dfef68d3 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.76 2005/08/01 20:31:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.77 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ LookupTypeName(const TypeName *typename)
case 1:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper %%TYPE reference (too few dotted names): %s",
- NameListToString(typename->names))));
+ errmsg("improper %%TYPE reference (too few dotted names): %s",
+ NameListToString(typename->names))));
break;
case 2:
rel->relname = strVal(linitial(typename->names));
@@ -91,8 +91,8 @@ LookupTypeName(const TypeName *typename)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- field, rel->relname)));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ field, rel->relname)));
restype = get_atttype(relid, attnum);
/* this construct should never have an array indicator */
@@ -364,8 +364,8 @@ pts_error_callback(void *arg)
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that
- * a type name is complex enough to need positioning.
+ * than transforming to an "internal query" error. It's unlikely that a
+ * type name is complex enough to need positioning.
*/
errposition(0);
}
@@ -406,8 +406,8 @@ parseTypeString(const char *str, Oid *type_id, int32 *typmod)
error_context_stack = ptserrcontext.previous;
/*
- * Make sure we got back exactly what we expected and no more;
- * paranoia is justified since the string might contain anything.
+ * Make sure we got back exactly what we expected and no more; paranoia is
+ * justified since the string might contain anything.
*/
if (list_length(raw_parsetree_list) != 1)
goto fail;
diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c
index cf588c1de8b..efa851ea0ba 100644
--- a/src/backend/parser/scansup.c
+++ b/src/backend/parser/scansup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.29 2004/12/31 22:00:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.30 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,8 +53,8 @@ scanstr(const char *s)
if (s[i] == '\'')
{
/*
- * Note: if scanner is working right, unescaped quotes can
- * only appear in pairs, so there should be another character.
+ * Note: if scanner is working right, unescaped quotes can only
+ * appear in pairs, so there should be another character.
*/
i++;
newStr[j] = s[i];
@@ -135,13 +135,13 @@ downcase_truncate_identifier(const char *ident, int len, bool warn)
result = palloc(len + 1);
/*
- * SQL99 specifies Unicode-aware case normalization, which we don't
- * yet have the infrastructure for. Instead we use tolower() to
- * provide a locale-aware translation. However, there are some
- * locales where this is not right either (eg, Turkish may do strange
- * things with 'i' and 'I'). Our current compromise is to use
- * tolower() for characters with the high bit set, and use an
- * ASCII-only downcasing for 7-bit characters.
+ * SQL99 specifies Unicode-aware case normalization, which we don't yet
+ * have the infrastructure for. Instead we use tolower() to provide a
+ * locale-aware translation. However, there are some locales where this
+ * is not right either (eg, Turkish may do strange things with 'i' and
+ * 'I'). Our current compromise is to use tolower() for characters with
+ * the high bit set, and use an ASCII-only downcasing for 7-bit
+ * characters.
*/
for (i = 0; i < len; i++)
{
@@ -179,8 +179,8 @@ truncate_identifier(char *ident, int len, bool warn)
if (warn)
ereport(NOTICE,
(errcode(ERRCODE_NAME_TOO_LONG),
- errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
- ident, len, ident)));
+ errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
+ ident, len, ident)));
ident[len] = '\0';
}
}
diff --git a/src/backend/port/beos/sem.c b/src/backend/port/beos/sem.c
index 5de018b22bf..2d0dabadb3c 100644
--- a/src/backend/port/beos/sem.c
+++ b/src/backend/port/beos/sem.c
@@ -129,8 +129,8 @@ semctl(int semId, int semNum, int flag, union semun semun)
delete_sem(Address[2 * i + 1]);
/*
- * Reset to an invalid semId (in case other process try to get
- * the infos from a cloned area
+ * Reset to an invalid semId (in case other process try to get the
+ * infos from a cloned area
*/
Address[2 * i + 1] = 0;
}
@@ -139,9 +139,9 @@ semctl(int semId, int semNum, int flag, union semun semun)
Address[0] = 0;
/*
- * Delete the area (it might be cloned by other process. Let them
- * live with it, in all cases semIds are 0 so if another process
- * try to use it, it will fail
+ * Delete the area (it might be cloned by other process. Let them live
+ * with it, in all cases semIds are 0 so if another process try to use
+ * it, it will fail
*/
delete_area(semId);
@@ -202,8 +202,8 @@ semget(int semKey, int semNum, int flags)
/* Get an area clone (in case it's not in our address space) */
/*
- * TODO : a check of address space might be done to avoid
- * duplicate areas in the same address space
+ * TODO : a check of address space might be done to avoid duplicate
+ * areas in the same address space
*/
parea = clone_area(Nom, &Address, B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, parea);
return parea;
@@ -218,8 +218,8 @@ semget(int semKey, int semNum, int flags)
long i;
/*
- * Limit to 250 (8 byte per sem : 4 for the semid and 4 for
- * the last pid which accessed the semaphore in a pool
+ * Limit to 250 (8 byte per sem : 4 for the semid and 4 for the
+ * last pid which accessed the semaphore in a pool
*/
if (semNum > 250)
{
@@ -291,8 +291,8 @@ semop(int semId, struct sembuf * sops, int nsops)
if (sops[i].sem_op < 0)
{
/*
- * Try acquiring the semaphore till we are not interrupted by
- * a signal
+ * Try acquiring the semaphore till we are not interrupted by a
+ * signal
*/
if (sops[i].sem_flg == IPC_NOWAIT)
{
diff --git a/src/backend/port/beos/shm.c b/src/backend/port/beos/shm.c
index 94da461ea2c..c7791ce7b4e 100644
--- a/src/backend/port/beos/shm.c
+++ b/src/backend/port/beos/shm.c
@@ -48,16 +48,15 @@ shmat(int memId, int m1, int m2)
if (ainfo.team == teinfo.team)
{
/*
- * the area is already in our address space, just return the
- * address
+ * the area is already in our address space, just return the address
*/
return (int *) ainfo.address;
}
else
{
/*
- * the area is not in our address space, clone it before and
- * return the address
+ * the area is not in our address space, clone it before and return
+ * the address
*/
area_id narea;
@@ -131,8 +130,8 @@ shmget(int memKey, int size, int flag)
return -1;
/*
- * area does not exist and its creation is requested, create it (be
- * sure to have a 4ko multiple size
+ * area does not exist and its creation is requested, create it (be sure
+ * to have a 4ko multiple size
*/
return create_area(nom, &Address, B_ANY_ADDRESS, ((size / 4096) + 1) * 4096, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
}
diff --git a/src/backend/port/beos/support.c b/src/backend/port/beos/support.c
index 3bfb6ae0d4a..228889f68ea 100644
--- a/src/backend/port/beos/support.c
+++ b/src/backend/port/beos/support.c
@@ -168,13 +168,12 @@ beos_startup(int argc, char **argv)
/* Main server loop */
for (;;)
{
- int32 opcode = 0;
+ int32 opcode = 0;
char datas[4000];
/*
- * Wait for a message from the backend : 1 : load a shared
- * object 2 : unload a shared object any other : exit support
- * server
+ * Wait for a message from the backend : 1 : load a shared object
+ * 2 : unload a shared object any other : exit support server
*/
read_port(port_in, &opcode, datas, 4000);
@@ -216,8 +215,8 @@ beos_startup(int argc, char **argv)
case 2:
/*
- * Unload shared object and send back the result of
- * the operation
+ * Unload shared object and send back the result of the
+ * operation
*/
write_port(port_out, unload_add_on(*((int *) (datas))), NULL, 0);
break;
@@ -234,10 +233,9 @@ beos_startup(int argc, char **argv)
if (get_image_symbol(addon, datas, B_SYMBOL_TYPE_TEXT, &fpt) == B_OK);
{
/*
- * Sometime the loader return B_OK for an
- * inexistant function with an invalid address !!!
- * Check that the return address is in the image
- * range
+ * Sometime the loader return B_OK for an inexistant
+ * function with an invalid address !!! Check that the
+ * return address is in the image range
*/
get_image_info(addon, &info_im);
diff --git a/src/backend/port/dynloader/aix.c b/src/backend/port/dynloader/aix.c
index a5d355c2cc2..3ace7fc3915 100644
--- a/src/backend/port/dynloader/aix.c
+++ b/src/backend/port/dynloader/aix.c
@@ -84,8 +84,8 @@ dlopen(const char *path, int mode)
static void *mainModule;
/*
- * Upon the first call register a terminate handler that will close
- * all libraries. Also get a reference to the main module for use with
+ * Upon the first call register a terminate handler that will close all
+ * libraries. Also get a reference to the main module for use with
* loadbind.
*/
if (!mainModule)
@@ -121,8 +121,8 @@ dlopen(const char *path, int mode)
}
/*
- * load should be declared load(const char *...). Thus we cast the
- * path to a normal char *. Ugly.
+ * load should be declared load(const char *...). Thus we cast the path to
+ * a normal char *. Ugly.
*/
if ((mp->entry = (void *) load((char *) path, L_NOAUTODEFER, NULL)) == NULL)
{
@@ -134,8 +134,8 @@ dlopen(const char *path, int mode)
strcat(errbuf, ": ");
/*
- * If AIX says the file is not executable, the error can be
- * further described by querying the loader about the last error.
+ * If AIX says the file is not executable, the error can be further
+ * described by querying the loader about the last error.
*/
if (errno == ENOEXEC)
{
@@ -203,8 +203,8 @@ dlopen(const char *path, int mode)
errvalid = 0;
/*
- * If the shared object was compiled using xlC we will need to call
- * static constructors (and later on dlclose destructors).
+ * If the shared object was compiled using xlC we will need to call static
+ * constructors (and later on dlclose destructors).
*/
if (mp->cdtors = (CdtorPtr) dlsym(mp, "__cdtors"))
{
@@ -268,8 +268,8 @@ dlsym(void *handle, const char *symbol)
int i;
/*
- * Could speed up the search, but I assume that one assigns the result
- * to function pointers anyways.
+ * Could speed up the search, but I assume that one assigns the result to
+ * function pointers anyways.
*/
for (ep = mp->exports, i = mp->nExports; i; i--, ep++)
if (strcmp(ep->name, symbol) == 0)
@@ -377,8 +377,8 @@ readExports(ModulePtr mp)
}
/*
- * The module might be loaded due to the LIBPATH environment
- * variable. Search for the loaded module using L_GETINFO.
+ * The module might be loaded due to the LIBPATH environment variable.
+ * Search for the loaded module using L_GETINFO.
*/
if ((buf = malloc(size)) == NULL)
{
@@ -409,8 +409,8 @@ readExports(ModulePtr mp)
}
/*
- * Traverse the list of loaded modules. The entry point returned
- * by load() does actually point to the data segment origin.
+ * Traverse the list of loaded modules. The entry point returned by
+ * load() does actually point to the data segment origin.
*/
lp = (struct ld_info *) buf;
while (lp)
@@ -445,8 +445,8 @@ readExports(ModulePtr mp)
/*
* Get the padding for the data section. This is needed for AIX 4.1
- * compilers. This is used when building the final function pointer to
- * the exported symbol.
+ * compilers. This is used when building the final function pointer to the
+ * exported symbol.
*/
if (ldnshread(ldp, _DATA, &shdata) != SUCCESS)
{
@@ -466,8 +466,8 @@ readExports(ModulePtr mp)
}
/*
- * We read the complete loader section in one chunk, this makes
- * finding long symbol names residing in the string table easier.
+ * We read the complete loader section in one chunk, this makes finding
+ * long symbol names residing in the string table easier.
*/
if ((ldbuf = (char *) malloc(sh.s_size)) == NULL)
{
@@ -520,8 +520,8 @@ readExports(ModulePtr mp)
}
/*
- * Fill in the export table. All entries are relative to the entry
- * point we got from load.
+ * Fill in the export table. All entries are relative to the entry point
+ * we got from load.
*/
ep = mp->exports;
ls = (LDSYM *) (ldbuf + LDHDRSZ);
@@ -538,8 +538,8 @@ readExports(ModulePtr mp)
{
/*
* The l_name member is not zero terminated, we must copy the
- * first SYMNMLEN chars and make sure we have a zero byte at
- * the end.
+ * first SYMNMLEN chars and make sure we have a zero byte at the
+ * end.
*/
strncpy(tmpsym, ls->l_name, SYMNMLEN);
tmpsym[SYMNMLEN] = '\0';
@@ -598,8 +598,8 @@ findMain(void)
}
/*
- * The first entry is the main module. The entry point returned by
- * load() does actually point to the data segment origin.
+ * The first entry is the main module. The entry point returned by load()
+ * does actually point to the data segment origin.
*/
lp = (struct ld_info *) buf;
ret = lp->ldinfo_dataorg;
diff --git a/src/backend/port/dynloader/aix.h b/src/backend/port/dynloader/aix.h
index 69e20ebae36..29d385986e1 100644
--- a/src/backend/port/dynloader/aix.h
+++ b/src/backend/port/dynloader/aix.h
@@ -1,5 +1,5 @@
/*
- * $PostgreSQL: pgsql/src/backend/port/dynloader/aix.h,v 1.12 2003/11/29 22:39:51 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/aix.h,v 1.13 2005/10/15 02:49:23 momjian Exp $
*
* @(#)dlfcn.h 1.4 revision of 95/04/25 09:36:52
* This is an unpublished work copyright (c) 1992 HELIOS Software GmbH
@@ -12,7 +12,6 @@
#ifdef HAVE_DLOPEN
#include <dlfcn.h>
-
#else /* HAVE_DLOPEN */
#ifdef __cplusplus
@@ -42,7 +41,6 @@ void *dlopen(const char *path, int mode);
void *dlsym(void *handle, const char *symbol);
char *dlerror(void);
int dlclose(void *handle);
-
#else
void *dlopen();
void *dlsym();
diff --git a/src/backend/port/dynloader/bsdi.c b/src/backend/port/dynloader/bsdi.c
index 6b17a929f49..bc51569a30b 100644
--- a/src/backend/port/dynloader/bsdi.c
+++ b/src/backend/port/dynloader/bsdi.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.c,v 1.26 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.c,v 1.27 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@ pg_dlopen(char *filename)
static int dl_initialized = 0;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
@@ -48,9 +48,8 @@ pg_dlopen(char *filename)
return NULL;
/*
- * If undefined symbols: try to link with the C and math libraries!
- * This could be smarter, if the dynamic linker was able to handle
- * shared libs!
+ * If undefined symbols: try to link with the C and math libraries! This
+ * could be smarter, if the dynamic linker was able to handle shared libs!
*/
if (dld_undefined_sym_count > 0)
{
diff --git a/src/backend/port/dynloader/bsdi.h b/src/backend/port/dynloader/bsdi.h
index c7f2ab5e8cc..b73fa6141f8 100644
--- a/src/backend/port/dynloader/bsdi.h
+++ b/src/backend/port/dynloader/bsdi.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.h,v 1.21 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.h,v 1.22 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,6 @@
#define pg_dlsym dlsym
#define pg_dlclose dlclose
#define pg_dlerror dlerror
-
#else /* not HAVE_DLOPEN */
#define pg_dlsym(handle, funcname) ((PGFunction) dld_get_func((funcname)))
diff --git a/src/backend/port/dynloader/hpux.c b/src/backend/port/dynloader/hpux.c
index b424e5b4c99..6a516387ba1 100644
--- a/src/backend/port/dynloader/hpux.c
+++ b/src/backend/port/dynloader/hpux.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/hpux.c,v 1.27 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/hpux.c,v 1.28 2005/10/15 02:49:23 momjian Exp $
*
* NOTES
* all functions are defined here -- it's impossible to trace the
@@ -34,7 +34,7 @@ pg_dlopen(char *filename)
* call the library!
*/
shl_t handle = shl_load(filename,
- BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
+ BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
0L);
return (void *) handle;
diff --git a/src/backend/port/dynloader/linux.c b/src/backend/port/dynloader/linux.c
index 325e8f9920b..e62431140cc 100644
--- a/src/backend/port/dynloader/linux.c
+++ b/src/backend/port/dynloader/linux.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/linux.c,v 1.30 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/linux.c,v 1.31 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,8 +38,8 @@ pg_dlopen(char *filename)
static int dl_initialized = 0;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
@@ -60,9 +60,8 @@ pg_dlopen(char *filename)
return NULL;
/*
- * If undefined symbols: try to link with the C and math libraries!
- * This could be smarter, if the dynamic linker was able to handle
- * shared libs!
+ * If undefined symbols: try to link with the C and math libraries! This
+ * could be smarter, if the dynamic linker was able to handle shared libs!
*/
if (dld_undefined_sym_count > 0)
{
diff --git a/src/backend/port/dynloader/ultrix4.c b/src/backend/port/dynloader/ultrix4.c
index c0e4555a382..c6315380839 100644
--- a/src/backend/port/dynloader/ultrix4.c
+++ b/src/backend/port/dynloader/ultrix4.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/ultrix4.c,v 1.22 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/ultrix4.c,v 1.23 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@ pg_dlopen(char *filename)
void *handle;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
@@ -43,8 +43,8 @@ pg_dlopen(char *filename)
}
/*
- * open the file. We do the symbol resolution right away so that we
- * will know if there are undefined symbols. (This is in fact the same
+ * open the file. We do the symbol resolution right away so that we will
+ * know if there are undefined symbols. (This is in fact the same
* semantics as "ld -A". ie. you cannot have undefined symbols.
*/
if ((handle = dl_open(filename, DL_NOW)) == NULL)
diff --git a/src/backend/port/dynloader/win32.c b/src/backend/port/dynloader/win32.c
index c2c496a5295..c2547f5a28d 100644
--- a/src/backend/port/dynloader/win32.c
+++ b/src/backend/port/dynloader/win32.c
@@ -1,31 +1,32 @@
-/* $PostgreSQL: pgsql/src/backend/port/dynloader/win32.c,v 1.6 2005/08/12 21:23:10 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/backend/port/dynloader/win32.c,v 1.7 2005/10/15 02:49:23 momjian Exp $ */
#include <windows.h>
#include <stdio.h>
-char *dlerror(void);
-int dlclose(void *handle);
-void *dlsym(void *handle, const char *symbol);
-void *dlopen(const char *path, int mode);
+char *dlerror(void);
+int dlclose(void *handle);
+void *dlsym(void *handle, const char *symbol);
+void *dlopen(const char *path, int mode);
static char last_dyn_error[512];
-static void set_dl_error(void)
+static void
+set_dl_error(void)
{
- DWORD err = GetLastError();
+ DWORD err = GetLastError();
if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS |
- FORMAT_MESSAGE_FROM_SYSTEM,
- NULL,
- err,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- last_dyn_error,
- sizeof(last_dyn_error)-1,
- NULL) == 0)
+ FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL,
+ err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ last_dyn_error,
+ sizeof(last_dyn_error) - 1,
+ NULL) == 0)
{
- snprintf(last_dyn_error, sizeof(last_dyn_error)-1,
- "unknown error %lu", err);
- }
+ snprintf(last_dyn_error, sizeof(last_dyn_error) - 1,
+ "unknown error %lu", err);
+ }
}
char *
@@ -52,9 +53,10 @@ dlclose(void *handle)
void *
dlsym(void *handle, const char *symbol)
{
- void *ptr;
+ void *ptr;
+
ptr = GetProcAddress((HMODULE) handle, symbol);
- if (!ptr)
+ if (!ptr)
{
set_dl_error();
return NULL;
@@ -66,15 +68,15 @@ dlsym(void *handle, const char *symbol)
void *
dlopen(const char *path, int mode)
{
- HMODULE h;
- int prevmode;
+ HMODULE h;
+ int prevmode;
/* Disable popup error messages when loading DLLs */
prevmode = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
h = LoadLibrary(path);
SetErrorMode(prevmode);
-
- if (!h)
+
+ if (!h)
{
set_dl_error();
return NULL;
diff --git a/src/backend/port/ipc_test.c b/src/backend/port/ipc_test.c
index 3e2068f19a5..89ebe1ded73 100644
--- a/src/backend/port/ipc_test.c
+++ b/src/backend/port/ipc_test.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.17 2005/02/05 20:07:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.18 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,7 +75,7 @@ proc_exit(int code)
shmem_exit(code);
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
- on_proc_exit_list[on_proc_exit_index].arg);
+ on_proc_exit_list[on_proc_exit_index].arg);
exit(code);
}
@@ -84,7 +84,7 @@ shmem_exit(int code)
{
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
- on_shmem_exit_list[on_shmem_exit_index].arg);
+ on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index d2229011072..2024b3ebdea 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.13 2004/12/31 22:00:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.14 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,14 +93,13 @@ PosixSemaphoreCreate(void)
}
/*
- * Unlink the semaphore immediately, so it can't be accessed
- * externally. This also ensures that it will go away if we crash.
+ * Unlink the semaphore immediately, so it can't be accessed externally.
+ * This also ensures that it will go away if we crash.
*/
sem_unlink(semname);
return mySem;
}
-
#else /* !USE_NAMED_POSIX_SEMAPHORES */
/*
@@ -243,38 +242,36 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We
- * assume that if such an interrupt comes in while we are waiting, it
- * will cause the sem_wait() call to exit with errno == EINTR, so that
- * we will be able to service the interrupt (if not in a critical
- * section already).
+ * Each time around the loop, we check for a cancel/die interrupt. We assume
+ * that if such an interrupt comes in while we are waiting, it will cause
+ * the sem_wait() call to exit with errno == EINTR, so that we will be
+ * able to service the interrupt (if not in a critical section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of the
- * lock before any interrupt can be accepted.
+ * returning. The caller needs to be able to record ownership of the lock
+ * before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the sem_wait() call. If a cancel/die interrupt occurs
- * in that window, we would fail to notice it until after we acquire
- * the lock (or get another interrupt to escape the sem_wait()). We
- * can avoid this problem by temporarily setting ImmediateInterruptOK
- * to true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt
- * in this interval will execute directly. However, there is a huge
- * pitfall: there is another window of a few instructions after the
- * sem_wait() before we are able to reset ImmediateInterruptOK. If an
- * interrupt occurs then, we'll lose control, which means that the
- * lock has been acquired but our caller did not get a chance to
- * record the fact. Therefore, we only set ImmediateInterruptOK if the
- * caller tells us it's OK to do so, ie, the caller does not need to
- * record acquiring the lock. (This is currently true for lockmanager
- * locks, since the process that granted us the lock did all the
- * necessary state updates. It's not true for Posix semaphores used to
- * implement LW locks or emulate spinlocks --- but the wait time for
- * such locks should not be very long, anyway.)
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
+ * entering the sem_wait() call. If a cancel/die interrupt occurs in that
+ * window, we would fail to notice it until after we acquire the lock (or
+ * get another interrupt to escape the sem_wait()). We can avoid this
+ * problem by temporarily setting ImmediateInterruptOK to true before we
+ * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
+ * execute directly. However, there is a huge pitfall: there is another
+ * window of a few instructions after the sem_wait() before we are able to
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * control, which means that the lock has been acquired but our caller did
+ * not get a chance to record the fact. Therefore, we only set
+ * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
+ * caller does not need to record acquiring the lock. (This is currently
+ * true for lockmanager locks, since the process that granted us the lock
+ * did all the necessary state updates. It's not true for Posix semaphores
+ * used to implement LW locks or emulate spinlocks --- but the wait time
+ * for such locks should not be very long, anyway.)
*/
do
{
@@ -299,10 +296,10 @@ PGSemaphoreUnlock(PGSemaphore sema)
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and unlock the semaphore again. Not clear this
- * can really happen, but might as well cope.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and unlock the semaphore again. Not clear this can really happen,
+ * but might as well cope.
*/
do
{
@@ -324,9 +321,9 @@ PGSemaphoreTryLock(PGSemaphore sema)
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*/
do
{
diff --git a/src/backend/port/qnx4/sem.c b/src/backend/port/qnx4/sem.c
index 484a85b6fcc..4a8d6a348fa 100644
--- a/src/backend/port/qnx4/sem.c
+++ b/src/backend/port/qnx4/sem.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/qnx4/sem.c,v 1.12 2003/11/29 19:51:54 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/qnx4/sem.c,v 1.13 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,8 +47,7 @@ struct sem_set_info
int nsems;
sem_t sem[SEMMAX]; /* array of POSIX semaphores */
struct sem semV[SEMMAX]; /* array of System V semaphore structures */
- struct pending_ops pendingOps[SEMMAX]; /* array of pending
- * operations */
+ struct pending_ops pendingOps[SEMMAX]; /* array of pending operations */
};
struct sem_info
@@ -189,7 +188,7 @@ semget(key_t key, int nsems, int semflg)
fprintf(stderr,
"Found a pre-existing shared memory block for the semaphore memory\n"
"of a different size (%ld instead %ld). Make sure that all executables\n"
- "are from the same release or remove the file \"/dev/shmem/%s\"\n"
+ "are from the same release or remove the file \"/dev/shmem/%s\"\n"
"left by a previous version.\n",
(long) statbuf.st_size,
(long) sem_info_size,
diff --git a/src/backend/port/qnx4/shm.c b/src/backend/port/qnx4/shm.c
index fc2a70aa8fe..7dde22773e9 100644
--- a/src/backend/port/qnx4/shm.c
+++ b/src/backend/port/qnx4/shm.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/qnx4/shm.c,v 1.9 2003/11/29 19:51:54 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/qnx4/shm.c,v 1.10 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,8 +200,8 @@ shmctl(int shmid, int cmd, struct shmid_ds * buf)
case IPC_STAT:
/*
- * we have to open it first. stat() does no prefix tracking ->
- * the call would go to fsys instead of proc
+ * we have to open it first. stat() does no prefix tracking -> the
+ * call would go to fsys instead of proc
*/
keytoname(shmid, name);
fd = shm_open(name, 0, MODE);
@@ -210,8 +210,8 @@ shmctl(int shmid, int cmd, struct shmid_ds * buf)
result = fstat(fd, &statbuf);
/*
- * if the file exists, subtract 2 from linkcount : one for
- * our own open and one for the dir entry
+ * if the file exists, subtract 2 from linkcount : one for our
+ * own open and one for the dir entry
*/
if (!result)
buf->shm_nattch = statbuf.st_nlink - 2;
@@ -221,8 +221,8 @@ shmctl(int shmid, int cmd, struct shmid_ds * buf)
else
{
/*
- * if there's no entry for this key it doesn't matter the
- * next shmget() would get a different shm anyway
+ * if there's no entry for this key it doesn't matter the next
+ * shmget() would get a different shm anyway
*/
buf->shm_nattch = 0;
return 0;
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index 1d44a40033c..d42e8c87684 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.16 2004/12/31 22:00:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.17 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,8 +58,7 @@ typedef int IpcSemaphoreId; /* semaphore ID returned by semget(2) */
#define PGSemaMagic 537 /* must be less than SEMVMX */
-static IpcSemaphoreId *mySemaSets; /* IDs of sema sets acquired so
- * far */
+static IpcSemaphoreId *mySemaSets; /* IDs of sema sets acquired so far */
static int numSemaSets; /* number of sema sets acquired so far */
static int maxSemaSets; /* allocated size of mySemaSets array */
static IpcSemaphoreKey nextSemaKey; /* next key to try using */
@@ -97,11 +96,10 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
if (semId < 0)
{
/*
- * Fail quietly if error indicates a collision with existing set.
- * One would expect EEXIST, given that we said IPC_EXCL, but
- * perhaps we could get a permission violation instead? Also,
- * EIDRM might occur if an old set is slated for destruction but
- * not gone yet.
+ * Fail quietly if error indicates a collision with existing set. One
+ * would expect EEXIST, given that we said IPC_EXCL, but perhaps we
+ * could get a permission violation instead? Also, EIDRM might occur
+ * if an old set is slated for destruction but not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
@@ -120,13 +118,13 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == ENOSPC) ?
errhint("This error does *not* mean that you have run out of disk space.\n"
- "It occurs when either the system limit for the maximum number of "
- "semaphore sets (SEMMNI), or the system wide maximum number of "
- "semaphores (SEMMNS), would be exceeded. You need to raise the "
- "respective kernel parameter. Alternatively, reduce PostgreSQL's "
- "consumption of semaphores by reducing its max_connections parameter "
+ "It occurs when either the system limit for the maximum number of "
+ "semaphore sets (SEMMNI), or the system wide maximum number of "
+ "semaphores (SEMMNS), would be exceeded. You need to raise the "
+ "respective kernel parameter. Alternatively, reduce PostgreSQL's "
+ "consumption of semaphores by reducing its max_connections parameter "
"(currently %d).\n"
- "The PostgreSQL documentation contains more information about "
+ "The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.",
MaxBackends) : 0));
}
@@ -149,7 +147,7 @@ IpcSemaphoreInitialize(IpcSemaphoreId semId, int semNum, int value)
semId, semNum, value),
(errno == ERANGE) ?
errhint("You possibly need to raise your kernel's SEMVMX value to be at least "
- "%d. Look into the PostgreSQL documentation for details.",
+ "%d. Look into the PostgreSQL documentation for details.",
value) : 0));
}
@@ -224,8 +222,8 @@ IpcSemaphoreCreate(int numSems)
continue; /* sema belongs to a non-Postgres app */
/*
- * If the creator PID is my own PID or does not belong to any
- * extant process, it's safe to zap it.
+ * If the creator PID is my own PID or does not belong to any extant
+ * process, it's safe to zap it.
*/
creatorPID = IpcSemaphoreGetLastPID(semId, numSems);
if (creatorPID <= 0)
@@ -237,11 +235,10 @@ IpcSemaphoreCreate(int numSems)
}
/*
- * The sema set appears to be from a dead Postgres process, or
- * from a previous cycle of life in this same process. Zap it, if
- * possible. This probably shouldn't fail, but if it does, assume
- * the sema set belongs to someone else after all, and continue
- * quietly.
+ * The sema set appears to be from a dead Postgres process, or from a
+ * previous cycle of life in this same process. Zap it, if possible.
+ * This probably shouldn't fail, but if it does, assume the sema set
+ * belongs to someone else after all, and continue quietly.
*/
semun.val = 0; /* unused, but keep compiler quiet */
if (semctl(semId, 0, IPC_RMID, semun) < 0)
@@ -255,17 +252,17 @@ IpcSemaphoreCreate(int numSems)
break; /* successful create */
/*
- * Can only get here if some other process managed to create the
- * same sema key before we did. Let him have that one, loop
- * around to try next key.
+ * Can only get here if some other process managed to create the same
+ * sema key before we did. Let him have that one, loop around to try
+ * next key.
*/
}
/*
- * OK, we created a new sema set. Mark it as created by this process.
- * We do this by setting the spare semaphore to PGSemaMagic-1 and then
- * incrementing it with semop(). That leaves it with value
- * PGSemaMagic and sempid referencing this process.
+ * OK, we created a new sema set. Mark it as created by this process. We
+ * do this by setting the spare semaphore to PGSemaMagic-1 and then
+ * incrementing it with semop(). That leaves it with value PGSemaMagic
+ * and sempid referencing this process.
*/
IpcSemaphoreInitialize(semId, numSems, PGSemaMagic - 1);
mysema.semId = semId;
@@ -303,8 +300,7 @@ PGReserveSemaphores(int maxSemas, int port)
elog(PANIC, "out of memory");
numSemaSets = 0;
nextSemaKey = port * 1000;
- nextSemaNumber = SEMAS_PER_SET; /* force sema set alloc on 1st
- * call */
+ nextSemaNumber = SEMAS_PER_SET; /* force sema set alloc on 1st call */
on_shmem_exit(ReleaseSemaphores, 0);
}
@@ -378,38 +374,36 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We
- * assume that if such an interrupt comes in while we are waiting, it
- * will cause the semop() call to exit with errno == EINTR, so that we
- * will be able to service the interrupt (if not in a critical section
- * already).
+ * Each time around the loop, we check for a cancel/die interrupt. We assume
+ * that if such an interrupt comes in while we are waiting, it will cause
+ * the semop() call to exit with errno == EINTR, so that we will be able
+ * to service the interrupt (if not in a critical section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of the
- * lock before any interrupt can be accepted.
+ * returning. The caller needs to be able to record ownership of the lock
+ * before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the semop() call. If a cancel/die interrupt occurs in
- * that window, we would fail to notice it until after we acquire the
- * lock (or get another interrupt to escape the semop()). We can
- * avoid this problem by temporarily setting ImmediateInterruptOK to
- * true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in
- * this interval will execute directly. However, there is a huge
- * pitfall: there is another window of a few instructions after the
- * semop() before we are able to reset ImmediateInterruptOK. If an
- * interrupt occurs then, we'll lose control, which means that the
- * lock has been acquired but our caller did not get a chance to
- * record the fact. Therefore, we only set ImmediateInterruptOK if the
- * caller tells us it's OK to do so, ie, the caller does not need to
- * record acquiring the lock. (This is currently true for lockmanager
- * locks, since the process that granted us the lock did all the
- * necessary state updates. It's not true for SysV semaphores used to
- * implement LW locks or emulate spinlocks --- but the wait time for
- * such locks should not be very long, anyway.)
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
+ * entering the semop() call. If a cancel/die interrupt occurs in that
+ * window, we would fail to notice it until after we acquire the lock (or
+ * get another interrupt to escape the semop()). We can avoid this
+ * problem by temporarily setting ImmediateInterruptOK to true before we
+ * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
+ * execute directly. However, there is a huge pitfall: there is another
+ * window of a few instructions after the semop() before we are able to
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * control, which means that the lock has been acquired but our caller did
+ * not get a chance to record the fact. Therefore, we only set
+ * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
+ * caller does not need to record acquiring the lock. (This is currently
+ * true for lockmanager locks, since the process that granted us the lock
+ * did all the necessary state updates. It's not true for SysV semaphores
+ * used to implement LW locks or emulate spinlocks --- but the wait time
+ * for such locks should not be very long, anyway.)
*/
do
{
@@ -439,10 +433,10 @@ PGSemaphoreUnlock(PGSemaphore sema)
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and unlock the semaphore again. Not clear this
- * can really happen, but might as well cope.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and unlock the semaphore again. Not clear this can really happen,
+ * but might as well cope.
*/
do
{
@@ -469,9 +463,9 @@ PGSemaphoreTryLock(PGSemaphore sema)
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*/
do
{
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 23b945f0850..3092ca2a377 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.43 2005/08/20 23:26:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.44 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,11 +81,10 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
if (shmid < 0)
{
/*
- * Fail quietly if error indicates a collision with existing
- * segment. One would expect EEXIST, given that we said IPC_EXCL,
- * but perhaps we could get a permission violation instead? Also,
- * EIDRM might occur if an old seg is slated for destruction but
- * not gone yet.
+ * Fail quietly if error indicates a collision with existing segment.
+ * One would expect EEXIST, given that we said IPC_EXCL, but perhaps
+ * we could get a permission violation instead? Also, EIDRM might
+ * occur if an old seg is slated for destruction but not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
@@ -99,41 +98,41 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
*/
ereport(FATAL,
(errmsg("could not create shared memory segment: %m"),
- errdetail("Failed system call was shmget(key=%lu, size=%lu, 0%o).",
- (unsigned long) memKey, (unsigned long) size,
- IPC_CREAT | IPC_EXCL | IPCProtection),
+ errdetail("Failed system call was shmget(key=%lu, size=%lu, 0%o).",
+ (unsigned long) memKey, (unsigned long) size,
+ IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == EINVAL) ?
errhint("This error usually means that PostgreSQL's request for a shared memory "
- "segment exceeded your kernel's SHMMAX parameter. You can either "
+ "segment exceeded your kernel's SHMMAX parameter. You can either "
"reduce the request size or reconfigure the kernel with larger SHMMAX. "
- "To reduce the request size (currently %lu bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "To reduce the request size (currently %lu bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"If the request size is already small, it's possible that it is less than "
"your kernel's SHMMIN parameter, in which case raising the request size or "
"reconfiguring SHMMIN is called for.\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0,
(errno == ENOMEM) ?
errhint("This error usually means that PostgreSQL's request for a shared "
- "memory segment exceeded available memory or swap space. "
- "To reduce the request size (currently %lu bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "memory segment exceeded available memory or swap space. "
+ "To reduce the request size (currently %lu bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0,
(errno == ENOSPC) ?
errhint("This error does *not* mean that you have run out of disk space. "
"It occurs either if all available shared memory IDs have been taken, "
"in which case you need to raise the SHMMNI parameter in your kernel, "
- "or because the system's overall limit for shared memory has been "
- "reached. If you cannot increase the shared memory limit, "
- "reduce PostgreSQL's shared memory request (currently %lu bytes), "
- "by reducing its shared_buffers parameter (currently %d) and/or "
+ "or because the system's overall limit for shared memory has been "
+ "reached. If you cannot increase the shared memory limit, "
+ "reduce PostgreSQL's shared memory request (currently %lu bytes), "
+ "by reducing its shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0));
}
@@ -187,7 +186,7 @@ IpcMemoryDelete(int status, Datum shmId)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
@@ -197,35 +196,38 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
{
IpcMemoryId shmId = (IpcMemoryId) id2;
struct shmid_ds shmStat;
+
#ifndef WIN32
struct stat statbuf;
PGShmemHeader *hdr;
#endif
/*
- * We detect whether a shared memory segment is in use by seeing
- * whether it (a) exists and (b) has any processes are attached to it.
+ * We detect whether a shared memory segment is in use by seeing whether
+ * it (a) exists and (b) has any processes are attached to it.
*/
if (shmctl(shmId, IPC_STAT, &shmStat) < 0)
{
/*
* EINVAL actually has multiple possible causes documented in the
- * shmctl man page, but we assume it must mean the segment no
- * longer exists.
+ * shmctl man page, but we assume it must mean the segment no longer
+ * exists.
*/
if (errno == EINVAL)
return false;
+
/*
- * EACCES implies that the segment belongs to some other userid,
- * which means it is not a Postgres shmem segment (or at least,
- * not one that is relevant to our data directory).
+ * EACCES implies that the segment belongs to some other userid, which
+ * means it is not a Postgres shmem segment (or at least, not one that
+ * is relevant to our data directory).
*/
if (errno == EACCES)
return false;
+
/*
- * Otherwise, we had better assume that the segment is in use.
- * The only likely case is EIDRM, which implies that the segment
- * has been IPC_RMID'd but there are still processes attached to it.
+ * Otherwise, we had better assume that the segment is in use. The
+ * only likely case is EIDRM, which implies that the segment has been
+ * IPC_RMID'd but there are still processes attached to it.
*/
return true;
}
@@ -295,6 +297,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
void *memAddress;
PGShmemHeader *hdr;
IpcMemoryId shmid;
+
#ifndef WIN32
struct stat statbuf;
#endif
@@ -338,11 +341,10 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
}
/*
- * The segment appears to be from a dead Postgres process, or from
- * a previous cycle of life in this same process. Zap it, if
- * possible. This probably shouldn't fail, but if it does, assume
- * the segment belongs to someone else after all, and continue
- * quietly.
+ * The segment appears to be from a dead Postgres process, or from a
+ * previous cycle of life in this same process. Zap it, if possible.
+ * This probably shouldn't fail, but if it does, assume the segment
+ * belongs to someone else after all, and continue quietly.
*/
shmdt(memAddress);
if (shmctl(shmid, IPC_RMID, NULL) < 0)
@@ -356,17 +358,16 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
break; /* successful create and attach */
/*
- * Can only get here if some other process managed to create the
- * same shmem key before we did. Let him have that one, loop
- * around to try next key.
+ * Can only get here if some other process managed to create the same
+ * shmem key before we did. Let him have that one, loop around to try
+ * next key.
*/
}
/*
- * OK, we created a new segment. Mark it as created by this process.
- * The order of assignments here is critical so that another Postgres
- * process can't see the header as valid but belonging to an invalid
- * PID!
+ * OK, we created a new segment. Mark it as created by this process. The
+ * order of assignments here is critical so that another Postgres process
+ * can't see the header as valid but belonging to an invalid PID!
*/
hdr = (PGShmemHeader *) memAddress;
hdr->creatorPID = getpid();
@@ -401,7 +402,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
@@ -436,8 +437,7 @@ PGSharedMemoryReAttach(void)
UsedShmemSegAddr = hdr; /* probably redundant */
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
* PGSharedMemoryDetach
diff --git a/src/backend/port/win32/error.c b/src/backend/port/win32/error.c
index c7f0a24f102..eb660a3ef83 100644
--- a/src/backend/port/win32/error.c
+++ b/src/backend/port/win32/error.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.5 2005/10/07 16:34:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.6 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -178,7 +178,7 @@ _dosmaperr(unsigned long e)
errno = doserrors[i].doserr;
ereport(DEBUG5,
(errmsg_internal("mapped win32 error code %lu to %d",
- e, errno)));
+ e, errno)));
return;
}
}
diff --git a/src/backend/port/win32/security.c b/src/backend/port/win32/security.c
index 9283f3f6942..f610b893617 100644
--- a/src/backend/port/win32/security.c
+++ b/src/backend/port/win32/security.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.8 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.9 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -15,8 +15,8 @@
static BOOL pgwin32_get_dynamic_tokeninfo(HANDLE token,
- TOKEN_INFORMATION_CLASS class, char **InfoBuffer,
- char *errbuf, int errsize);
+ TOKEN_INFORMATION_CLASS class, char **InfoBuffer,
+ char *errbuf, int errsize);
/*
* Returns nonzero if the current user has administrative privileges,
@@ -30,7 +30,7 @@ pgwin32_is_admin(void)
{
HANDLE AccessToken;
char *InfoBuffer = NULL;
- char errbuf[256];
+ char errbuf[256];
PTOKEN_GROUPS Groups;
PSID AdministratorsSid;
PSID PowerUsersSid;
@@ -57,7 +57,7 @@ pgwin32_is_admin(void)
CloseHandle(AccessToken);
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
0, &AdministratorsSid))
{
write_stderr("could not get SID for Administrators group: error code %d\n",
@@ -66,7 +66,7 @@ pgwin32_is_admin(void)
}
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
0, &PowerUsersSid))
{
write_stderr("could not get SID for PowerUsers group: error code %d\n",
@@ -114,8 +114,8 @@ pgwin32_is_service(void)
{
static int _is_service = -1;
HANDLE AccessToken;
- char *InfoBuffer = NULL;
- char errbuf[256];
+ char *InfoBuffer = NULL;
+ char errbuf[256];
PTOKEN_GROUPS Groups;
PTOKEN_USER User;
PSID ServiceSid;
@@ -138,14 +138,14 @@ pgwin32_is_service(void)
if (!pgwin32_get_dynamic_tokeninfo(AccessToken, TokenUser, &InfoBuffer,
errbuf, sizeof(errbuf)))
{
- fprintf(stderr,errbuf);
+ fprintf(stderr, errbuf);
return -1;
}
User = (PTOKEN_USER) InfoBuffer;
if (!AllocateAndInitializeSid(&NtAuthority, 1,
- SECURITY_LOCAL_SYSTEM_RID, 0, 0, 0, 0, 0, 0, 0,
+ SECURITY_LOCAL_SYSTEM_RID, 0, 0, 0, 0, 0, 0, 0,
&LocalSystemSid))
{
fprintf(stderr, "could not get SID for local system account\n");
@@ -169,14 +169,14 @@ pgwin32_is_service(void)
if (!pgwin32_get_dynamic_tokeninfo(AccessToken, TokenGroups, &InfoBuffer,
errbuf, sizeof(errbuf)))
{
- fprintf(stderr,errbuf);
+ fprintf(stderr, errbuf);
return -1;
}
Groups = (PTOKEN_GROUPS) InfoBuffer;
if (!AllocateAndInitializeSid(&NtAuthority, 1,
- SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
+ SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
&ServiceSid))
{
fprintf(stderr, "could not get SID for service group\n");
@@ -213,17 +213,17 @@ static BOOL
pgwin32_get_dynamic_tokeninfo(HANDLE token, TOKEN_INFORMATION_CLASS class,
char **InfoBuffer, char *errbuf, int errsize)
{
- DWORD InfoBufferSize;
+ DWORD InfoBufferSize;
if (GetTokenInformation(token, class, NULL, 0, &InfoBufferSize))
{
- snprintf(errbuf,errsize,"could not get token information: got zero size\n");
+ snprintf(errbuf, errsize, "could not get token information: got zero size\n");
return FALSE;
}
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
{
- snprintf(errbuf,errsize,"could not get token information: error code %d\n",
+ snprintf(errbuf, errsize, "could not get token information: error code %d\n",
(int) GetLastError());
return FALSE;
}
@@ -231,18 +231,18 @@ pgwin32_get_dynamic_tokeninfo(HANDLE token, TOKEN_INFORMATION_CLASS class,
*InfoBuffer = malloc(InfoBufferSize);
if (*InfoBuffer == NULL)
{
- snprintf(errbuf,errsize,"could not allocate %d bytes for token information\n",
+ snprintf(errbuf, errsize, "could not allocate %d bytes for token information\n",
(int) InfoBufferSize);
return FALSE;
}
- if (!GetTokenInformation(token, class, *InfoBuffer,
+ if (!GetTokenInformation(token, class, *InfoBuffer,
InfoBufferSize, &InfoBufferSize))
{
- snprintf(errbuf,errsize,"could not get token information: error code %d\n",
+ snprintf(errbuf, errsize, "could not get token information: error code %d\n",
(int) GetLastError());
return FALSE;
}
-
+
return TRUE;
}
diff --git a/src/backend/port/win32/sema.c b/src/backend/port/win32/sema.c
index 7942e696e3c..a9d62f057f1 100644
--- a/src/backend/port/win32/sema.c
+++ b/src/backend/port/win32/sema.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.10 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.11 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -216,8 +216,8 @@ semop(int semId, struct sembuf * sops, int nsops)
if (nsops != 1)
{
/*
- * Not supported (we return on 1st success, and don't cancel
- * earlier ops)
+ * Not supported (we return on 1st success, and don't cancel earlier
+ * ops)
*/
errno = E2BIG;
return -1;
diff --git a/src/backend/port/win32/shmem.c b/src/backend/port/win32/shmem.c
index 49f5696ceb7..dbb9cdc0f1c 100644
--- a/src/backend/port/win32/shmem.c
+++ b/src/backend/port/win32/shmem.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.10 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.11 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@ shmat(int memId, void *shmaddr, int flag)
/* TODO -- shmat needs to count # attached to shared mem */
void *lpmem = MapViewOfFileEx((HANDLE) memId,
FILE_MAP_WRITE | FILE_MAP_READ,
- 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */ , shmaddr);
+ 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */ , shmaddr);
if (lpmem == NULL)
{
diff --git a/src/backend/port/win32/signal.c b/src/backend/port/win32/signal.c
index e0c9dba16f4..a32427f28f5 100644
--- a/src/backend/port/win32/signal.c
+++ b/src/backend/port/win32/signal.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.11 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.12 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,7 +26,7 @@ static pqsigfunc pg_signal_defaults[PG_SIGNAL_COUNT];
static int pg_signal_mask;
DLLIMPORT HANDLE pgwin32_signal_event;
-HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
+HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
/* Signal handling thread function */
@@ -73,12 +73,12 @@ pgwin32_signal_initialize(void)
signal_thread_handle = CreateThread(NULL, 0, pg_signal_thread, NULL, 0, NULL);
if (signal_thread_handle == NULL)
ereport(FATAL,
- (errmsg_internal("failed to create signal handler thread")));
+ (errmsg_internal("failed to create signal handler thread")));
/* Create console control handle to pick up Ctrl-C etc */
if (!SetConsoleCtrlHandler(pg_console_handler, TRUE))
ereport(FATAL,
- (errmsg_internal("failed to set console control handler")));
+ (errmsg_internal("failed to set console control handler")));
}
@@ -112,9 +112,9 @@ pgwin32_dispatch_queued_signals(void)
LeaveCriticalSection(&pg_signal_crit_sec);
sig(i);
EnterCriticalSection(&pg_signal_crit_sec);
- break; /* Restart outer loop, in case signal mask
- * or queue has been modified inside
- * signal handler */
+ break; /* Restart outer loop, in case signal mask or
+ * queue has been modified inside signal
+ * handler */
}
}
}
@@ -133,8 +133,8 @@ pqsigsetmask(int mask)
pg_signal_mask = mask;
/*
- * Dispatch any signals queued up right away, in case we have
- * unblocked one or more signals previously queued
+ * Dispatch any signals queued up right away, in case we have unblocked
+ * one or more signals previously queued
*/
pgwin32_dispatch_queued_signals();
@@ -165,7 +165,7 @@ pgwin32_create_signal_listener(pid_t pid)
wsprintf(pipename, "\\\\.\\pipe\\pgsignal_%d", (int) pid);
pipe = CreateNamedPipe(pipename, PIPE_ACCESS_DUPLEX,
- PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
if (pipe == INVALID_HANDLE_VALUE)
@@ -218,8 +218,8 @@ pg_signal_dispatch_thread(LPVOID param)
CloseHandle(pipe);
return 0;
}
- WriteFile(pipe, &sigNum, 1, &bytes, NULL); /* Don't care if it works
- * or not.. */
+ WriteFile(pipe, &sigNum, 1, &bytes, NULL); /* Don't care if it works or
+ * not.. */
FlushFileBuffers(pipe);
DisconnectNamedPipe(pipe);
CloseHandle(pipe);
@@ -233,7 +233,7 @@ static DWORD WINAPI
pg_signal_thread(LPVOID param)
{
char pipename[128];
- HANDLE pipe = pgwin32_initial_signal_pipe;
+ HANDLE pipe = pgwin32_initial_signal_pipe;
wsprintf(pipename, "\\\\.\\pipe\\pgsignal_%d", GetCurrentProcessId());
@@ -245,8 +245,8 @@ pg_signal_thread(LPVOID param)
if (pipe == INVALID_HANDLE_VALUE)
{
pipe = CreateNamedPipe(pipename, PIPE_ACCESS_DUPLEX,
- PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
+ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
if (pipe == INVALID_HANDLE_VALUE)
{
@@ -260,7 +260,7 @@ pg_signal_thread(LPVOID param)
if (fConnected)
{
hThread = CreateThread(NULL, 0,
- (LPTHREAD_START_ROUTINE) pg_signal_dispatch_thread,
+ (LPTHREAD_START_ROUTINE) pg_signal_dispatch_thread,
(LPVOID) pipe, 0, NULL);
if (hThread == INVALID_HANDLE_VALUE)
write_stderr("could not create signal dispatch thread: error code %d\n",
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index e65197e4d96..808977a2374 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.8 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.9 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -178,8 +178,8 @@ pgwin32_accept(SOCKET s, struct sockaddr * addr, int *addrlen)
SOCKET rs;
/*
- * Poll for signals, but don't return with EINTR, since we don't
- * handle that in pqcomm.c
+ * Poll for signals, but don't return with EINTR, since we don't handle
+ * that in pqcomm.c
*/
pgwin32_poll_signals();
@@ -351,8 +351,8 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
if (WSAGetLastError() != WSAEWOULDBLOCK)
/*
- * Not completed, and not just "would block", so an
- * error occured
+ * Not completed, and not just "would block", so an error
+ * occured
*/
FD_SET(writefds->fd_array[i], &outwritefds);
}
@@ -423,8 +423,8 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
if (r != WAIT_TIMEOUT && r != WAIT_IO_COMPLETION && r != (WAIT_OBJECT_0 + numevents))
{
/*
- * We scan all events, even those not signalled, in case more than
- * one event has been tagged but Wait.. can only return one.
+ * We scan all events, even those not signalled, in case more than one
+ * event has been tagged but Wait.. can only return one.
*/
WSANETWORKEVENTS resEvents;
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 9aeecec7a6e..1081cf83e9c 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.4 2005/08/15 16:25:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.5 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,17 +69,17 @@ static time_t last_autovac_start_time = 0;
static time_t last_autovac_stop_time = 0;
/* Memory context for long-lived data */
-static MemoryContext AutovacMemCxt;
+static MemoryContext AutovacMemCxt;
/* struct to keep list of candidate databases for vacuum */
typedef struct autovac_dbase
{
- Oid oid;
- char *name;
- TransactionId frozenxid;
- TransactionId vacuumxid;
+ Oid oid;
+ char *name;
+ TransactionId frozenxid;
+ TransactionId vacuumxid;
PgStat_StatDBEntry *entry;
- int32 age;
+ int32 age;
} autovac_dbase;
/* struct to keep track of tables to vacuum and/or analyze */
@@ -102,12 +102,12 @@ static void process_whole_db(void);
static void do_autovacuum(PgStat_StatDBEntry *dbentry);
static List *autovac_get_database_list(void);
static void test_rel_for_autovac(Oid relid, PgStat_StatTabEntry *tabentry,
- Form_pg_class classForm,
- Form_pg_autovacuum avForm,
- List **vacuum_tables,
- List **toast_table_ids);
+ Form_pg_class classForm,
+ Form_pg_autovacuum avForm,
+ List **vacuum_tables,
+ List **toast_table_ids);
static void autovacuum_do_vac_analyze(List *relids, bool dovacuum,
- bool doanalyze, bool freeze);
+ bool doanalyze, bool freeze);
/*
@@ -126,16 +126,16 @@ autovac_start(void)
return 0;
/*
- * Do nothing if too soon since last autovacuum exit. This limits
- * how often the daemon runs. Since the time per iteration can be
- * quite variable, it seems more useful to measure/control the time
- * since last subprocess exit than since last subprocess launch.
+ * Do nothing if too soon since last autovacuum exit. This limits how
+ * often the daemon runs. Since the time per iteration can be quite
+ * variable, it seems more useful to measure/control the time since last
+ * subprocess exit than since last subprocess launch.
*
- * However, we *also* check the time since last subprocess launch;
- * this prevents thrashing under fork-failure conditions.
+ * However, we *also* check the time since last subprocess launch; this
+ * prevents thrashing under fork-failure conditions.
*
- * Note that since we will be re-called from the postmaster main loop,
- * we will get another chance later if we do nothing now.
+ * Note that since we will be re-called from the postmaster main loop, we
+ * will get another chance later if we do nothing now.
*
* XXX todo: implement sleep scale factor that existed in contrib code.
*/
@@ -151,14 +151,14 @@ autovac_start(void)
last_autovac_start_time = curtime;
#ifdef EXEC_BACKEND
- switch((AutoVacPID = autovac_forkexec()))
+ switch ((AutoVacPID = autovac_forkexec()))
#else
- switch((AutoVacPID = fork_process()))
+ switch ((AutoVacPID = fork_process()))
#endif
{
case -1:
ereport(LOG,
- (errmsg("could not fork autovacuum process: %m")));
+ (errmsg("could not fork autovacuum process: %m")));
return 0;
#ifndef EXEC_BACKEND
@@ -201,14 +201,14 @@ autovac_forkexec(void)
av[ac++] = "postgres";
av[ac++] = "-forkautovac";
- av[ac++] = NULL; /* filled in by postmaster_forkexec */
+ av[ac++] = NULL; /* filled in by postmaster_forkexec */
av[ac] = NULL;
Assert(ac < lengthof(av));
return postmaster_forkexec(ac, av);
}
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
* AutoVacMain
@@ -216,12 +216,12 @@ autovac_forkexec(void)
NON_EXEC_STATIC void
AutoVacMain(int argc, char *argv[])
{
- ListCell *cell;
- List *dblist;
- TransactionId nextXid;
- autovac_dbase *db;
- bool whole_db;
- sigjmp_buf local_sigjmp_buf;
+ ListCell *cell;
+ List *dblist;
+ TransactionId nextXid;
+ autovac_dbase *db;
+ bool whole_db;
+ sigjmp_buf local_sigjmp_buf;
/* we are a postmaster subprocess now */
IsUnderPostmaster = true;
@@ -240,18 +240,18 @@ AutoVacMain(int argc, char *argv[])
SetProcessingMode(InitProcessing);
/*
- * Set up signal handlers. We operate on databases much like a
- * regular backend, so we use the same signal handling. See
- * equivalent code in tcop/postgres.c.
+ * Set up signal handlers. We operate on databases much like a regular
+ * backend, so we use the same signal handling. See equivalent code in
+ * tcop/postgres.c.
*
- * Currently, we don't pay attention to postgresql.conf changes
- * that happen during a single daemon iteration, so we can ignore
- * SIGHUP.
+ * Currently, we don't pay attention to postgresql.conf changes that happen
+ * during a single daemon iteration, so we can ignore SIGHUP.
*/
pqsignal(SIGHUP, SIG_IGN);
+
/*
- * Presently, SIGINT will lead to autovacuum shutdown, because that's
- * how we handle ereport(ERROR). It could be improved however.
+ * Presently, SIGINT will lead to autovacuum shutdown, because that's how
+ * we handle ereport(ERROR). It could be improved however.
*/
pqsignal(SIGINT, StatementCancelHandler);
pqsignal(SIGTERM, die);
@@ -282,9 +282,9 @@ AutoVacMain(int argc, char *argv[])
EmitErrorReport();
/*
- * We can now go away. Note that because we'll call InitProcess,
- * a callback will be registered to do ProcKill, which will clean
- * up necessary state.
+ * We can now go away. Note that because we'll call InitProcess, a
+ * callback will be registered to do ProcKill, which will clean up
+ * necessary state.
*/
proc_exit(0);
}
@@ -298,9 +298,8 @@ AutoVacMain(int argc, char *argv[])
dblist = autovac_get_database_list();
/*
- * Get the next Xid that was current as of the last checkpoint.
- * We need it to determine whether databases are about to need
- * database-wide vacuums.
+ * Get the next Xid that was current as of the last checkpoint. We need it
+ * to determine whether databases are about to need database-wide vacuums.
*/
nextXid = GetRecentNextXid();
@@ -309,37 +308,36 @@ AutoVacMain(int argc, char *argv[])
* recently auto-vacuumed, or one that needs database-wide vacuum (to
* prevent Xid wraparound-related data loss).
*
- * Note that a database with no stats entry is not considered, except
- * for Xid wraparound purposes. The theory is that if no one has ever
- * connected to it since the stats were last initialized, it doesn't
- * need vacuuming.
+ * Note that a database with no stats entry is not considered, except for Xid
+ * wraparound purposes. The theory is that if no one has ever connected
+ * to it since the stats were last initialized, it doesn't need vacuuming.
*
* XXX This could be improved if we had more info about whether it needs
* vacuuming before connecting to it. Perhaps look through the pgstats
* data for the database's tables? One idea is to keep track of the
* number of new and dead tuples per database in pgstats. However it
- * isn't clear how to construct a metric that measures that and not
- * cause starvation for less busy databases.
+ * isn't clear how to construct a metric that measures that and not cause
+ * starvation for less busy databases.
*/
db = NULL;
whole_db = false;
foreach(cell, dblist)
{
- autovac_dbase *tmp = lfirst(cell);
- bool this_whole_db;
- int32 freeze_age,
- vacuum_age;
+ autovac_dbase *tmp = lfirst(cell);
+ bool this_whole_db;
+ int32 freeze_age,
+ vacuum_age;
/*
* We look for the database that most urgently needs a database-wide
- * vacuum. We decide that a database-wide vacuum is needed 100000
+ * vacuum. We decide that a database-wide vacuum is needed 100000
* transactions sooner than vacuum.c's vac_truncate_clog() would
* decide to start giving warnings. If any such db is found, we
* ignore all other dbs.
*
- * Unlike vacuum.c, we also look at vacuumxid. This is so that
- * pg_clog can be kept trimmed to a reasonable size.
+ * Unlike vacuum.c, we also look at vacuumxid. This is so that pg_clog
+ * can be kept trimmed to a reasonable size.
*/
freeze_age = (int32) (nextXid - tmp->frozenxid);
vacuum_age = (int32) (nextXid - tmp->vacuumxid);
@@ -373,8 +371,8 @@ AutoVacMain(int argc, char *argv[])
* modified, after the database was dropped from the pg_database
* table. (This is of course a not-very-bulletproof test, but it's
* cheap to make. If we do mistakenly choose a recently dropped
- * database, InitPostgres will fail and we'll drop out until the
- * next autovac run.)
+ * database, InitPostgres will fail and we'll drop out until the next
+ * autovac run.)
*/
if (tmp->entry->destroy != 0)
continue;
@@ -390,12 +388,12 @@ AutoVacMain(int argc, char *argv[])
if (db)
{
/*
- * Report autovac startup to the stats collector. We deliberately
- * do this before InitPostgres, so that the last_autovac_time will
- * get updated even if the connection attempt fails. This is to
- * prevent autovac from getting "stuck" repeatedly selecting an
- * unopenable database, rather than making any progress on stuff
- * it can connect to.
+ * Report autovac startup to the stats collector. We deliberately do
+ * this before InitPostgres, so that the last_autovac_time will get
+ * updated even if the connection attempt fails. This is to prevent
+ * autovac from getting "stuck" repeatedly selecting an unopenable
+ * database, rather than making any progress on stuff it can connect
+ * to.
*/
pgstat_report_autovac(db->oid);
@@ -431,18 +429,18 @@ AutoVacMain(int argc, char *argv[])
/*
* autovac_get_database_list
*
- * Return a list of all databases. Note we cannot use pg_database,
+ * Return a list of all databases. Note we cannot use pg_database,
* because we aren't connected yet; we use the flat database file.
*/
static List *
autovac_get_database_list(void)
{
- char *filename;
- List *dblist = NIL;
- char thisname[NAMEDATALEN];
- FILE *db_file;
- Oid db_id;
- Oid db_tablespace;
+ char *filename;
+ List *dblist = NIL;
+ char thisname[NAMEDATALEN];
+ FILE *db_file;
+ Oid db_id;
+ Oid db_tablespace;
TransactionId db_frozenxid;
TransactionId db_vacuumxid;
@@ -457,7 +455,7 @@ autovac_get_database_list(void)
&db_tablespace, &db_frozenxid,
&db_vacuumxid))
{
- autovac_dbase *db;
+ autovac_dbase *db;
db = (autovac_dbase *) palloc(sizeof(autovac_dbase));
@@ -486,12 +484,12 @@ autovac_get_database_list(void)
static void
process_whole_db(void)
{
- Relation dbRel;
- ScanKeyData entry[1];
- SysScanDesc scan;
- HeapTuple tup;
+ Relation dbRel;
+ ScanKeyData entry[1];
+ SysScanDesc scan;
+ HeapTuple tup;
Form_pg_database dbForm;
- bool freeze;
+ bool freeze;
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
@@ -545,23 +543,22 @@ process_whole_db(void)
static void
do_autovacuum(PgStat_StatDBEntry *dbentry)
{
- Relation classRel,
- avRel;
- HeapTuple tuple;
- HeapScanDesc relScan;
- List *vacuum_tables = NIL;
- List *toast_table_ids = NIL;
- ListCell *cell;
+ Relation classRel,
+ avRel;
+ HeapTuple tuple;
+ HeapScanDesc relScan;
+ List *vacuum_tables = NIL;
+ List *toast_table_ids = NIL;
+ ListCell *cell;
PgStat_StatDBEntry *shared;
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
/*
- * StartTransactionCommand and CommitTransactionCommand will
- * automatically switch to other contexts. We need this one
- * to keep the list of relations to vacuum/analyze across
- * transactions.
+ * StartTransactionCommand and CommitTransactionCommand will automatically
+ * switch to other contexts. We need this one to keep the list of
+ * relations to vacuum/analyze across transactions.
*/
MemoryContextSwitchTo(AutovacMemCxt);
@@ -574,19 +571,19 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
/*
* Scan pg_class and determine which tables to vacuum.
*
- * The stats subsystem collects stats for toast tables independently
- * of the stats for their parent tables. We need to check those stats
- * since in cases with short, wide tables there might be proportionally
- * much more activity in the toast table than in its parent.
+ * The stats subsystem collects stats for toast tables independently of the
+ * stats for their parent tables. We need to check those stats since in
+ * cases with short, wide tables there might be proportionally much more
+ * activity in the toast table than in its parent.
*
* Since we can only issue VACUUM against the parent table, we need to
* transpose a decision to vacuum a toast table into a decision to vacuum
- * its parent. There's no point in considering ANALYZE on a toast table,
- * either. To support this, we keep a list of OIDs of toast tables that
+ * its parent. There's no point in considering ANALYZE on a toast table,
+ * either. To support this, we keep a list of OIDs of toast tables that
* need vacuuming alongside the list of regular tables. Regular tables
* will be entered into the table list even if they appear not to need
- * vacuuming; we go back and re-mark them after finding all the
- * vacuumable toast tables.
+ * vacuuming; we go back and re-mark them after finding all the vacuumable
+ * toast tables.
*/
relScan = heap_beginscan(classRel, SnapshotNow, 0, NULL);
@@ -595,9 +592,9 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
Form_pg_autovacuum avForm = NULL;
PgStat_StatTabEntry *tabentry;
- SysScanDesc avScan;
+ SysScanDesc avScan;
HeapTuple avTup;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
Oid relid;
/* Consider only regular and toast tables. */
@@ -606,8 +603,8 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
continue;
/*
- * Skip temp tables (i.e. those in temp namespaces). We cannot
- * safely process other backends' temp tables.
+ * Skip temp tables (i.e. those in temp namespaces). We cannot safely
+ * process other backends' temp tables.
*/
if (isTempNamespace(classForm->relnamespace))
continue;
@@ -687,7 +684,7 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
/*
* test_rel_for_autovac
*
- * Check whether a table needs to be vacuumed or analyzed. Add it to the
+ * Check whether a table needs to be vacuumed or analyzed. Add it to the
* appropriate output list if so.
*
* A table needs to be vacuumed if the number of dead tuples exceeds a
@@ -718,33 +715,37 @@ test_rel_for_autovac(Oid relid, PgStat_StatTabEntry *tabentry,
List **vacuum_tables,
List **toast_table_ids)
{
- Relation rel;
- float4 reltuples; /* pg_class.reltuples */
+ Relation rel;
+ float4 reltuples; /* pg_class.reltuples */
+
/* constants from pg_autovacuum or GUC variables */
- int vac_base_thresh,
- anl_base_thresh;
- float4 vac_scale_factor,
- anl_scale_factor;
+ int vac_base_thresh,
+ anl_base_thresh;
+ float4 vac_scale_factor,
+ anl_scale_factor;
+
/* thresholds calculated from above constants */
- float4 vacthresh,
- anlthresh;
+ float4 vacthresh,
+ anlthresh;
+
/* number of vacuum (resp. analyze) tuples at this time */
- float4 vactuples,
- anltuples;
+ float4 vactuples,
+ anltuples;
+
/* cost-based vacuum delay parameters */
- int vac_cost_limit;
- int vac_cost_delay;
- bool dovacuum;
- bool doanalyze;
+ int vac_cost_limit;
+ int vac_cost_delay;
+ bool dovacuum;
+ bool doanalyze;
/* User disabled it in pg_autovacuum? */
if (avForm && !avForm->enabled)
return;
/*
- * Skip a table not found in stat hash. If it's not acted upon,
- * there's no need to vacuum it. (Note that database-level check
- * will take care of Xid wraparound.)
+ * Skip a table not found in stat hash. If it's not acted upon, there's
+ * no need to vacuum it. (Note that database-level check will take care
+ * of Xid wraparound.)
*/
if (!PointerIsValid(tabentry))
return;
@@ -805,9 +806,9 @@ test_rel_for_autovac(Oid relid, PgStat_StatTabEntry *tabentry,
anlthresh = (float4) anl_base_thresh + anl_scale_factor * reltuples;
/*
- * Note that we don't need to take special consideration for stat
- * reset, because if that happens, the last vacuum and analyze counts
- * will be reset too.
+ * Note that we don't need to take special consideration for stat reset,
+ * because if that happens, the last vacuum and analyze counts will be
+ * reset too.
*/
elog(DEBUG3, "%s: vac: %.0f (threshold %.0f), anl: %.0f (threshold %.0f)",
@@ -863,27 +864,27 @@ test_rel_for_autovac(Oid relid, PgStat_StatTabEntry *tabentry,
/*
* autovacuum_do_vac_analyze
- * Vacuum and/or analyze a list of tables; or all tables if relids = NIL
+ * Vacuum and/or analyze a list of tables; or all tables if relids = NIL
*/
static void
autovacuum_do_vac_analyze(List *relids, bool dovacuum, bool doanalyze,
bool freeze)
{
- VacuumStmt *vacstmt;
- MemoryContext old_cxt;
-
+ VacuumStmt *vacstmt;
+ MemoryContext old_cxt;
+
/*
* The node must survive transaction boundaries, so make sure we create it
* in a long-lived context
*/
old_cxt = MemoryContextSwitchTo(AutovacMemCxt);
-
+
vacstmt = makeNode(VacuumStmt);
/*
* Point QueryContext to the autovac memory context to fake out the
- * PreventTransactionChain check inside vacuum(). Note that this
- * is also why we palloc vacstmt instead of just using a local variable.
+ * PreventTransactionChain check inside vacuum(). Note that this is also
+ * why we palloc vacstmt instead of just using a local variable.
*/
QueryContext = CurrentMemoryContext;
@@ -904,8 +905,8 @@ autovacuum_do_vac_analyze(List *relids, bool dovacuum, bool doanalyze,
/*
* AutoVacuumingActive
- * Check GUC vars and report whether the autovacuum process should be
- * running.
+ * Check GUC vars and report whether the autovacuum process should be
+ * running.
*/
bool
AutoVacuumingActive(void)
@@ -918,7 +919,7 @@ AutoVacuumingActive(void)
/*
* autovac_init
- * This is called at postmaster initialization.
+ * This is called at postmaster initialization.
*
* Annoy the user if he got it wrong.
*/
@@ -933,6 +934,7 @@ autovac_init(void)
ereport(WARNING,
(errmsg("autovacuum not started because of misconfiguration"),
errhint("Enable options \"stats_start_collector\" and \"stats_row_level\".")));
+
/*
* Set the GUC var so we don't fork autovacuum uselessly, and also to
* help debugging.
@@ -943,7 +945,7 @@ autovac_init(void)
/*
* IsAutoVacuumProcess
- * Return whether this process is an autovacuum process.
+ * Return whether this process is an autovacuum process.
*/
bool
IsAutoVacuumProcess(void)
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index ed1a7b2f271..78fecf4dd6e 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.20 2005/09/12 22:20:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.21 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -171,11 +171,11 @@ BackgroundWriterMain(void)
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
* system shutdown cycle, init will SIGTERM all processes at once. We
- * want to wait for the backends to exit, whereupon the postmaster
- * will tell us it's okay to shut down (via SIGUSR2).
+ * want to wait for the backends to exit, whereupon the postmaster will
+ * tell us it's okay to shut down (via SIGUSR2).
*
- * SIGUSR1 is presently unused; keep it spare in case someday we want
- * this process to participate in sinval messaging.
+ * SIGUSR1 is presently unused; keep it spare in case someday we want this
+ * process to participate in sinval messaging.
*/
pqsignal(SIGHUP, BgSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
@@ -203,15 +203,15 @@ BackgroundWriterMain(void)
#endif
/*
- * Initialize so that first time-driven checkpoint happens at the
- * correct time.
+ * Initialize so that first time-driven checkpoint happens at the correct
+ * time.
*/
last_checkpoint_time = time(NULL);
/*
- * Create a memory context that we will do all our work in. We do this
- * so that we can reset the context during error recovery and thereby
- * avoid possible memory leaks. Formerly this code just ran in
+ * Create a memory context that we will do all our work in. We do this so
+ * that we can reset the context during error recovery and thereby avoid
+ * possible memory leaks. Formerly this code just ran in
* TopMemoryContext, but resetting that would be a really bad idea.
*/
bgwriter_context = AllocSetContextCreate(TopMemoryContext,
@@ -258,8 +258,8 @@ BackgroundWriterMain(void)
}
/*
- * Now return to normal top-level context and clear ErrorContext
- * for next time.
+ * Now return to normal top-level context and clear ErrorContext for
+ * next time.
*/
MemoryContextSwitchTo(bgwriter_context);
FlushErrorState();
@@ -271,9 +271,9 @@ BackgroundWriterMain(void)
RESUME_INTERRUPTS();
/*
- * Sleep at least 1 second after any error. A write error is
- * likely to be repeated, and we don't want to be filling the
- * error logs as fast as we can.
+ * Sleep at least 1 second after any error. A write error is likely
+ * to be repeated, and we don't want to be filling the error logs as
+ * fast as we can.
*/
pg_usleep(1000000L);
}
@@ -329,8 +329,8 @@ BackgroundWriterMain(void)
}
/*
- * Do an unforced checkpoint if too much time has elapsed since
- * the last one.
+ * Do an unforced checkpoint if too much time has elapsed since the
+ * last one.
*/
now = time(NULL);
elapsed_secs = now - last_checkpoint_time;
@@ -346,8 +346,8 @@ BackgroundWriterMain(void)
/*
* We will warn if (a) too soon since last checkpoint (whatever
* caused it) and (b) somebody has set the ckpt_time_warn flag
- * since the last checkpoint start. Note in particular that
- * this implementation will not generate warnings caused by
+ * since the last checkpoint start. Note in particular that this
+ * implementation will not generate warnings caused by
* CheckPointTimeout < CheckPointWarning.
*/
if (BgWriterShmem->ckpt_time_warn &&
@@ -368,8 +368,7 @@ BackgroundWriterMain(void)
/*
* After any checkpoint, close all smgr files. This is so we
- * won't hang onto smgr references to deleted files
- * indefinitely.
+ * won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -381,8 +380,8 @@ BackgroundWriterMain(void)
/*
* Note we record the checkpoint start time not end time as
- * last_checkpoint_time. This is so that time-driven
- * checkpoints happen at a predictable spacing.
+ * last_checkpoint_time. This is so that time-driven checkpoints
+ * happen at a predictable spacing.
*/
last_checkpoint_time = now;
}
@@ -390,13 +389,13 @@ BackgroundWriterMain(void)
BgBufferSync();
/*
- * Nap for the configured time, or sleep for 10 seconds if there
- * is no bgwriter activity configured.
+ * Nap for the configured time, or sleep for 10 seconds if there is no
+ * bgwriter activity configured.
*
- * On some platforms, signals won't interrupt the sleep. To ensure
- * we respond reasonably promptly when someone signals us, break
- * down the sleep into 1-second increments, and check for
- * interrupts after each nap.
+ * On some platforms, signals won't interrupt the sleep. To ensure we
+ * respond reasonably promptly when someone signals us, break down the
+ * sleep into 1-second increments, and check for interrupts after each
+ * nap.
*
* We absorb pending requests after each short sleep.
*/
@@ -437,13 +436,13 @@ bg_quickdie(SIGNAL_ARGS)
/*
* DO NOT proc_exit() -- we're here because shared memory may be
- * corrupted, so we don't want to try to clean up our transaction.
- * Just nail the windows shut and get out of town.
+ * corrupted, so we don't want to try to clean up our transaction. Just
+ * nail the windows shut and get out of town.
*
- * Note we do exit(1) not exit(0). This is to force the postmaster into
- * a system reset cycle if some idiot DBA sends a manual SIGQUIT to a
- * random backend. This is necessary precisely because we don't clean
- * up our shared memory state.
+ * Note we do exit(1) not exit(0). This is to force the postmaster into a
+ * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state.
*/
exit(1);
}
@@ -485,8 +484,8 @@ BgWriterShmemSize(void)
Size size;
/*
- * Currently, the size of the requests[] array is arbitrarily set
- * equal to NBuffers. This may prove too large or small ...
+ * Currently, the size of the requests[] array is arbitrarily set equal to
+ * NBuffers. This may prove too large or small ...
*/
size = offsetof(BgWriterShmemStruct, requests);
size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));
@@ -546,9 +545,8 @@ RequestCheckpoint(bool waitforit, bool warnontime)
CreateCheckPoint(false, true);
/*
- * After any checkpoint, close all smgr files. This is so we
- * won't hang onto smgr references to deleted files
- * indefinitely.
+ * After any checkpoint, close all smgr files. This is so we won't
+ * hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -571,8 +569,8 @@ RequestCheckpoint(bool waitforit, bool warnontime)
"could not signal for checkpoint: %m");
/*
- * If requested, wait for completion. We detect completion according
- * to the algorithm given above.
+ * If requested, wait for completion. We detect completion according to
+ * the algorithm given above.
*/
if (waitforit)
{
@@ -585,13 +583,12 @@ RequestCheckpoint(bool waitforit, bool warnontime)
/*
* We are waiting for ckpt_done >= old_started, in a modulo sense.
- * This is a little tricky since we don't know the width or
- * signedness of sig_atomic_t. We make the lowest common
- * denominator assumption that it is only as wide as "char". This
- * means that this algorithm will cope correctly as long as we
- * don't sleep for more than 127 completed checkpoints. (If we
- * do, we will get another chance to exit after 128 more
- * checkpoints...)
+ * This is a little tricky since we don't know the width or signedness
+ * of sig_atomic_t. We make the lowest common denominator assumption
+ * that it is only as wide as "char". This means that this algorithm
+ * will cope correctly as long as we don't sleep for more than 127
+ * completed checkpoints. (If we do, we will get another chance to
+ * exit after 128 more checkpoints...)
*/
while (((signed char) (bgs->ckpt_done - old_started)) < 0)
{
@@ -666,17 +663,17 @@ AbsorbFsyncRequests(void)
return;
/*
- * We have to PANIC if we fail to absorb all the pending requests
- * (eg, because our hashtable runs out of memory). This is because
- * the system cannot run safely if we are unable to fsync what we
- * have been told to fsync. Fortunately, the hashtable is so small
- * that the problem is quite unlikely to arise in practice.
+ * We have to PANIC if we fail to absorb all the pending requests (eg,
+ * because our hashtable runs out of memory). This is because the system
+ * cannot run safely if we are unable to fsync what we have been told to
+ * fsync. Fortunately, the hashtable is so small that the problem is
+ * quite unlikely to arise in practice.
*/
START_CRIT_SECTION();
/*
- * We try to avoid holding the lock for a long time by copying the
- * request array.
+ * We try to avoid holding the lock for a long time by copying the request
+ * array.
*/
LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index 4064d7ccd41..54395b06546 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -1,13 +1,13 @@
/*
* fork_process.c
- * A simple wrapper on top of fork(). This does not handle the
- * EXEC_BACKEND case; it might be extended to do so, but it would be
- * considerably more complex.
+ * A simple wrapper on top of fork(). This does not handle the
+ * EXEC_BACKEND case; it might be extended to do so, but it would be
+ * considerably more complex.
*
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.3 2005/03/16 00:02:39 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.4 2005/10/15 02:49:23 momjian Exp $
*/
#include "postgres.h"
#include "postmaster/fork_process.h"
@@ -25,29 +25,29 @@
pid_t
fork_process(void)
{
- pid_t result;
+ pid_t result;
+
#ifdef LINUX_PROFILE
struct itimerval prof_itimer;
#endif
/*
- * Flush stdio channels just before fork, to avoid double-output
- * problems. Ideally we'd use fflush(NULL) here, but there are still a
- * few non-ANSI stdio libraries out there (like SunOS 4.1.x) that
- * coredump if we do. Presently stdout and stderr are the only stdio
- * output channels used by the postmaster, so fflush'ing them should
- * be sufficient.
+ * Flush stdio channels just before fork, to avoid double-output problems.
+ * Ideally we'd use fflush(NULL) here, but there are still a few non-ANSI
+ * stdio libraries out there (like SunOS 4.1.x) that coredump if we do.
+ * Presently stdout and stderr are the only stdio output channels used by
+ * the postmaster, so fflush'ing them should be sufficient.
*/
fflush(stdout);
fflush(stderr);
#ifdef LINUX_PROFILE
+
/*
- * Linux's fork() resets the profiling timer in the child process. If
- * we want to profile child processes then we need to save and restore
- * the timer setting. This is a waste of time if not profiling,
- * however, so only do it if commanded by specific -DLINUX_PROFILE
- * switch.
+ * Linux's fork() resets the profiling timer in the child process. If we
+ * want to profile child processes then we need to save and restore the
+ * timer setting. This is a waste of time if not profiling, however, so
+ * only do it if commanded by specific -DLINUX_PROFILE switch.
*/
getitimer(ITIMER_PROF, &prof_itimer);
#endif
@@ -81,4 +81,5 @@ fork_process(void)
return result;
}
-#endif /* ! WIN32 */
+
+#endif /* ! WIN32 */
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 2f52053a2c3..61019d7a5e3 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.17 2005/07/04 04:51:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.18 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,11 +49,11 @@
* Timer definitions.
* ----------
*/
-#define PGARCH_AUTOWAKE_INTERVAL 60 /* How often to force a poll of
- * the archive status directory;
- * in seconds. */
-#define PGARCH_RESTART_INTERVAL 10 /* How often to attempt to restart
- * a failed archiver; in seconds. */
+#define PGARCH_AUTOWAKE_INTERVAL 60 /* How often to force a poll of the
+ * archive status directory; in
+ * seconds. */
+#define PGARCH_RESTART_INTERVAL 10 /* How often to attempt to restart a
+ * failed archiver; in seconds. */
/* ----------
* Archiver control info.
@@ -131,10 +131,9 @@ pgarch_start(void)
/*
* Do nothing if too soon since last archiver start. This is a safety
- * valve to protect against continuous respawn attempts if the
- * archiver is dying immediately at launch. Note that since we will be
- * re-called from the postmaster main loop, we will get another chance
- * later.
+ * valve to protect against continuous respawn attempts if the archiver is
+ * dying immediately at launch. Note that since we will be re-called from
+ * the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgarch_start_time) <
@@ -292,9 +291,9 @@ pgarch_MainLoop(void)
/*
* We run the copy loop immediately upon entry, in case there are
- * unarchived files left over from a previous database run (or maybe
- * the archiver died unexpectedly). After that we wait for a signal
- * or timeout before doing more.
+ * unarchived files left over from a previous database run (or maybe the
+ * archiver died unexpectedly). After that we wait for a signal or
+ * timeout before doing more.
*/
wakened = true;
@@ -319,11 +318,11 @@ pgarch_MainLoop(void)
}
/*
- * There shouldn't be anything for the archiver to do except to
- * wait for a signal, ... however, the archiver exists to
- * protect our data, so she wakes up occasionally to allow
- * herself to be proactive. In particular this avoids getting
- * stuck if a signal arrives just before we sleep.
+ * There shouldn't be anything for the archiver to do except to wait
+ * for a signal, ... however, the archiver exists to protect our data,
+ * so she wakes up occasionally to allow herself to be proactive. In
+ * particular this avoids getting stuck if a signal arrives just
+ * before we sleep.
*/
if (!wakened)
{
@@ -349,9 +348,9 @@ pgarch_ArchiverCopyLoop(void)
/*
* loop through all xlogs with archive_status of .ready and archive
- * them...mostly we expect this to be a single file, though it is
- * possible some backend will add files onto the list of those that
- * need archiving while we are still copying earlier archives
+ * them...mostly we expect this to be a single file, though it is possible
+ * some backend will add files onto the list of those that need archiving
+ * while we are still copying earlier archives
*/
while (pgarch_readyXlog(xlog))
{
@@ -488,10 +487,10 @@ static bool
pgarch_readyXlog(char *xlog)
{
/*
- * open xlog status directory and read through list of xlogs that have
- * the .ready suffix, looking for earliest file. It is possible to
- * optimise this code, though only a single file is expected on the
- * vast majority of calls, so....
+ * open xlog status directory and read through list of xlogs that have the
+ * .ready suffix, looking for earliest file. It is possible to optimise
+ * this code, though only a single file is expected on the vast majority
+ * of calls, so....
*/
char XLogArchiveStatusDir[MAXPGPATH];
char newxlog[MAX_XFN_CHARS + 6 + 1];
@@ -504,8 +503,8 @@ pgarch_readyXlog(char *xlog)
if (rldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open archive status directory \"%s\": %m",
- XLogArchiveStatusDir)));
+ errmsg("could not open archive status directory \"%s\": %m",
+ XLogArchiveStatusDir)));
while ((rlde = ReadDir(rldir, XLogArchiveStatusDir)) != NULL)
{
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index d03f8124763..8d767a0b4c8 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.109 2005/10/06 02:29:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.110 2005/10/15 02:49:23 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -66,19 +66,18 @@
* Timer definitions.
* ----------
*/
-#define PGSTAT_STAT_INTERVAL 500 /* How often to write the status
- * file; in milliseconds. */
+#define PGSTAT_STAT_INTERVAL 500 /* How often to write the status file;
+ * in milliseconds. */
-#define PGSTAT_DESTROY_DELAY 10000 /* How long to keep destroyed
- * objects known, to give delayed
- * UDP packets time to arrive; in
- * milliseconds. */
+#define PGSTAT_DESTROY_DELAY 10000 /* How long to keep destroyed objects
+ * known, to give delayed UDP packets
+ * time to arrive; in milliseconds. */
#define PGSTAT_DESTROY_COUNT (PGSTAT_DESTROY_DELAY / PGSTAT_STAT_INTERVAL)
-#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart
- * a failed statistics collector;
- * in seconds. */
+#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
+ * failed statistics collector; in
+ * seconds. */
/* ----------
* Amount of space reserved in pgstat_recvbuffer().
@@ -110,7 +109,7 @@ bool pgstat_collect_blocklevel = false;
* ----------
*/
NON_EXEC_STATIC int pgStatSock = -1;
-NON_EXEC_STATIC int pgStatPipe[2] = {-1,-1};
+NON_EXEC_STATIC int pgStatPipe[2] = {-1, -1};
static struct sockaddr_storage pgStatAddr;
static pid_t pgStatCollectorPid = 0;
@@ -127,15 +126,15 @@ static bool pgStatRunningInCollector = FALSE;
*/
typedef struct TabStatArray
{
- int tsa_alloc; /* num allocated */
- int tsa_used; /* num actually used */
+ int tsa_alloc; /* num allocated */
+ int tsa_used; /* num actually used */
PgStat_MsgTabstat **tsa_messages; /* the array itself */
} TabStatArray;
#define TABSTAT_QUANTUM 4 /* we alloc this many at a time */
-static TabStatArray RegularTabStat = { 0, 0, NULL };
-static TabStatArray SharedTabStat = { 0, 0, NULL };
+static TabStatArray RegularTabStat = {0, 0, NULL};
+static TabStatArray SharedTabStat = {0, 0, NULL};
static int pgStatXactCommit = 0;
static int pgStatXactRollback = 0;
@@ -266,12 +265,12 @@ pgstat_init(void)
}
/*
- * On some platforms, getaddrinfo_all() may return multiple addresses
- * only one of which will actually work (eg, both IPv6 and IPv4
- * addresses when kernel will reject IPv6). Worse, the failure may
- * occur at the bind() or perhaps even connect() stage. So we must
- * loop through the results till we find a working combination. We
- * will generate LOG messages, but no error, for bogus combinations.
+ * On some platforms, getaddrinfo_all() may return multiple addresses only
+ * one of which will actually work (eg, both IPv6 and IPv4 addresses when
+ * kernel will reject IPv6). Worse, the failure may occur at the bind()
+ * or perhaps even connect() stage. So we must loop through the results
+ * till we find a working combination. We will generate LOG messages, but
+ * no error, for bogus combinations.
*/
for (addr = addrs; addr; addr = addr->ai_next)
{
@@ -288,19 +287,19 @@ pgstat_init(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for statistics collector: %m")));
+ errmsg("could not create socket for statistics collector: %m")));
continue;
}
/*
- * Bind it to a kernel assigned port on localhost and get the
- * assigned port via getsockname().
+ * Bind it to a kernel assigned port on localhost and get the assigned
+ * port via getsockname().
*/
if (bind(pgStatSock, addr->ai_addr, addr->ai_addrlen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not bind socket for statistics collector: %m")));
+ errmsg("could not bind socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
@@ -318,26 +317,26 @@ pgstat_init(void)
}
/*
- * Connect the socket to its own address. This saves a few cycles
- * by not having to respecify the target address on every send.
- * This also provides a kernel-level check that only packets from
- * this same address will be received.
+ * Connect the socket to its own address. This saves a few cycles by
+ * not having to respecify the target address on every send. This also
+ * provides a kernel-level check that only packets from this same
+ * address will be received.
*/
if (connect(pgStatSock, (struct sockaddr *) & pgStatAddr, alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not connect socket for statistics collector: %m")));
+ errmsg("could not connect socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
}
/*
- * Try to send and receive a one-byte test message on the socket.
- * This is to catch situations where the socket can be created but
- * will not actually pass data (for instance, because kernel
- * packet filtering rules prevent it).
+ * Try to send and receive a one-byte test message on the socket. This
+ * is to catch situations where the socket can be created but will not
+ * actually pass data (for instance, because kernel packet filtering
+ * rules prevent it).
*/
test_byte = TESTBYTEVAL;
if (send(pgStatSock, &test_byte, 1, 0) != 1)
@@ -351,9 +350,9 @@ pgstat_init(void)
}
/*
- * There could possibly be a little delay before the message can
- * be received. We arbitrarily allow up to half a second before
- * deciding it's broken.
+ * There could possibly be a little delay before the message can be
+ * received. We arbitrarily allow up to half a second before deciding
+ * it's broken.
*/
for (;;) /* need a loop to handle EINTR */
{
@@ -369,7 +368,7 @@ pgstat_init(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
@@ -377,8 +376,8 @@ pgstat_init(void)
if (sel_res == 0 || !FD_ISSET(pgStatSock, &rset))
{
/*
- * This is the case we actually think is likely, so take pains
- * to give a specific message for it.
+ * This is the case we actually think is likely, so take pains to
+ * give a specific message for it.
*
* errno will not be set meaningfully here, so don't use it.
*/
@@ -421,10 +420,10 @@ pgstat_init(void)
goto startup_failed;
/*
- * Set the socket to non-blocking IO. This ensures that if the
- * collector falls behind (despite the buffering process), statistics
- * messages will be discarded; backends won't block waiting to send
- * messages to the collector.
+ * Set the socket to non-blocking IO. This ensures that if the collector
+ * falls behind (despite the buffering process), statistics messages will
+ * be discarded; backends won't block waiting to send messages to the
+ * collector.
*/
if (!pg_set_noblock(pgStatSock))
{
@@ -440,7 +439,7 @@ pgstat_init(void)
startup_failed:
ereport(LOG,
- (errmsg("disabling statistics collector for lack of working socket")));
+ (errmsg("disabling statistics collector for lack of working socket")));
if (addrs)
freeaddrinfo_all(hints.ai_family, addrs);
@@ -459,7 +458,7 @@ startup_failed:
/*
* pgstat_reset_all() -
*
- * Remove the stats file. This is used on server start if the
+ * Remove the stats file. This is used on server start if the
* stats_reset_on_server_start feature is enabled, or if WAL
* recovery is needed after a crash.
*/
@@ -560,11 +559,10 @@ pgstat_start(void)
return 0;
/*
- * Do nothing if too soon since last collector start. This is a
- * safety valve to protect against continuous respawn attempts if the
- * collector is dying immediately at launch. Note that since we will
- * be re-called from the postmaster main loop, we will get another
- * chance later.
+ * Do nothing if too soon since last collector start. This is a safety
+ * valve to protect against continuous respawn attempts if the collector
+ * is dying immediately at launch. Note that since we will be re-called
+ * from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgstat_start_time) <
@@ -650,7 +648,7 @@ pgstat_beterm(int pid)
/* ----------
* pgstat_report_autovac() -
*
- * Called from autovacuum.c to report startup of an autovacuum process.
+ * Called from autovacuum.c to report startup of an autovacuum process.
* We are called before InitPostgres is done, so can't rely on MyDatabaseId;
* the db OID must be passed in, instead.
* ----------
@@ -693,8 +691,8 @@ pgstat_bestart(void)
/*
* We may not have a MyProcPort (eg, if this is the autovacuum process).
- * For the moment, punt and don't send BESTART --- would be better to
- * work out a clean way of handling "unknown clientaddr".
+ * For the moment, punt and don't send BESTART --- would be better to work
+ * out a clean way of handling "unknown clientaddr".
*/
if (MyProcPort)
{
@@ -738,7 +736,7 @@ pgstat_report_vacuum(Oid tableoid, bool shared,
/* --------
* pgstat_report_analyze() -
*
- * Tell the collector about the table we just analyzed.
+ * Tell the collector about the table we just analyzed.
* --------
*/
void
@@ -898,8 +896,8 @@ pgstat_vacuum_tabstat(void)
return 0;
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
@@ -926,8 +924,8 @@ pgstat_vacuum_tabstat(void)
while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Check if this relation is still alive by looking up it's
- * pg_class tuple in the system catalog cache.
+         * Check if this relation is still alive by looking up its pg_class
+ * tuple in the system catalog cache.
*/
reltup = SearchSysCache(RELOID,
ObjectIdGetDatum(tabentry->tableid),
@@ -1072,7 +1070,7 @@ pgstat_reset_counters(void)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to reset statistics counters")));
+ errmsg("must be superuser to reset statistics counters")));
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETCOUNTER);
msg.m_databaseid = MyDatabaseId;
@@ -1150,7 +1148,7 @@ pgstat_initstats(PgStat_Info *stats, Relation rel)
{
Oid rel_id = rel->rd_id;
PgStat_TableEntry *useent;
- TabStatArray *tsarr;
+ TabStatArray *tsarr;
PgStat_MsgTabstat *tsmsg;
int mb;
int i;
@@ -1187,8 +1185,8 @@ pgstat_initstats(PgStat_Info *stats, Relation rel)
continue;
/*
- * Not found, but found a message buffer with an empty slot
- * instead. Fine, let's use this one.
+ * Not found, but found a message buffer with an empty slot instead.
+ * Fine, let's use this one.
*/
i = tsmsg->m_nentries++;
useent = &tsmsg->m_entry[i];
@@ -1234,9 +1232,9 @@ pgstat_count_xact_commit(void)
pgStatXactCommit++;
/*
- * If there was no relation activity yet, just make one existing
- * message buffer used without slots, causing the next report to tell
- * new xact-counters.
+ * If there was no relation activity yet, just make one existing message
+ * buffer used without slots, causing the next report to tell new
+ * xact-counters.
*/
if (RegularTabStat.tsa_alloc == 0)
more_tabstat_space(&RegularTabStat);
@@ -1266,9 +1264,9 @@ pgstat_count_xact_rollback(void)
pgStatXactRollback++;
/*
- * If there was no relation activity yet, just make one existing
- * message buffer used without slots, causing the next report to tell
- * new xact-counters.
+ * If there was no relation activity yet, just make one existing message
+ * buffer used without slots, causing the next report to tell new
+ * xact-counters.
*/
if (RegularTabStat.tsa_alloc == 0)
more_tabstat_space(&RegularTabStat);
@@ -1294,8 +1292,8 @@ PgStat_StatDBEntry *
pgstat_fetch_stat_dbentry(Oid dbid)
{
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
@@ -1325,8 +1323,8 @@ pgstat_fetch_stat_tabentry(Oid relid)
PgStat_StatTabEntry *tabentry;
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
@@ -1492,21 +1490,20 @@ PgstatBufferMain(int argc, char *argv[])
#endif
/*
- * Start a buffering process to read from the socket, so we have a
- * little more time to process incoming messages.
+ * Start a buffering process to read from the socket, so we have a little
+ * more time to process incoming messages.
*
- * NOTE: the process structure is: postmaster is parent of buffer process
- * is parent of collector process. This way, the buffer can detect
- * collector failure via SIGCHLD, whereas otherwise it wouldn't notice
- * collector failure until it tried to write on the pipe. That would
- * mean that after the postmaster started a new collector, we'd have
- * two buffer processes competing to read from the UDP socket --- not
- * good.
+ * NOTE: the process structure is: postmaster is parent of buffer process is
+ * parent of collector process. This way, the buffer can detect collector
+ * failure via SIGCHLD, whereas otherwise it wouldn't notice collector
+ * failure until it tried to write on the pipe. That would mean that
+ * after the postmaster started a new collector, we'd have two buffer
+ * processes competing to read from the UDP socket --- not good.
*/
if (pgpipe(pgStatPipe) < 0)
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not create pipe for statistics buffer: %m")));
+ errmsg("could not create pipe for statistics buffer: %m")));
/* child becomes collector process */
#ifdef EXEC_BACKEND
@@ -1561,10 +1558,10 @@ PgstatCollectorMain(int argc, char *argv[])
MyProcPid = getpid(); /* reset MyProcPid */
/*
- * Reset signal handling. With the exception of restoring default
- * SIGCHLD and SIGQUIT handling, this is a no-op in the
- * non-EXEC_BACKEND case because we'll have inherited these settings
- * from the buffer process; but it's not a no-op for EXEC_BACKEND.
+ * Reset signal handling. With the exception of restoring default SIGCHLD
+ * and SIGQUIT handling, this is a no-op in the non-EXEC_BACKEND case
+ * because we'll have inherited these settings from the buffer process;
+ * but it's not a no-op for EXEC_BACKEND.
*/
pqsignal(SIGHUP, SIG_IGN);
pqsignal(SIGINT, SIG_IGN);
@@ -1607,8 +1604,8 @@ PgstatCollectorMain(int argc, char *argv[])
need_statwrite = TRUE;
/*
- * Read in an existing statistics stats file or initialize the stats
- * to zero.
+ * Read in an existing statistics stats file or initialize the stats to
+ * zero.
*/
pgStatRunningInCollector = TRUE;
pgstat_read_statsfile(&pgStatDBHash, InvalidOid, NULL, NULL);
@@ -1638,9 +1635,9 @@ PgstatCollectorMain(int argc, char *argv[])
for (;;)
{
/*
- * If we need to write the status file again (there have been
- * changes in the statistics since we wrote it last) calculate the
- * timeout until we have to do so.
+ * If we need to write the status file again (there have been changes
+ * in the statistics since we wrote it last) calculate the timeout
+ * until we have to do so.
*/
if (need_statwrite)
{
@@ -1684,7 +1681,7 @@ PgstatCollectorMain(int argc, char *argv[])
continue;
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
}
/*
@@ -1706,10 +1703,10 @@ PgstatCollectorMain(int argc, char *argv[])
{
/*
* We may need to issue multiple read calls in case the buffer
- * process didn't write the message in a single write, which
- * is possible since it dumps its buffer bytewise. In any
- * case, we'd need two reads since we don't know the message
- * length initially.
+ * process didn't write the message in a single write, which is
+ * possible since it dumps its buffer bytewise. In any case, we'd
+ * need two reads since we don't know the message length
+ * initially.
*/
int nread = 0;
int targetlen = sizeof(PgStat_MsgHdr); /* initial */
@@ -1742,25 +1739,24 @@ PgstatCollectorMain(int argc, char *argv[])
{
/*
* Bogus message length implies that we got out of
- * sync with the buffer process somehow. Abort so
- * that we can restart both processes.
+ * sync with the buffer process somehow. Abort so that
+ * we can restart both processes.
*/
ereport(ERROR,
- (errmsg("invalid statistics message length")));
+ (errmsg("invalid statistics message length")));
}
}
}
/*
- * EOF on the pipe implies that the buffer process exited.
- * Fall out of outer loop.
+ * EOF on the pipe implies that the buffer process exited. Fall
+ * out of outer loop.
*/
if (pipeEOF)
break;
/*
- * Distribute the message to the specific function handling
- * it.
+ * Distribute the message to the specific function handling it.
*/
switch (msg.msg_hdr.m_type)
{
@@ -1818,8 +1814,8 @@ PgstatCollectorMain(int argc, char *argv[])
pgStatNumMessages++;
/*
- * If this is the first message after we wrote the stats file
- * the last time, setup the timeout that it'd be written.
+ * If this is the first message after we wrote the stats file the
+ * last time, setup the timeout that it'd be written.
*/
if (!need_statwrite)
{
@@ -1832,20 +1828,20 @@ PgstatCollectorMain(int argc, char *argv[])
}
/*
- * Note that we do NOT check for postmaster exit inside the loop;
- * only EOF on the buffer pipe causes us to fall out. This
- * ensures we don't exit prematurely if there are still a few
- * messages in the buffer or pipe at postmaster shutdown.
+ * Note that we do NOT check for postmaster exit inside the loop; only
+ * EOF on the buffer pipe causes us to fall out. This ensures we
+ * don't exit prematurely if there are still a few messages in the
+ * buffer or pipe at postmaster shutdown.
*/
}
/*
- * Okay, we saw EOF on the buffer pipe, so there are no more messages
- * to process. If the buffer process quit because of postmaster
- * shutdown, we want to save the final stats to reuse at next startup.
- * But if the buffer process failed, it seems best not to (there may
- * even now be a new collector firing up, and we don't want it to read
- * a partially-rewritten stats file).
+ * Okay, we saw EOF on the buffer pipe, so there are no more messages to
+ * process. If the buffer process quit because of postmaster shutdown, we
+ * want to save the final stats to reuse at next startup. But if the
+ * buffer process failed, it seems best not to (there may even now be a
+ * new collector firing up, and we don't want it to read a
+ * partially-rewritten stats file).
*/
if (!PostmasterIsAlive(false))
pgstat_write_statsfile();
@@ -1887,18 +1883,18 @@ pgstat_recvbuffer(void)
set_ps_display("");
/*
- * We want to die if our child collector process does. There are two
- * ways we might notice that it has died: receive SIGCHLD, or get a
- * write failure on the pipe leading to the child. We can set SIGPIPE
- * to kill us here. Our SIGCHLD handler was already set up before we
- * forked (must do it that way, else it's a race condition).
+ * We want to die if our child collector process does. There are two ways
+ * we might notice that it has died: receive SIGCHLD, or get a write
+ * failure on the pipe leading to the child. We can set SIGPIPE to kill
+ * us here. Our SIGCHLD handler was already set up before we forked (must
+ * do it that way, else it's a race condition).
*/
pqsignal(SIGPIPE, SIG_DFL);
PG_SETMASK(&UnBlockSig);
/*
- * Set the write pipe to nonblock mode, so that we cannot block when
- * the collector falls behind.
+ * Set the write pipe to nonblock mode, so that we cannot block when the
+ * collector falls behind.
*/
if (!pg_set_noblock(writePipe))
ereport(ERROR,
@@ -1951,9 +1947,9 @@ pgstat_recvbuffer(void)
}
/*
- * Wait for some work to do; but not for more than 10 seconds.
- * (This determines how quickly we will shut down after an
- * ungraceful postmaster termination; so it needn't be very fast.)
+ * Wait for some work to do; but not for more than 10 seconds. (This
+ * determines how quickly we will shut down after an ungraceful
+ * postmaster termination; so it needn't be very fast.)
*/
timeout.tv_sec = 10;
timeout.tv_usec = 0;
@@ -1979,7 +1975,7 @@ pgstat_recvbuffer(void)
if (len < 0)
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not read statistics message: %m")));
+ errmsg("could not read statistics message: %m")));
/*
* We ignore messages that are smaller than our common header
@@ -2020,14 +2016,14 @@ pgstat_recvbuffer(void)
* If the collector is ready to receive, write some data into his
* pipe. We may or may not be able to write all that we have.
*
- * NOTE: if what we have is less than PIPE_BUF bytes but more than
- * the space available in the pipe buffer, most kernels will
- * refuse to write any of it, and will return EAGAIN. This means
- * we will busy-loop until the situation changes (either because
- * the collector caught up, or because more data arrives so that
- * we have more than PIPE_BUF bytes buffered). This is not good,
- * but is there any way around it? We have no way to tell when
- * the collector has caught up...
+ * NOTE: if what we have is less than PIPE_BUF bytes but more than the
+ * space available in the pipe buffer, most kernels will refuse to
+ * write any of it, and will return EAGAIN. This means we will
+ * busy-loop until the situation changes (either because the collector
+ * caught up, or because more data arrives so that we have more than
+ * PIPE_BUF bytes buffered). This is not good, but is there any way
+ * around it? We have no way to tell when the collector has caught
+ * up...
*/
if (FD_ISSET(writePipe, &wfds))
{
@@ -2042,7 +2038,7 @@ pgstat_recvbuffer(void)
continue; /* not enough space in pipe */
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not write to statistics collector pipe: %m")));
+ errmsg("could not write to statistics collector pipe: %m")));
}
/* NB: len < xfr is okay */
msg_send += len;
@@ -2052,15 +2048,15 @@ pgstat_recvbuffer(void)
}
/*
- * Make sure we forwarded all messages before we check for
- * postmaster termination.
+ * Make sure we forwarded all messages before we check for postmaster
+ * termination.
*/
if (msg_have != 0 || FD_ISSET(pgStatSock, &rfds))
continue;
/*
- * If the postmaster has terminated, we die too. (This is no
- * longer the normal exit path, however.)
+ * If the postmaster has terminated, we die too. (This is no longer
+ * the normal exit path, however.)
*/
if (!PostmasterIsAlive(true))
exit(0);
@@ -2072,9 +2068,9 @@ static void
pgstat_exit(SIGNAL_ARGS)
{
/*
- * For now, we just nail the doors shut and get out of town. It might
- * be cleaner to allow any pending messages to be sent, but that
- * creates a tradeoff against speed of exit.
+ * For now, we just nail the doors shut and get out of town. It might be
+ * cleaner to allow any pending messages to be sent, but that creates a
+ * tradeoff against speed of exit.
*/
/*
@@ -2115,7 +2111,7 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
if (msg->m_backendid < 1 || msg->m_backendid > MaxBackends)
{
ereport(LOG,
- (errmsg("invalid server process ID %d", msg->m_backendid)));
+ (errmsg("invalid server process ID %d", msg->m_backendid)));
return -1;
}
@@ -2125,20 +2121,20 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
beentry = &pgStatBeTable[msg->m_backendid - 1];
/*
- * If the slot contains the PID of this backend, everything is
- * fine and we have nothing to do. Note that all the slots are
- * zero'd out when the collector is started. We assume that a slot
- * is "empty" iff procpid == 0.
+ * If the slot contains the PID of this backend, everything is fine and we
+ * have nothing to do. Note that all the slots are zero'd out when the
+ * collector is started. We assume that a slot is "empty" iff procpid ==
+ * 0.
*/
if (beentry->procpid > 0 && beentry->procpid == msg->m_procpid)
return 0;
/*
- * Lookup if this backend is known to be dead. This can be caused due
- * to messages arriving in the wrong order - e.g. postmaster's BETERM
- * message might have arrived before we received all the backends
- * stats messages, or even a new backend with the same backendid was
- * faster in sending his BESTART.
+ * Lookup if this backend is known to be dead. This can be caused due to
+ * messages arriving in the wrong order - e.g. postmaster's BETERM message
+ * might have arrived before we received all the backends stats messages,
+ * or even a new backend with the same backendid was faster in sending his
+ * BESTART.
*
* If the backend is known to be dead, we ignore this add.
*/
@@ -2149,8 +2145,8 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
return 1;
/*
- * Backend isn't known to be dead. If it's slot is currently used, we
- * have to kick out the old backend.
+     * Backend isn't known to be dead. If its slot is currently used, we have
+ * to kick out the old backend.
*/
if (beentry->procpid > 0)
pgstat_sub_backend(beentry->procpid);
@@ -2165,12 +2161,11 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
beentry->activity[0] = '\0';
/*
- * We can't initialize the rest of the data in this slot until we
- * see the BESTART message. Therefore, we set the database and
- * user to sentinel values, to indicate "undefined". There is no
- * easy way to do this for the client address, so make sure to
- * check that the database or user are defined before accessing
- * the client address.
+ * We can't initialize the rest of the data in this slot until we see the
+ * BESTART message. Therefore, we set the database and user to sentinel
+ * values, to indicate "undefined". There is no easy way to do this for
+ * the client address, so make sure to check that the database or user are
+ * defined before accessing the client address.
*/
beentry->userid = InvalidOid;
beentry->databaseid = InvalidOid;
@@ -2187,8 +2182,8 @@ static PgStat_StatDBEntry *
pgstat_get_db_entry(Oid databaseid, bool create)
{
PgStat_StatDBEntry *result;
- bool found;
- HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
+ bool found;
+ HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
/* Lookup or create the hash table entry for this database */
result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
@@ -2216,9 +2211,9 @@ pgstat_get_db_entry(Oid databaseid, bool create)
hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
hash_ctl.hash = oid_hash;
result->tables = hash_create("Per-database table",
- PGSTAT_TAB_HASH_SIZE,
- &hash_ctl,
- HASH_ELEM | HASH_FUNCTION);
+ PGSTAT_TAB_HASH_SIZE,
+ &hash_ctl,
+ HASH_ELEM | HASH_FUNCTION);
}
return result;
@@ -2238,22 +2233,21 @@ pgstat_sub_backend(int procpid)
bool found;
/*
- * Search in the known-backends table for the slot containing this
- * PID.
+ * Search in the known-backends table for the slot containing this PID.
*/
for (i = 0; i < MaxBackends; i++)
{
if (pgStatBeTable[i].procpid == procpid)
{
/*
- * That's him. Add an entry to the known to be dead backends.
- * Due to possible misorder in the arrival of UDP packets it's
- * possible that even if we know the backend is dead, there
- * could still be messages queued that arrive later. Those
- * messages must not cause our number of backends statistics
- * to get screwed up, so we remember for a couple of seconds
- * that this PID is dead and ignore them (only the counting of
- * backends, not the table access stats they sent).
+ * That's him. Add an entry to the known to be dead backends. Due
+ * to possible misorder in the arrival of UDP packets it's
+ * possible that even if we know the backend is dead, there could
+ * still be messages queued that arrive later. Those messages must
+ * not cause our number of backends statistics to get screwed up,
+ * so we remember for a couple of seconds that this PID is dead
+ * and ignore them (only the counting of backends, not the table
+ * access stats they sent).
*/
deadbe = (PgStat_StatBeDead *) hash_search(pgStatBeDead,
(void *) &procpid,
@@ -2275,8 +2269,8 @@ pgstat_sub_backend(int procpid)
}
/*
- * No big problem if not found. This can happen if UDP messages arrive
- * out of order here.
+ * No big problem if not found. This can happen if UDP messages arrive out
+ * of order here.
*/
}
@@ -2307,8 +2301,8 @@ pgstat_write_statsfile(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not open temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not open temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
return;
}
@@ -2325,8 +2319,8 @@ pgstat_write_statsfile(void)
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * If this database is marked destroyed, count down and do so if
- * it reaches 0.
+ * If this database is marked destroyed, count down and do so if it
+ * reaches 0.
*/
if (dbentry->destroy > 0)
{
@@ -2362,8 +2356,8 @@ pgstat_write_statsfile(void)
while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
{
/*
- * If table entry marked for destruction, same as above for
- * the database entry.
+ * If table entry marked for destruction, same as above for the
+ * database entry.
*/
if (tabentry->destroy > 0)
{
@@ -2384,8 +2378,8 @@ pgstat_write_statsfile(void)
}
/*
- * At least we think this is still a live table. Print its
- * access stats.
+ * At least we think this is still a live table. Print its access
+ * stats.
*/
fputc('T', fpout);
fwrite(tabentry, sizeof(PgStat_StatTabEntry), 1, fpout);
@@ -2422,8 +2416,8 @@ pgstat_write_statsfile(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not close temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not close temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
}
else
{
@@ -2443,8 +2437,7 @@ pgstat_write_statsfile(void)
while ((deadbe = (PgStat_StatBeDead *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Count down the destroy delay and remove entries where it
- * reaches 0.
+ * Count down the destroy delay and remove entries where it reaches 0.
*/
if (--(deadbe->destroy) <= 0)
{
@@ -2453,8 +2446,8 @@ pgstat_write_statsfile(void)
HASH_REMOVE, NULL) == NULL)
{
ereport(ERROR,
- (errmsg("dead-server-process hash table corrupted "
- "during cleanup --- abort")));
+ (errmsg("dead-server-process hash table corrupted "
+ "during cleanup --- abort")));
}
}
}
@@ -2491,7 +2484,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
/*
* If running in the collector or the autovacuum process, we use the
- * DynaHashCxt memory context. If running in a backend, we use the
+ * DynaHashCxt memory context. If running in a backend, we use the
* TopTransactionContext instead, so the caller must only know the last
* XactId when this call happened to know if his tables are still valid or
* already gone!
@@ -2525,8 +2518,8 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
HASH_ELEM | HASH_FUNCTION | mcxt_flags);
/*
- * Initialize the number of known backends to zero, just in case we do
- * a silent error return below.
+ * Initialize the number of known backends to zero, just in case we do a
+ * silent error return below.
*/
if (numbackends != NULL)
*numbackends = 0;
@@ -2534,9 +2527,9 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
*betab = NULL;
/*
- * Try to open the status file. If it doesn't exist, the backends
- * simply return zero for anything and the collector simply starts
- * from scratch with empty counters.
+ * Try to open the status file. If it doesn't exist, the backends simply
+ * return zero for anything and the collector simply starts from scratch
+ * with empty counters.
*/
if ((fpin = AllocateFile(PGSTAT_STAT_FILENAME, PG_BINARY_R)) == NULL)
return;
@@ -2562,8 +2555,8 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
{
/*
* 'D' A PgStat_StatDBEntry struct describing a database
- * follows. Subsequently, zero to many 'T' entries will
- * follow until a 'd' is encountered.
+ * follows. Subsequently, zero to many 'T' entries will follow
+ * until a 'd' is encountered.
*/
case 'D':
if (fread(&dbbuf, 1, sizeof(dbbuf), fpin) != sizeof(dbbuf))
@@ -2577,7 +2570,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
* Add to the DB hash
*/
dbentry = (PgStat_StatDBEntry *) hash_search(*dbhash,
- (void *) &dbbuf.databaseid,
+ (void *) &dbbuf.databaseid,
HASH_ENTER,
&found);
if (found)
@@ -2600,7 +2593,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
{
if (dbbuf.databaseid != onlydb &&
dbbuf.databaseid != InvalidOid)
- break;
+ break;
}
memset(&hash_ctl, 0, sizeof(hash_ctl));
@@ -2611,11 +2604,11 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
dbentry->tables = hash_create("Per-database table",
PGSTAT_TAB_HASH_SIZE,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | mcxt_flags);
+ HASH_ELEM | HASH_FUNCTION | mcxt_flags);
/*
- * Arrange that following 'T's add entries to this
- * databases tables hash table.
+ * Arrange that following 'T's add entries to this databases
+ * tables hash table.
*/
tabhash = dbentry->tables;
break;
@@ -2645,8 +2638,8 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
break;
tabentry = (PgStat_StatTabEntry *) hash_search(tabhash,
- (void *) &tabbuf.tableid,
- HASH_ENTER, &found);
+ (void *) &tabbuf.tableid,
+ HASH_ENTER, &found);
if (found)
{
@@ -2684,7 +2677,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
else
*betab = (PgStat_StatBeEntry *)
MemoryContextAlloc(use_mcxt,
- sizeof(PgStat_StatBeEntry) * maxbackends);
+ sizeof(PgStat_StatBeEntry) * maxbackends);
break;
/*
@@ -2811,9 +2804,9 @@ pgstat_recv_bestart(PgStat_MsgBestart *msg, int len)
PgStat_StatBeEntry *entry;
/*
- * If the backend is known dead, we ignore the message -- we don't
- * want to update the backend entry's state since this BESTART
- * message refers to an old, dead backend
+ * If the backend is known dead, we ignore the message -- we don't want to
+ * update the backend entry's state since this BESTART message refers to
+ * an old, dead backend
*/
if (pgstat_add_backend(&msg->m_hdr) != 0)
return;
@@ -2840,7 +2833,7 @@ pgstat_recv_beterm(PgStat_MsgBeterm *msg, int len)
/* ----------
* pgstat_recv_autovac() -
*
- * Process an autovacuum signalling message.
+ * Process an autovacuum signalling message.
* ----------
*/
static void
@@ -2851,10 +2844,9 @@ pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len)
/*
* Lookup the database in the hashtable. Don't create the entry if it
* doesn't exist, because autovacuum may be processing a template
- * database. If this isn't the case, the database is most likely to
- * have an entry already. (If it doesn't, not much harm is done
- * anyway -- it'll get created as soon as somebody actually uses
- * the database.)
+ * database. If this isn't the case, the database is most likely to have
+ * an entry already. (If it doesn't, not much harm is done anyway --
+ * it'll get created as soon as somebody actually uses the database.)
*/
dbentry = pgstat_get_db_entry(msg->m_databaseid, false);
if (dbentry == NULL)
@@ -2869,7 +2861,7 @@ pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len)
/* ----------
* pgstat_recv_vacuum() -
*
- * Process a VACUUM message.
+ * Process a VACUUM message.
* ----------
*/
static void
@@ -2881,10 +2873,10 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
bool create;
/*
- * If we don't know about the database, ignore the message, because it
- * may be autovacuum processing a template database. But if the message
- * is for database InvalidOid, don't ignore it, because we are getting
- * a message from vacuuming a shared relation.
+ * If we don't know about the database, ignore the message, because it may
+ * be autovacuum processing a template database. But if the message is
+ * for database InvalidOid, don't ignore it, because we are getting a
+ * message from vacuuming a shared relation.
*/
create = (msg->m_databaseid == InvalidOid);
@@ -2933,7 +2925,7 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
/* ----------
* pgstat_recv_analyze() -
*
- * Process an ANALYZE message.
+ * Process an ANALYZE message.
* ----------
*/
static void
@@ -2944,9 +2936,9 @@ pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len)
bool found;
/*
- * Note that we do create the database entry here, as opposed to what
- * we do on AutovacStart and Vacuum messages. This is because
- * autovacuum never executes ANALYZE on template databases.
+ * Note that we do create the database entry here, as opposed to what we
+ * do on AutovacStart and Vacuum messages. This is because autovacuum
+ * never executes ANALYZE on template databases.
*/
dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
@@ -2995,9 +2987,8 @@ pgstat_recv_activity(PgStat_MsgActivity *msg, int len)
PgStat_StatBeEntry *entry;
/*
- * Here we check explicitly for 0 return, since we don't want to
- * mangle the activity of an active backend by a delayed packet from a
- * dead one.
+ * Here we check explicitly for 0 return, since we don't want to mangle
+ * the activity of an active backend by a delayed packet from a dead one.
*/
if (pgstat_add_backend(&msg->m_hdr) != 0)
return;
@@ -3034,8 +3025,8 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
/*
- * If the database is marked for destroy, this is a delayed UDP packet
- * and not worth being counted.
+ * If the database is marked for destroy, this is a delayed UDP packet and
+ * not worth being counted.
*/
if (dbentry->destroy > 0)
return;
@@ -3049,14 +3040,14 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
for (i = 0; i < msg->m_nentries; i++)
{
tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
- (void *) &(tabmsg[i].t_id),
- HASH_ENTER, &found);
+ (void *) &(tabmsg[i].t_id),
+ HASH_ENTER, &found);
if (!found)
{
/*
- * If it's a new table entry, initialize counters to the
- * values we just got.
+ * If it's a new table entry, initialize counters to the values we
+ * just got.
*/
tabentry->numscans = tabmsg[i].t_numscans;
tabentry->tuples_returned = tabmsg[i].t_tuples_returned;
@@ -3064,7 +3055,7 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
tabentry->tuples_inserted = tabmsg[i].t_tuples_inserted;
tabentry->tuples_updated = tabmsg[i].t_tuples_updated;
tabentry->tuples_deleted = tabmsg[i].t_tuples_deleted;
-
+
tabentry->n_live_tuples = tabmsg[i].t_tuples_inserted;
tabentry->n_dead_tuples = tabmsg[i].t_tuples_updated +
tabmsg[i].t_tuples_deleted;
@@ -3132,8 +3123,8 @@ pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
return;
/*
- * If the database is marked for destroy, this is a delayed UDP packet
- * and the tables will go away at DB destruction.
+ * If the database is marked for destroy, this is a delayed UDP packet and
+ * the tables will go away at DB destruction.
*/
if (dbentry->destroy > 0)
return;
@@ -3144,7 +3135,7 @@ pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
for (i = 0; i < msg->m_nentries; i++)
{
tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
- (void *) &(msg->m_tableid[i]),
+ (void *) &(msg->m_tableid[i]),
HASH_FIND, NULL);
if (tabentry)
tabentry->destroy = PGSTAT_DESTROY_COUNT;
@@ -3209,8 +3200,8 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
return;
/*
- * We simply throw away all the database's table entries by
- * recreating a new hash table for them.
+ * We simply throw away all the database's table entries by recreating a
+ * new hash table for them.
*/
if (dbentry->tables != NULL)
hash_destroy(dbentry->tables);
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index aee26add558..fd7b27193c2 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.468 2005/09/22 15:33:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.469 2005/10/15 02:49:23 momjian Exp $
*
* NOTES
*
@@ -301,9 +301,10 @@ static pid_t internal_forkexec(int argc, char *argv[], Port *port);
#ifdef WIN32
typedef struct
{
- SOCKET origsocket; /* Original socket value, or -1 if not a socket */
+ SOCKET origsocket; /* Original socket value, or -1 if not a
+ * socket */
WSAPROTOCOL_INFO wsainfo;
-} InheritableSocket;
+} InheritableSocket;
#else
typedef int InheritableSocket;
#endif
@@ -315,51 +316,51 @@ typedef struct LWLock LWLock; /* ugly kluge */
*/
typedef struct
{
- Port port;
+ Port port;
InheritableSocket portsocket;
- char DataDir[MAXPGPATH];
- int ListenSocket[MAXLISTEN];
- long MyCancelKey;
+ char DataDir[MAXPGPATH];
+ int ListenSocket[MAXLISTEN];
+ long MyCancelKey;
unsigned long UsedShmemSegID;
- void *UsedShmemSegAddr;
- slock_t *ShmemLock;
- slock_t *ShmemIndexLock;
+ void *UsedShmemSegAddr;
+ slock_t *ShmemLock;
+ slock_t *ShmemIndexLock;
VariableCache ShmemVariableCache;
- void *ShmemIndexAlloc;
- Backend *ShmemBackendArray;
- LWLock *LWLockArray;
- slock_t *ProcStructLock;
+ void *ShmemIndexAlloc;
+ Backend *ShmemBackendArray;
+ LWLock *LWLockArray;
+ slock_t *ProcStructLock;
InheritableSocket pgStatSock;
InheritableSocket pgStatPipe0;
InheritableSocket pgStatPipe1;
- pid_t PostmasterPid;
+ pid_t PostmasterPid;
TimestampTz PgStartTime;
#ifdef WIN32
- HANDLE PostmasterHandle;
- HANDLE initial_signal_pipe;
- HANDLE syslogPipe[2];
+ HANDLE PostmasterHandle;
+ HANDLE initial_signal_pipe;
+ HANDLE syslogPipe[2];
#else
- int syslogPipe[2];
+ int syslogPipe[2];
#endif
- char my_exec_path[MAXPGPATH];
- char pkglib_path[MAXPGPATH];
- char ExtraOptions[MAXPGPATH];
- char lc_collate[LOCALE_NAME_BUFLEN];
- char lc_ctype[LOCALE_NAME_BUFLEN];
-} BackendParameters;
+ char my_exec_path[MAXPGPATH];
+ char pkglib_path[MAXPGPATH];
+ char ExtraOptions[MAXPGPATH];
+ char lc_collate[LOCALE_NAME_BUFLEN];
+ char lc_ctype[LOCALE_NAME_BUFLEN];
+} BackendParameters;
static void read_backend_variables(char *id, Port *port);
-static void restore_backend_variables(BackendParameters *param, Port *port);
+static void restore_backend_variables(BackendParameters * param, Port *port);
+
#ifndef WIN32
-static bool save_backend_variables(BackendParameters *param, Port *port);
+static bool save_backend_variables(BackendParameters * param, Port *port);
#else
-static bool save_backend_variables(BackendParameters *param, Port *port,
- HANDLE childProcess, pid_t childPid);
+static bool save_backend_variables(BackendParameters * param, Port *port,
+ HANDLE childProcess, pid_t childPid);
#endif
static void ShmemBackendArrayAdd(Backend *bn);
static void ShmemBackendArrayRemove(pid_t pid);
-
#endif /* EXEC_BACKEND */
#define StartupDataBase() StartChildProcess(BS_XLOG_STARTUP)
@@ -378,7 +379,7 @@ PostmasterMain(int argc, char *argv[])
int i;
/* This will call exit() if strdup() fails. */
- progname = get_progname(argv[0]);
+ progname = get_progname(argv[0]);
MyProcPid = PostmasterPid = getpid();
@@ -408,8 +409,7 @@ PostmasterMain(int argc, char *argv[])
#endif
/*
- * for security, no dir or file created can be group or other
- * accessible
+ * for security, no dir or file created can be group or other accessible
*/
umask((mode_t) 0077);
@@ -419,10 +419,10 @@ PostmasterMain(int argc, char *argv[])
MemoryContextInit();
/*
- * By default, palloc() requests in the postmaster will be allocated
- * in the PostmasterContext, which is space that can be recycled by
- * backends. Allocated data that needs to be available to backends
- * should be allocated in TopMemoryContext.
+ * By default, palloc() requests in the postmaster will be allocated in
+ * the PostmasterContext, which is space that can be recycled by backends.
+ * Allocated data that needs to be available to backends should be
+ * allocated in TopMemoryContext.
*/
PostmasterContext = AllocSetContextCreate(TopMemoryContext,
"Postmaster",
@@ -496,8 +496,7 @@ PostmasterMain(int argc, char *argv[])
/*
* ignore this flag. This may be passed in because the
- * program was run as 'postgres -M' instead of
- * 'postmaster'
+ * program was run as 'postgres -M' instead of 'postmaster'
*/
break;
case 'N':
@@ -511,8 +510,7 @@ PostmasterMain(int argc, char *argv[])
case 'o':
/*
- * Other options to pass to the backend on the command
- * line
+ * Other options to pass to the backend on the command line
*/
snprintf(ExtraOptions + strlen(ExtraOptions),
sizeof(ExtraOptions) - strlen(ExtraOptions),
@@ -524,20 +522,18 @@ PostmasterMain(int argc, char *argv[])
case 'S':
/*
- * Start in 'S'ilent mode (disassociate from controlling
- * tty). You may also think of this as 'S'ysV mode since
- * it's most badly needed on SysV-derived systems like
- * SVR4 and HP-UX.
+ * Start in 'S'ilent mode (disassociate from controlling tty).
+ * You may also think of this as 'S'ysV mode since it's most
+ * badly needed on SysV-derived systems like SVR4 and HP-UX.
*/
SetConfigOption("silent_mode", "true", PGC_POSTMASTER, PGC_S_ARGV);
break;
case 's':
/*
- * In the event that some backend dumps core, send
- * SIGSTOP, rather than SIGQUIT, to all its peers. This
- * lets the wily post_hacker collect core dumps from
- * everyone.
+ * In the event that some backend dumps core, send SIGSTOP,
+ * rather than SIGQUIT, to all its peers. This lets the wily
+ * post_hacker collect core dumps from everyone.
*/
SendStop = true;
break;
@@ -593,13 +589,13 @@ PostmasterMain(int argc, char *argv[])
if (find_other_exec(argv[0], "postgres", PG_VERSIONSTR,
postgres_exec_path) < 0)
ereport(FATAL,
- (errmsg("%s: could not locate matching postgres executable",
- progname)));
+ (errmsg("%s: could not locate matching postgres executable",
+ progname)));
#endif
/*
- * Locate the proper configuration files and data directory, and
- * read postgresql.conf for the first time.
+ * Locate the proper configuration files and data directory, and read
+ * postgresql.conf for the first time.
*/
if (!SelectConfigFiles(userDoption, progname))
ExitPostmaster(2);
@@ -616,9 +612,8 @@ PostmasterMain(int argc, char *argv[])
if (NBuffers < 2 * MaxBackends || NBuffers < 16)
{
/*
- * Do not accept -B so small that backends are likely to starve
- * for lack of buffers. The specific choices here are somewhat
- * arbitrary.
+ * Do not accept -B so small that backends are likely to starve for
+ * lack of buffers. The specific choices here are somewhat arbitrary.
*/
write_stderr("%s: the number of buffers (-B) must be at least twice the number of allowed connections (-N) and at least 16\n", progname);
ExitPostmaster(1);
@@ -654,15 +649,15 @@ PostmasterMain(int argc, char *argv[])
char **p;
ereport(DEBUG3,
- (errmsg_internal("%s: PostmasterMain: initial environ dump:",
- progname)));
+ (errmsg_internal("%s: PostmasterMain: initial environ dump:",
+ progname)));
ereport(DEBUG3,
- (errmsg_internal("-----------------------------------------")));
+ (errmsg_internal("-----------------------------------------")));
for (p = environ; *p; ++p)
ereport(DEBUG3,
(errmsg_internal("\t%s", *p)));
ereport(DEBUG3,
- (errmsg_internal("-----------------------------------------")));
+ (errmsg_internal("-----------------------------------------")));
}
/*
@@ -683,8 +678,8 @@ PostmasterMain(int argc, char *argv[])
/*
* Fork away from controlling terminal, if -S specified.
*
- * Must do this before we grab any interlock files, else the interlocks
- * will show the wrong PID.
+ * Must do this before we grab any interlock files, else the interlocks will
+ * show the wrong PID.
*/
if (SilentMode)
pmdaemonize();
@@ -692,18 +687,17 @@ PostmasterMain(int argc, char *argv[])
/*
* Create lockfile for data directory.
*
- * We want to do this before we try to grab the input sockets, because
- * the data directory interlock is more reliable than the socket-file
- * interlock (thanks to whoever decided to put socket files in /tmp
- * :-(). For the same reason, it's best to grab the TCP socket(s)
- * before the Unix socket.
+ * We want to do this before we try to grab the input sockets, because the
+ * data directory interlock is more reliable than the socket-file
+ * interlock (thanks to whoever decided to put socket files in /tmp :-().
+ * For the same reason, it's best to grab the TCP socket(s) before the
+ * Unix socket.
*/
CreateDataDirLockFile(true);
/*
* Remove old temporary files. At this point there can be no other
- * Postgres processes running in this directory, so this should be
- * safe.
+ * Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
@@ -729,7 +723,7 @@ PostmasterMain(int argc, char *argv[])
/* syntax error in list */
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for \"listen_addresses\"")));
+ errmsg("invalid list syntax for \"listen_addresses\"")));
}
foreach(l, elemlist)
@@ -750,8 +744,8 @@ PostmasterMain(int argc, char *argv[])
success++;
else
ereport(WARNING,
- (errmsg("could not create listen socket for \"%s\"",
- curhost)));
+ (errmsg("could not create listen socket for \"%s\"",
+ curhost)));
}
if (!success && list_length(elemlist))
@@ -771,7 +765,7 @@ PostmasterMain(int argc, char *argv[])
"",
htonl(PostPortNumber),
"",
- (DNSServiceRegistrationReply) reg_reply,
+ (DNSServiceRegistrationReply) reg_reply,
NULL);
}
#endif
@@ -799,9 +793,8 @@ PostmasterMain(int argc, char *argv[])
reset_shared(PostPortNumber);
/*
- * Estimate number of openable files. This must happen after setting
- * up semaphores, because on some platforms semaphores count as open
- * files.
+ * Estimate number of openable files. This must happen after setting up
+ * semaphores, because on some platforms semaphores count as open files.
*/
set_max_safe_fds();
@@ -836,14 +829,13 @@ PostmasterMain(int argc, char *argv[])
TRUE,
DUPLICATE_SAME_ACCESS) == 0)
ereport(FATAL,
- (errmsg_internal("could not duplicate postmaster handle: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not duplicate postmaster handle: error code %d",
+ (int) GetLastError())));
#endif
/*
- * Record postmaster options. We delay this till now to avoid
- * recording bogus options (eg, NBuffers too high for available
- * memory).
+ * Record postmaster options. We delay this till now to avoid recording
+ * bogus options (eg, NBuffers too high for available memory).
*/
if (!CreateOptsFile(argc, argv, my_exec_path))
ExitPostmaster(1);
@@ -904,8 +896,8 @@ PostmasterMain(int argc, char *argv[])
SysLoggerPID = SysLogger_Start();
/*
- * Reset whereToSendOutput from Debug (its starting state) to None.
- * This stops ereport from sending log messages to stderr unless
+ * Reset whereToSendOutput from Debug (its starting state) to None. This
+ * stops ereport from sending log messages to stderr unless
* Log_destination permits. We don't do this until the postmaster is
* fully launched, since startup failures may as well be reported to
* stderr.
@@ -941,8 +933,7 @@ PostmasterMain(int argc, char *argv[])
status = ServerLoop();
/*
- * ServerLoop probably shouldn't ever return, but if it does, close
- * down.
+ * ServerLoop probably shouldn't ever return, but if it does, close down.
*/
ExitPostmaster(status != STATUS_OK);
@@ -972,8 +963,8 @@ checkDataDir(void)
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not read permissions of directory \"%s\": %m",
- DataDir)));
+ errmsg("could not read permissions of directory \"%s\": %m",
+ DataDir)));
}
/*
@@ -997,13 +988,13 @@ checkDataDir(void)
/*
* Check if the directory has group or world access. If so, reject.
*
- * It would be possible to allow weaker constraints (for example, allow
- * group access) but we cannot make a general assumption that that is
- * okay; for example there are platforms where nearly all users customarily
- * belong to the same group. Perhaps this test should be configurable.
+ * It would be possible to allow weaker constraints (for example, allow group
+ * access) but we cannot make a general assumption that that is okay; for
+ * example there are platforms where nearly all users customarily belong
+ * to the same group. Perhaps this test should be configurable.
*
- * XXX temporarily suppress check when on Windows, because there may not
- * be proper support for Unix-y file permissions. Need to think of a
+ * XXX temporarily suppress check when on Windows, because there may not be
+ * proper support for Unix-y file permissions. Need to think of a
* reasonable check to apply on Windows.
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
@@ -1129,9 +1120,9 @@ usage(const char *progname)
printf(_(" -s send SIGSTOP to all backend servers if one dies\n"));
printf(_("\nPlease read the documentation for the complete list of run-time\n"
- "configuration settings and how to set them on the command line or in\n"
- "the configuration file.\n\n"
- "Report bugs to <pgsql-bugs@postgresql.org>.\n"));
+ "configuration settings and how to set them on the command line or in\n"
+ "the configuration file.\n\n"
+ "Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}
@@ -1165,9 +1156,9 @@ ServerLoop(void)
/*
* Wait for something to happen.
*
- * We wait at most one minute, or the minimum autovacuum delay, to
- * ensure that the other background tasks handled below get done
- * even when no requests are arriving.
+ * We wait at most one minute, or the minimum autovacuum delay, to ensure
+ * that the other background tasks handled below get done even when no
+ * requests are arriving.
*/
memcpy((char *) &rmask, (char *) &readmask, sizeof(fd_set));
@@ -1179,8 +1170,8 @@ ServerLoop(void)
selres = select(nSockets, &rmask, NULL, NULL, &timeout);
/*
- * Block all signals until we wait again. (This makes it safe for
- * our signal handlers to do nontrivial work.)
+ * Block all signals until we wait again. (This makes it safe for our
+ * signal handlers to do nontrivial work.)
*/
PG_SETMASK(&BlockSig);
@@ -1196,14 +1187,13 @@ ServerLoop(void)
}
/*
- * New connection pending on any of our sockets? If so, fork a
- * child process to deal with it.
+ * New connection pending on any of our sockets? If so, fork a child
+ * process to deal with it.
*/
if (selres > 0)
{
/*
- * Select a random seed at the time of first receiving a
- * request.
+ * Select a random seed at the time of first receiving a request.
*/
while (random_seed == 0)
{
@@ -1212,8 +1202,8 @@ ServerLoop(void)
/*
* We are not sure how much precision is in tv_usec, so we
* swap the high and low 16 bits of 'later' and XOR them with
- * 'earlier'. On the off chance that the result is 0, we
- * loop until it isn't.
+ * 'earlier'. On the off chance that the result is 0, we loop
+ * until it isn't.
*/
random_seed = earlier.tv_usec ^
((later.tv_usec << 16) |
@@ -1232,8 +1222,8 @@ ServerLoop(void)
BackendStartup(port);
/*
- * We no longer need the open socket or port
- * structure in this process
+ * We no longer need the open socket or port structure
+ * in this process
*/
StreamClose(port->sock);
ConnFree(port);
@@ -1261,8 +1251,8 @@ ServerLoop(void)
/*
* Start a new autovacuum process, if there isn't one running already.
- * (It'll die relatively quickly.) We check that it's not started
- * too frequently in autovac_start.
+ * (It'll die relatively quickly.) We check that it's not started too
+ * frequently in autovac_start.
*/
if (AutoVacuumingActive() && AutoVacPID == 0 &&
StartupPID == 0 && !FatalError && Shutdown == NoShutdown)
@@ -1279,10 +1269,9 @@ ServerLoop(void)
PgStatPID = pgstat_start();
/*
- * Touch the socket and lock file every 58 minutes, to
- * ensure that they are not removed by overzealous /tmp-cleaning
- * tasks. We assume no one runs cleaners with cutoff times of
- * less than an hour ...
+ * Touch the socket and lock file every 58 minutes, to ensure that
+ * they are not removed by overzealous /tmp-cleaning tasks. We assume
+ * no one runs cleaners with cutoff times of less than an hour ...
*/
now = time(NULL);
if (now - last_touch_time >= 58 * SECS_PER_MINUTE)
@@ -1345,8 +1334,8 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition,
- * so don't clutter the log with a complaint.
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * don't clutter the log with a complaint.
*/
if (!SSLdone)
ereport(COMMERROR,
@@ -1369,9 +1358,9 @@ ProcessStartupPacket(Port *port, bool SSLdone)
/*
* Allocate at least the size of an old-style startup packet, plus one
- * extra byte, and make sure all are zeroes. This ensures we will
- * have null termination of all strings, in both fixed- and
- * variable-length packet layouts.
+ * extra byte, and make sure all are zeroes. This ensures we will have
+ * null termination of all strings, in both fixed- and variable-length
+ * packet layouts.
*/
if (len <= (int32) sizeof(StartupPacket))
buf = palloc0(sizeof(StartupPacket) + 1);
@@ -1415,7 +1404,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
ereport(COMMERROR,
(errcode_for_socket_access(),
- errmsg("failed to send SSL negotiation response: %m")));
+ errmsg("failed to send SSL negotiation response: %m")));
return STATUS_ERROR; /* close the connection */
}
@@ -1431,32 +1420,32 @@ ProcessStartupPacket(Port *port, bool SSLdone)
/* Could add additional special packet types here */
/*
- * Set FrontendProtocol now so that ereport() knows what format to
- * send if we fail during startup.
+ * Set FrontendProtocol now so that ereport() knows what format to send if
+ * we fail during startup.
*/
FrontendProtocol = proto;
/* Check we can handle the protocol the frontend is using. */
if (PG_PROTOCOL_MAJOR(proto) < PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST) ||
- PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
- (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
- PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
+ PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
+ (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
+ PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u",
- PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
+ PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST),
PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST))));
/*
- * Now fetch parameters out of startup packet and save them into the
- * Port structure. All data structures attached to the Port struct
- * must be allocated in TopMemoryContext so that they won't disappear
- * when we pass them to PostgresMain (see BackendRun). We need not
- * worry about leaking this storage on failure, since we aren't in the
- * postmaster process anymore.
+ * Now fetch parameters out of startup packet and save them into the Port
+ * structure. All data structures attached to the Port struct must be
+ * allocated in TopMemoryContext so that they won't disappear when we pass
+ * them to PostgresMain (see BackendRun). We need not worry about leaking
+ * this storage on failure, since we aren't in the postmaster process
+ * anymore.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
@@ -1465,9 +1454,9 @@ ProcessStartupPacket(Port *port, bool SSLdone)
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any
- * string beginning within the packet body is null-terminated,
- * thanks to zeroing extra byte above.
+ * Scan packet body for name/option pairs. We can assume any string
+ * beginning within the packet body is null-terminated, thanks to
+ * zeroing extra byte above.
*/
port->guc_options = NIL;
@@ -1513,11 +1502,10 @@ ProcessStartupPacket(Port *port, bool SSLdone)
else
{
/*
- * Get the parameters from the old-style, fixed-width-fields
- * startup packet as C strings. The packet destination was
- * cleared first so a short packet has zeros silently added. We
- * have to be prepared to truncate the pstrdup result for oversize
- * fields, though.
+ * Get the parameters from the old-style, fixed-width-fields startup
+ * packet as C strings. The packet destination was cleared first so a
+ * short packet has zeros silently added. We have to be prepared to
+ * truncate the pstrdup result for oversize fields, though.
*/
StartupPacket *packet = (StartupPacket *) buf;
@@ -1537,7 +1525,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
if (port->user_name == NULL || port->user_name[0] == '\0')
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no PostgreSQL user name specified in startup packet")));
+ errmsg("no PostgreSQL user name specified in startup packet")));
/* The database defaults to the user name. */
if (port->database_name == NULL || port->database_name[0] == '\0')
@@ -1546,10 +1534,10 @@ ProcessStartupPacket(Port *port, bool SSLdone)
if (Db_user_namespace)
{
/*
- * If user@, it is a global user, remove '@'. We only want to do
- * this if there is an '@' at the end and no earlier in the user
- * string or they may fake as a local user of another database
- * attaching to this database.
+ * If user@, it is a global user, remove '@'. We only want to do this
+ * if there is an '@' at the end and no earlier in the user string or
+ * they may fake as a local user of another database attaching to this
+ * database.
*/
if (strchr(port->user_name, '@') ==
port->user_name + strlen(port->user_name) - 1)
@@ -1567,8 +1555,8 @@ ProcessStartupPacket(Port *port, bool SSLdone)
}
/*
- * Truncate given database and user names to length of a Postgres
- * name. This avoids lookup failures when overlength names are given.
+ * Truncate given database and user names to length of a Postgres name.
+ * This avoids lookup failures when overlength names are given.
*/
if (strlen(port->database_name) >= NAMEDATALEN)
port->database_name[NAMEDATALEN - 1] = '\0';
@@ -1581,9 +1569,9 @@ ProcessStartupPacket(Port *port, bool SSLdone)
MemoryContextSwitchTo(oldcontext);
/*
- * If we're going to reject the connection due to database state, say
- * so now instead of wasting cycles on an authentication exchange.
- * (This also allows a pg_ping utility to be written.)
+ * If we're going to reject the connection due to database state, say so
+ * now instead of wasting cycles on an authentication exchange. (This also
+ * allows a pg_ping utility to be written.)
*/
switch (port->canAcceptConnections)
{
@@ -1628,6 +1616,7 @@ processCancelRequest(Port *port, void *pkt)
int backendPID;
long cancelAuthCode;
Backend *bp;
+
#ifndef EXEC_BACKEND
Dlelem *curr;
#else
@@ -1638,9 +1627,9 @@ processCancelRequest(Port *port, void *pkt)
cancelAuthCode = (long) ntohl(canc->cancelAuthCode);
/*
- * See if we have a matching backend. In the EXEC_BACKEND case, we
- * can no longer access the postmaster's own backend list, and must
- * rely on the duplicate array in shared memory.
+ * See if we have a matching backend. In the EXEC_BACKEND case, we can no
+ * longer access the postmaster's own backend list, and must rely on the
+ * duplicate array in shared memory.
*/
#ifndef EXEC_BACKEND
for (curr = DLGetHead(BackendList); curr; curr = DLGetSucc(curr))
@@ -1664,8 +1653,8 @@ processCancelRequest(Port *port, void *pkt)
else
/* Right PID, wrong key: no way, Jose */
ereport(DEBUG2,
- (errmsg_internal("bad key in cancel request for process %d",
- backendPID)));
+ (errmsg_internal("bad key in cancel request for process %d",
+ backendPID)));
return;
}
}
@@ -1694,10 +1683,10 @@ canAcceptConnections(void)
* Don't start too many children.
*
* We allow more connections than we can have backends here because some
- * might still be authenticating; they might fail auth, or some
- * existing backend might exit before the auth cycle is completed. The
- * exact MaxBackends limit is enforced when a new backend tries to
- * join the shared-inval backend array.
+ * might still be authenticating; they might fail auth, or some existing
+ * backend might exit before the auth cycle is completed. The exact
+ * MaxBackends limit is enforced when a new backend tries to join the
+ * shared-inval backend array.
*/
if (CountChildren() >= 2 * MaxBackends)
return CAC_TOOMANY;
@@ -1731,12 +1720,11 @@ ConnCreate(int serverFd)
else
{
/*
- * Precompute password salt values to use for this connection.
- * It's slightly annoying to do this long in advance of knowing
- * whether we'll need 'em or not, but we must do the random()
- * calls before we fork, not after. Else the postmaster's random
- * sequence won't get advanced, and all backends would end up
- * using the same salt...
+ * Precompute password salt values to use for this connection. It's
+ * slightly annoying to do this long in advance of knowing whether
+ * we'll need 'em or not, but we must do the random() calls before we
+ * fork, not after. Else the postmaster's random sequence won't get
+ * advanced, and all backends would end up using the same salt...
*/
RandomSalt(port->cryptSalt, port->md5Salt);
}
@@ -1808,10 +1796,10 @@ reset_shared(int port)
/*
* Create or re-create shared memory and semaphores.
*
- * Note: in each "cycle of life" we will normally assign the same IPC
- * keys (if using SysV shmem and/or semas), since the port number is
- * used to determine IPC keys. This helps ensure that we will clean
- * up dead IPC objects if the postmaster crashes and is restarted.
+ * Note: in each "cycle of life" we will normally assign the same IPC keys
+ * (if using SysV shmem and/or semas), since the port number is used to
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
}
@@ -1830,7 +1818,7 @@ SIGHUP_handler(SIGNAL_ARGS)
if (Shutdown <= SmartShutdown)
{
ereport(LOG,
- (errmsg("received SIGHUP, reloading configuration files")));
+ (errmsg("received SIGHUP, reloading configuration files")));
ProcessConfigFile(PGC_SIGHUP);
SignalChildren(SIGHUP);
if (BgWriterPID != 0)
@@ -1925,8 +1913,8 @@ pmdie(SIGNAL_ARGS)
/*
* Fast Shutdown:
*
- * Abort all children with SIGTERM (rollback active transactions
- * and exit) and shut down when they are gone.
+ * Abort all children with SIGTERM (rollback active transactions and
+ * exit) and shut down when they are gone.
*/
if (Shutdown >= FastShutdown)
break;
@@ -1951,8 +1939,8 @@ pmdie(SIGNAL_ARGS)
/*
* No children left. Begin shutdown of data base system.
*
- * Note: if we previously got SIGTERM then we may send SIGUSR2 to
- * the bgwriter a second time here. This should be harmless.
+ * Note: if we previously got SIGTERM then we may send SIGUSR2 to the
+ * bgwriter a second time here. This should be harmless.
*/
if (StartupPID != 0 || FatalError)
break; /* let reaper() handle this */
@@ -2011,7 +1999,6 @@ reaper(SIGNAL_ARGS)
#ifdef HAVE_WAITPID
int status; /* backend exit status */
-
#else
#ifndef WIN32
union wait status; /* backend exit status */
@@ -2037,10 +2024,9 @@ reaper(SIGNAL_ARGS)
while ((pid = win32_waitpid(&exitstatus)) > 0)
{
/*
- * We need to do this here, and not in CleanupBackend, since this
- * is to be called on all children when we are done with them.
- * Could move to LogChildExit, but that seems like asking for
- * future trouble...
+ * We need to do this here, and not in CleanupBackend, since this is
+ * to be called on all children when we are done with them. Could move
+ * to LogChildExit, but that seems like asking for future trouble...
*/
win32_RemoveChild(pid);
#endif /* WIN32 */
@@ -2057,7 +2043,7 @@ reaper(SIGNAL_ARGS)
LogChildExit(LOG, _("startup process"),
pid, exitstatus);
ereport(LOG,
- (errmsg("aborting startup due to startup process failure")));
+ (errmsg("aborting startup due to startup process failure")));
ExitPostmaster(1);
}
@@ -2068,9 +2054,9 @@ reaper(SIGNAL_ARGS)
FatalError = false;
/*
- * Load the flat authorization file into postmaster's cache.
- * The startup process has recomputed this from the database
- * contents, so we wait till it finishes before loading it.
+ * Load the flat authorization file into postmaster's cache. The
+ * startup process has recomputed this from the database contents,
+ * so we wait till it finishes before loading it.
*/
load_role();
@@ -2083,8 +2069,8 @@ reaper(SIGNAL_ARGS)
/*
* Go to shutdown mode if a shutdown request was pending.
- * Otherwise, try to start the archiver and stats collector
- * too. (We could, but don't, try to start autovacuum here.)
+ * Otherwise, try to start the archiver and stats collector too.
+ * (We could, but don't, try to start autovacuum here.)
*/
if (Shutdown > NoShutdown && BgWriterPID != 0)
kill(BgWriterPID, SIGUSR2);
@@ -2109,16 +2095,15 @@ reaper(SIGNAL_ARGS)
!DLGetHead(BackendList) && AutoVacPID == 0)
{
/*
- * Normal postmaster exit is here: we've seen normal exit
- * of the bgwriter after it's been told to shut down. We
- * expect that it wrote a shutdown checkpoint. (If for
- * some reason it didn't, recovery will occur on next
- * postmaster start.)
+ * Normal postmaster exit is here: we've seen normal exit of
+ * the bgwriter after it's been told to shut down. We expect
+ * that it wrote a shutdown checkpoint. (If for some reason
+ * it didn't, recovery will occur on next postmaster start.)
*
- * Note: we do not wait around for exit of the archiver or
- * stats processes. They've been sent SIGQUIT by this
- * point, and in any case contain logic to commit
- * hara-kiri if they notice the postmaster is gone.
+ * Note: we do not wait around for exit of the archiver or stats
+ * processes. They've been sent SIGQUIT by this point, and in
+ * any case contain logic to commit hara-kiri if they notice
+ * the postmaster is gone.
*/
ExitPostmaster(0);
}
@@ -2132,9 +2117,9 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the autovacuum process? Normal exit can be ignored;
- * we'll start a new one at the next iteration of the postmaster's
- * main loop, if necessary.
+ * Was it the autovacuum process? Normal exit can be ignored; we'll
+ * start a new one at the next iteration of the postmaster's main
+ * loop, if necessary.
*
* An unexpected exit must crash the system.
*/
@@ -2149,9 +2134,9 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the archiver? If so, just try to start a new one; no
- * need to force reset of the rest of the system. (If fail, we'll
- * try again in future cycles of the main loop.)
+ * Was it the archiver? If so, just try to start a new one; no need
+ * to force reset of the rest of the system. (If fail, we'll try
+ * again in future cycles of the main loop.)
*/
if (PgArchPID != 0 && pid == PgArchPID)
{
@@ -2166,9 +2151,9 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the statistics collector? If so, just try to start a
- * new one; no need to force reset of the rest of the system. (If
- * fail, we'll try again in future cycles of the main loop.)
+ * Was it the statistics collector? If so, just try to start a new
+ * one; no need to force reset of the rest of the system. (If fail,
+ * we'll try again in future cycles of the main loop.)
*/
if (PgStatPID != 0 && pid == PgStatPID)
{
@@ -2203,14 +2188,14 @@ reaper(SIGNAL_ARGS)
{
/*
* Wait for all important children to exit, then reset shmem and
- * StartupDataBase. (We can ignore the archiver and stats
- * processes here since they are not connected to shmem.)
+ * StartupDataBase. (We can ignore the archiver and stats processes
+ * here since they are not connected to shmem.)
*/
if (DLGetHead(BackendList) || StartupPID != 0 || BgWriterPID != 0 ||
AutoVacPID != 0)
goto reaper_done;
ereport(LOG,
- (errmsg("all server processes terminated; reinitializing")));
+ (errmsg("all server processes terminated; reinitializing")));
shmem_exit(0);
reset_shared(PostPortNumber);
@@ -2259,10 +2244,10 @@ CleanupBackend(int pid,
LogChildExit(DEBUG2, _("server process"), pid, exitstatus);
/*
- * If a backend dies in an ugly way (i.e. exit status not 0) then we
- * must signal all other backends to quickdie. If exit status is zero
- * we assume everything is hunky dory and simply remove the backend
- * from the active backend list.
+ * If a backend dies in an ugly way (i.e. exit status not 0) then we must
+ * signal all other backends to quickdie. If exit status is zero we
+ * assume everything is hunky dory and simply remove the backend from the
+ * active backend list.
*/
if (exitstatus != 0)
{
@@ -2303,15 +2288,14 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
Backend *bp;
/*
- * Make log entry unless there was a previous crash (if so, nonzero
- * exit status is to be expected in SIGQUIT response; don't clutter
- * log)
+ * Make log entry unless there was a previous crash (if so, nonzero exit
+ * status is to be expected in SIGQUIT response; don't clutter log)
*/
if (!FatalError)
{
LogChildExit(LOG, procname, pid, exitstatus);
ereport(LOG,
- (errmsg("terminating any other active server processes")));
+ (errmsg("terminating any other active server processes")));
}
/* Process regular backends */
@@ -2337,19 +2321,19 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
else
{
/*
- * This backend is still alive. Unless we did so already,
- * tell it to commit hara-kiri.
+ * This backend is still alive. Unless we did so already, tell it
+ * to commit hara-kiri.
*
- * SIGQUIT is the special signal that says exit without proc_exit
- * and let the user know what's going on. But if SendStop is
- * set (-s on command line), then we send SIGSTOP instead, so
- * that we can get core dumps from all backends by hand.
+ * SIGQUIT is the special signal that says exit without proc_exit and
+ * let the user know what's going on. But if SendStop is set (-s
+ * on command line), then we send SIGSTOP instead, so that we can
+ * get core dumps from all backends by hand.
*/
if (!FatalError)
{
ereport(DEBUG2,
(errmsg_internal("sending %s to process %d",
- (SendStop ? "SIGSTOP" : "SIGQUIT"),
+ (SendStop ? "SIGSTOP" : "SIGQUIT"),
(int) bp->pid)));
kill(bp->pid, (SendStop ? SIGSTOP : SIGQUIT));
}
@@ -2417,8 +2401,8 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) exited with exit code %d",
procname, pid, WEXITSTATUS(exitstatus))));
@@ -2426,8 +2410,8 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) was terminated by signal %d",
procname, pid, WTERMSIG(exitstatus))));
@@ -2435,8 +2419,8 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) exited with unexpected status %d",
procname, pid, exitstatus)));
@@ -2480,8 +2464,8 @@ BackendStartup(Port *port)
MyCancelKey = PostmasterRandom();
/*
- * Make room for backend data structure. Better before the fork() so
- * we can handle failure cleanly.
+ * Make room for backend data structure. Better before the fork() so we
+ * can handle failure cleanly.
*/
bn = (Backend *) malloc(sizeof(Backend));
if (!bn)
@@ -2514,7 +2498,7 @@ BackendStartup(Port *port)
free(bn);
errno = save_errno;
ereport(LOG,
- (errmsg("could not fork new process for connection: %m")));
+ (errmsg("could not fork new process for connection: %m")));
report_fork_failure_to_client(port, save_errno);
return STATUS_ERROR;
}
@@ -2525,8 +2509,8 @@ BackendStartup(Port *port)
(int) pid, port->sock)));
/*
- * Everything's been successful, it's safe to add this backend to our
- * list of backends.
+ * Everything's been successful, it's safe to add this backend to our list
+ * of backends.
*/
bn->pid = pid;
bn->cancel_key = MyCancelKey;
@@ -2638,10 +2622,10 @@ BackendRun(Port *port)
/*
* PreAuthDelay is a debugging aid for investigating problems in the
- * authentication cycle: it can be set in postgresql.conf to allow
- * time to attach to the newly-forked backend with a debugger. (See
- * also the -W backend switch, which we allow clients to pass through
- * PGOPTIONS, but it is not honored until after authentication.)
+ * authentication cycle: it can be set in postgresql.conf to allow time to
+ * attach to the newly-forked backend with a debugger. (See also the -W
+ * backend switch, which we allow clients to pass through PGOPTIONS, but
+ * it is not honored until after authentication.)
*/
if (PreAuthDelay > 0)
pg_usleep(PreAuthDelay * 1000000L);
@@ -2657,18 +2641,17 @@ BackendRun(Port *port)
port->commandTag = "";
/*
- * Initialize libpq and enable reporting of ereport errors to the
- * client. Must do this now because authentication uses libpq to send
- * messages.
+ * Initialize libpq and enable reporting of ereport errors to the client.
+ * Must do this now because authentication uses libpq to send messages.
*/
pq_init(); /* initialize libpq to talk to client */
whereToSendOutput = Remote; /* now safe to ereport to client */
/*
- * We arrange for a simple exit(0) if we receive SIGTERM or SIGQUIT
- * during any client authentication related communication. Otherwise
- * the postmaster cannot shutdown the database FAST or IMMED cleanly
- * if a buggy client blocks a backend during authentication.
+ * We arrange for a simple exit(0) if we receive SIGTERM or SIGQUIT during
+ * any client authentication related communication. Otherwise the
+ * postmaster cannot shutdown the database FAST or IMMED cleanly if a
+ * buggy client blocks a backend during authentication.
*/
pqsignal(SIGTERM, authdie);
pqsignal(SIGQUIT, authdie);
@@ -2683,12 +2666,12 @@ BackendRun(Port *port)
if (getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
{
int ret = getnameinfo_all(&port->raddr.addr, port->raddr.salen,
- remote_host, sizeof(remote_host),
- remote_port, sizeof(remote_port),
- NI_NUMERICHOST | NI_NUMERICSERV);
+ remote_host, sizeof(remote_host),
+ remote_port, sizeof(remote_port),
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
ereport(WARNING,
@@ -2713,9 +2696,9 @@ BackendRun(Port *port)
/*
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
- * etcetera from the postmaster, and have to load them ourselves.
- * Build the PostmasterContext (which didn't exist before, in this
- * process) to contain the data.
+ * etcetera from the postmaster, and have to load them ourselves. Build
+ * the PostmasterContext (which didn't exist before, in this process) to
+ * contain the data.
*
* FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
@@ -2734,16 +2717,16 @@ BackendRun(Port *port)
#endif
/*
- * Ready to begin client interaction. We will give up and exit(0)
- * after a time delay, so that a broken client can't hog a connection
+ * Ready to begin client interaction. We will give up and exit(0) after a
+ * time delay, so that a broken client can't hog a connection
* indefinitely. PreAuthDelay doesn't count against the time limit.
*/
if (!enable_sig_alarm(AuthenticationTimeout * 1000, false))
elog(FATAL, "could not set timer for authorization timeout");
/*
- * Receive the startup packet (which might turn out to be a cancel
- * request packet).
+ * Receive the startup packet (which might turn out to be a cancel request
+ * packet).
*/
status = ProcessStartupPacket(port, false);
@@ -2752,8 +2735,7 @@ BackendRun(Port *port)
/*
* Now that we have the user and database name, we can set the process
- * title for ps. It's good to do this as early as possible in
- * startup.
+ * title for ps. It's good to do this as early as possible in startup.
*/
init_ps_display(port->user_name, port->database_name, remote_ps_data);
set_ps_display("authentication");
@@ -2764,8 +2746,8 @@ BackendRun(Port *port)
ClientAuthentication(port); /* might not return, if failure */
/*
- * Done with authentication. Disable timeout, and prevent
- * SIGTERM/SIGQUIT again until backend startup is complete.
+ * Done with authentication. Disable timeout, and prevent SIGTERM/SIGQUIT
+ * again until backend startup is complete.
*/
if (!disable_sig_alarm(false))
elog(FATAL, "could not disable timer for authorization timeout");
@@ -2778,8 +2760,8 @@ BackendRun(Port *port)
/*
* Don't want backend to be able to see the postmaster random number
- * generator state. We have to clobber the static random_seed *and*
- * start a new random sequence in the random() library function.
+ * generator state. We have to clobber the static random_seed *and* start
+ * a new random sequence in the random() library function.
*/
random_seed = 0;
srandom((unsigned int) (MyProcPid ^ port->session_start.tv_usec));
@@ -2826,8 +2808,8 @@ BackendRun(Port *port)
av[ac++] = port->database_name;
/*
- * Pass the (insecure) option switches from the connection request.
- * (It's OK to mangle port->cmdline_options now.)
+ * Pass the (insecure) option switches from the connection request. (It's
+ * OK to mangle port->cmdline_options now.)
*/
if (port->cmdline_options)
split_opts(av, &ac, port->cmdline_options);
@@ -2837,11 +2819,11 @@ BackendRun(Port *port)
Assert(ac < maxac);
/*
- * Release postmaster's working memory context so that backend can
- * recycle the space. Note this does not trash *MyProcPort, because
- * ConnCreate() allocated that space with malloc() ... else we'd need
- * to copy the Port data here. Also, subsidiary data such as the
- * username isn't lost either; see ProcessStartupPacket().
+ * Release postmaster's working memory context so that backend can recycle
+ * the space. Note this does not trash *MyProcPort, because ConnCreate()
+ * allocated that space with malloc() ... else we'd need to copy the Port
+ * data here. Also, subsidiary data such as the username isn't lost
+ * either; see ProcessStartupPacket().
*/
MemoryContextSwitchTo(TopMemoryContext);
MemoryContextDelete(PostmasterContext);
@@ -2852,15 +2834,14 @@ BackendRun(Port *port)
*/
ereport(DEBUG3,
(errmsg_internal("%s child[%d]: starting with (",
- progname, (int)getpid())));
+ progname, (int) getpid())));
for (i = 0; i < ac; ++i)
ereport(DEBUG3,
(errmsg_internal("\t%s", av[i])));
ereport(DEBUG3,
(errmsg_internal(")")));
- ClientAuthInProgress = false; /* client_min_messages is active
- * now */
+ ClientAuthInProgress = false; /* client_min_messages is active now */
return (PostgresMain(ac, av, port->user_name));
}
@@ -2926,7 +2907,7 @@ internal_forkexec(int argc, char *argv[], Port *port)
pid_t pid;
char tmpfilename[MAXPGPATH];
BackendParameters param;
- FILE *fp;
+ FILE *fp;
if (!save_backend_variables(&param, port))
return -1; /* log made by save_backend_variables */
@@ -2994,21 +2975,19 @@ internal_forkexec(int argc, char *argv[], Port *port)
}
}
- return pid; /* Parent returns pid, or -1 on fork
- * failure */
+ return pid; /* Parent returns pid, or -1 on fork failure */
}
-
-#else /* WIN32 */
+#else /* WIN32 */
/*
* internal_forkexec win32 implementation
*
* - starts backend using CreateProcess(), in suspended state
* - writes out backend variables to the parameter file
- * - during this, duplicates handles and sockets required for
- * inheritance into the new process
+ * - during this, duplicates handles and sockets required for
+ * inheritance into the new process
* - resumes execution of the new process once the backend parameter
- * file is complete.
+ * file is complete.
*/
static pid_t
internal_forkexec(int argc, char *argv[], Port *port)
@@ -3020,10 +2999,10 @@ internal_forkexec(int argc, char *argv[], Port *port)
char cmdLine[MAXPGPATH * 2];
HANDLE childHandleCopy;
HANDLE waiterThread;
- HANDLE paramHandle;
+ HANDLE paramHandle;
BackendParameters *param;
SECURITY_ATTRIBUTES sa;
- char paramHandleStr[32];
+ char paramHandleStr[32];
/* Make sure caller set up argv properly */
Assert(argc >= 3);
@@ -3032,7 +3011,7 @@ internal_forkexec(int argc, char *argv[], Port *port)
Assert(argv[2] == NULL);
/* Set up shared memory for parameter passing */
- ZeroMemory(&sa,sizeof(sa));
+ ZeroMemory(&sa, sizeof(sa));
sa.nLength = sizeof(sa);
sa.bInheritHandle = TRUE;
paramHandle = CreateFileMapping(INVALID_HANDLE_VALUE,
@@ -3058,7 +3037,7 @@ internal_forkexec(int argc, char *argv[], Port *port)
}
/* Insert temp file name after -fork argument */
- sprintf(paramHandleStr, "%lu", (DWORD)paramHandle);
+ sprintf(paramHandleStr, "%lu", (DWORD) paramHandle);
argv[2] = paramHandleStr;
/* Format the cmd line */
@@ -3080,9 +3059,10 @@ internal_forkexec(int argc, char *argv[], Port *port)
memset(&pi, 0, sizeof(pi));
memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
+
/*
- * Create the subprocess in a suspended state. This will be resumed
- * later, once we have written out the parameter file.
+ * Create the subprocess in a suspended state. This will be resumed later,
+ * once we have written out the parameter file.
*/
if (!CreateProcess(NULL, cmdLine, NULL, NULL, TRUE, CREATE_SUSPENDED,
NULL, NULL, &si, &pi))
@@ -3095,8 +3075,8 @@ internal_forkexec(int argc, char *argv[], Port *port)
if (!save_backend_variables(param, port, pi.hProcess, pi.dwProcessId))
{
/*
- * log made by save_backend_variables, but we have to clean
- * up the mess with the half-started process
+ * log made by save_backend_variables, but we have to clean up the
+ * mess with the half-started process
*/
if (!TerminateProcess(pi.hProcess, 255))
ereport(ERROR,
@@ -3116,9 +3096,9 @@ internal_forkexec(int argc, char *argv[], Port *port)
(int) GetLastError());
/*
- * Now that the backend variables are written out, we start the
- * child thread so it can start initializing while we set up
- * the rest of the parent state.
+ * Now that the backend variables are written out, we start the child
+ * thread so it can start initializing while we set up the rest of the
+ * parent state.
*/
if (ResumeThread(pi.hThread) == -1)
{
@@ -3154,15 +3134,15 @@ internal_forkexec(int argc, char *argv[], Port *port)
FALSE,
DUPLICATE_SAME_ACCESS) == 0)
ereport(FATAL,
- (errmsg_internal("could not duplicate child handle: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not duplicate child handle: error code %d",
+ (int) GetLastError())));
waiterThread = CreateThread(NULL, 64 * 1024, win32_sigchld_waiter,
(LPVOID) childHandleCopy, 0, NULL);
if (!waiterThread)
ereport(FATAL,
- (errmsg_internal("could not create sigchld waiter thread: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not create sigchld waiter thread: error code %d",
+ (int) GetLastError())));
CloseHandle(waiterThread);
if (IsUnderPostmaster)
@@ -3171,8 +3151,7 @@ internal_forkexec(int argc, char *argv[], Port *port)
return pi.dwProcessId;
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/*
@@ -3213,9 +3192,9 @@ SubPostmasterMain(int argc, char *argv[])
elog(FATAL, "invalid subpostmaster invocation");
/*
- * If appropriate, physically re-attach to shared memory segment.
- * We want to do this before going any further to ensure that we
- * can attach at the same address the postmaster used.
+ * If appropriate, physically re-attach to shared memory segment. We want
+ * to do this before going any further to ensure that we can attach at the
+ * same address the postmaster used.
*/
if (strcmp(argv[1], "-forkbackend") == 0 ||
strcmp(argv[1], "-forkautovac") == 0 ||
@@ -3223,9 +3202,9 @@ SubPostmasterMain(int argc, char *argv[])
PGSharedMemoryReAttach();
/*
- * Start our win32 signal implementation. This has to be done
- * after we read the backend variables, because we need to pick
- * up the signal pipe from the parent process.
+ * Start our win32 signal implementation. This has to be done after we
+ * read the backend variables, because we need to pick up the signal pipe
+ * from the parent process.
*/
#ifdef WIN32
pgwin32_signal_initialize();
@@ -3247,10 +3226,11 @@ SubPostmasterMain(int argc, char *argv[])
CreateSharedMemoryAndSemaphores(false, 0);
#ifdef USE_SSL
+
/*
- * Need to reinitialize the SSL library in the backend,
- * since the context structures contain function pointers
- * and cannot be passed through the parameter file.
+ * Need to reinitialize the SSL library in the backend, since the
+ * context structures contain function pointers and cannot be passed
+ * through the parameter file.
*/
if (EnableSSL)
secure_initialize();
@@ -3304,8 +3284,8 @@ SubPostmasterMain(int argc, char *argv[])
if (strcmp(argv[1], "-forkcol") == 0)
{
/*
- * Do NOT close postmaster sockets here, because we are forking
- * from pgstat buffer process, which already did it.
+ * Do NOT close postmaster sockets here, because we are forking from
+ * pgstat buffer process, which already did it.
*/
/* Do not want to attach to shared memory */
@@ -3326,7 +3306,6 @@ SubPostmasterMain(int argc, char *argv[])
return 1; /* shouldn't get here */
}
-
#endif /* EXEC_BACKEND */
@@ -3341,8 +3320,8 @@ ExitPostmaster(int status)
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should
- * the backends all be killed? probably not.
+ * Not sure of the semantics here. When the Postmaster dies, should the
+ * backends all be killed? probably not.
*
* MUST -- vadim 05-10-1999
*/
@@ -3371,9 +3350,8 @@ sigusr1_handler(SIGNAL_ARGS)
if (CheckPostmasterSignal(PMSIGNAL_WAKEN_CHILDREN))
{
/*
- * Send SIGUSR1 to all children (triggers
- * CatchupInterruptHandler). See storage/ipc/sinval[adt].c for the
- * use of this.
+ * Send SIGUSR1 to all children (triggers CatchupInterruptHandler).
+ * See storage/ipc/sinval[adt].c for the use of this.
*/
if (Shutdown <= SmartShutdown)
{
@@ -3387,8 +3365,8 @@ sigusr1_handler(SIGNAL_ARGS)
PgArchPID != 0 && Shutdown == NoShutdown)
{
/*
- * Send SIGUSR1 to archiver process, to wake it up and begin
- * archiving next transaction log file.
+ * Send SIGUSR1 to archiver process, to wake it up and begin archiving
+ * next transaction log file.
*/
kill(PgArchPID, SIGUSR1);
}
@@ -3397,7 +3375,7 @@ sigusr1_handler(SIGNAL_ARGS)
SysLoggerPID != 0)
{
/* Tell syslogger to rotate logfile */
- kill(SysLoggerPID, SIGUSR1);
+ kill(SysLoggerPID, SIGUSR1);
}
PG_SETMASK(&UnBlockSig);
@@ -3459,9 +3437,9 @@ RandomSalt(char *cryptSalt, char *md5Salt)
* bytes, since only one of the two salts will be sent to the client.
* After that we need to compute more random bits.
*
- * We use % 255, sacrificing one possible byte value, so as to ensure
- * that all bits of the random() value participate in the result.
- * While at it, add one to avoid generating any null bytes.
+ * We use % 255, sacrificing one possible byte value, so as to ensure that
+ * all bits of the random() value participate in the result. While at it,
+ * add one to avoid generating any null bytes.
*/
md5Salt[0] = (rand % 255) + 1;
rand = PostmasterRandom();
@@ -3508,7 +3486,7 @@ CountChildren(void)
/*
* StartChildProcess -- start a non-backend child process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started. All child types
* initially go to BootstrapMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
@@ -3548,8 +3526,7 @@ StartChildProcess(int xlop)
if (pid == 0) /* child */
{
- IsUnderPostmaster = true; /* we are a postmaster subprocess
- * now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess now */
/* Close the postmaster's sockets */
ClosePostmasterPorts(false);
@@ -3571,6 +3548,7 @@ StartChildProcess(int xlop)
{
/* in parent, fork failed */
int save_errno = errno;
+
errno = save_errno;
switch (xlop)
{
@@ -3580,7 +3558,7 @@ StartChildProcess(int xlop)
break;
case BS_XLOG_BGWRITER:
ereport(LOG,
- (errmsg("could not fork background writer process: %m")));
+ (errmsg("could not fork background writer process: %m")));
break;
default:
ereport(LOG,
@@ -3589,8 +3567,8 @@ StartChildProcess(int xlop)
}
/*
- * fork failure is fatal during startup, but there's no need to
- * choke immediately if starting other child types fails.
+ * fork failure is fatal during startup, but there's no need to choke
+ * immediately if starting other child types fails.
*/
if (xlop == BS_XLOG_STARTUP)
ExitPostmaster(1);
@@ -3648,26 +3626,26 @@ extern void *ShmemIndexAlloc;
extern LWLock *LWLockArray;
extern slock_t *ProcStructLock;
extern int pgStatSock;
-extern int pgStatPipe[2];
+extern int pgStatPipe[2];
#ifndef WIN32
#define write_inheritable_socket(dest, src, childpid) (*(dest) = (src))
#define read_inheritable_socket(dest, src) (*(dest) = *(src))
#else
-static void write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE child);
-static void write_inheritable_socket(InheritableSocket *dest, SOCKET src,
- pid_t childPid);
-static void read_inheritable_socket(SOCKET *dest, InheritableSocket *src);
+static void write_duplicated_handle(HANDLE * dest, HANDLE src, HANDLE child);
+static void write_inheritable_socket(InheritableSocket * dest, SOCKET src,
+ pid_t childPid);
+static void read_inheritable_socket(SOCKET * dest, InheritableSocket * src);
#endif
/* Save critical backend variables into the BackendParameters struct */
#ifndef WIN32
static bool
-save_backend_variables(BackendParameters *param, Port *port)
+save_backend_variables(BackendParameters * param, Port *port)
#else
static bool
-save_backend_variables(BackendParameters *param, Port *port,
+save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid)
#endif
{
@@ -3726,9 +3704,9 @@ save_backend_variables(BackendParameters *param, Port *port,
* process instance of the handle to the parameter file.
*/
static void
-write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE childProcess)
+write_duplicated_handle(HANDLE * dest, HANDLE src, HANDLE childProcess)
{
- HANDLE hChild = INVALID_HANDLE_VALUE;
+ HANDLE hChild = INVALID_HANDLE_VALUE;
if (!DuplicateHandle(GetCurrentProcess(),
src,
@@ -3752,7 +3730,7 @@ write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE childProcess)
* straight socket inheritance.
*/
static void
-write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
+write_inheritable_socket(InheritableSocket * dest, SOCKET src, pid_t childpid)
{
dest->origsocket = src;
if (src != 0 && src != -1)
@@ -3769,11 +3747,11 @@ write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
* Read a duplicate socket structure back, and get the socket descriptor.
*/
static void
-read_inheritable_socket(SOCKET *dest, InheritableSocket *src)
+read_inheritable_socket(SOCKET * dest, InheritableSocket * src)
{
- SOCKET s;
+ SOCKET s;
- if (src->origsocket == -1 || src->origsocket == 0)
+ if (src->origsocket == -1 || src->origsocket == 0)
{
/* Not a real socket! */
*dest = src->origsocket;
@@ -3796,9 +3774,9 @@ read_inheritable_socket(SOCKET *dest, InheritableSocket *src)
*dest = s;
/*
- * To make sure we don't get two references to the same socket,
- * close the original one. (This would happen when inheritance
- * actually works..
+ * To make sure we don't get two references to the same socket, close
+ * the original one. (This would happen when inheritance actually
+ * works..
*/
closesocket(src->origsocket);
}
@@ -3812,7 +3790,7 @@ read_backend_variables(char *id, Port *port)
#ifndef WIN32
/* Non-win32 implementation reads from file */
- FILE *fp;
+ FILE *fp;
/* Open file */
fp = AllocateFile(id, PG_BINARY_R);
@@ -3840,10 +3818,10 @@ read_backend_variables(char *id, Port *port)
}
#else
/* Win32 version uses mapped file */
- HANDLE paramHandle;
+ HANDLE paramHandle;
BackendParameters *paramp;
- paramHandle = (HANDLE)atol(id);
+ paramHandle = (HANDLE) atol(id);
paramp = MapViewOfFile(paramHandle, FILE_MAP_READ, 0, 0, 0);
if (!paramp)
{
@@ -3874,7 +3852,7 @@ read_backend_variables(char *id, Port *port)
/* Restore critical backend variables from the BackendParameters struct */
static void
-restore_backend_variables(BackendParameters *param, Port *port)
+restore_backend_variables(BackendParameters * param, Port *port)
{
memcpy(port, &param->port, sizeof(Port));
read_inheritable_socket(&port->sock, &param->portsocket);
@@ -3975,7 +3953,6 @@ ShmemBackendArrayRemove(pid_t pid)
(errmsg_internal("could not find backend entry with pid %d",
(int) pid)));
}
-
#endif /* EXEC_BACKEND */
@@ -4059,7 +4036,7 @@ win32_waitpid(int *exitstatus)
case WAIT_FAILED:
ereport(LOG,
(errmsg_internal("failed to wait on %lu of %lu children: error code %d",
- num, win32_numChildren, (int) GetLastError())));
+ num, win32_numChildren, (int) GetLastError())));
return -1;
case WAIT_TIMEOUT:
@@ -4069,21 +4046,21 @@ win32_waitpid(int *exitstatus)
default:
/*
- * Get the exit code, and return the PID of, the
- * respective process
+ * Get the exit code, and return the PID of, the respective
+ * process
*/
index = offset + ret - WAIT_OBJECT_0;
Assert(index >= 0 && index < win32_numChildren);
if (!GetExitCodeProcess(win32_childHNDArray[index], &exitCode))
{
/*
- * If we get this far, this should never happen, but,
- * then again... No choice other than to assume a
- * catastrophic failure.
+ * If we get this far, this should never happen, but, then
+ * again... No choice other than to assume a catastrophic
+ * failure.
*/
ereport(FATAL,
- (errmsg_internal("failed to get exit code for child %lu",
- (unsigned long) win32_childPIDArray[index])));
+ (errmsg_internal("failed to get exit code for child %lu",
+ (unsigned long) win32_childPIDArray[index])));
}
*exitstatus = (int) exitCode;
return win32_childPIDArray[index];
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index e5aa153dd47..b2e3add6a8f 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -18,7 +18,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.19 2005/08/12 03:23:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.20 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,7 +87,6 @@ static char *last_file_name = NULL;
/* These must be exported for EXEC_BACKEND case ... annoying */
#ifndef WIN32
int syslogPipe[2] = {-1, -1};
-
#else
HANDLE syslogPipe[2] = {0, 0};
#endif
@@ -149,22 +148,21 @@ SysLoggerMain(int argc, char *argv[])
set_ps_display("");
/*
- * If we restarted, our stderr is already redirected into our own
- * input pipe. This is of course pretty useless, not to mention that
- * it interferes with detecting pipe EOF. Point stderr to /dev/null.
- * This assumes that all interesting messages generated in the
- * syslogger will come through elog.c and will be sent to
- * write_syslogger_file.
+ * If we restarted, our stderr is already redirected into our own input
+ * pipe. This is of course pretty useless, not to mention that it
+ * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * assumes that all interesting messages generated in the syslogger will
+ * come through elog.c and will be sent to write_syslogger_file.
*/
if (redirection_done)
{
int fd = open(NULL_DEV, O_WRONLY);
/*
- * The closes might look redundant, but they are not: we want to
- * be darn sure the pipe gets closed even if the open failed. We
- * can survive running with stderr pointing nowhere, but we can't
- * afford to have extra pipe input descriptors hanging around.
+ * The closes might look redundant, but they are not: we want to be
+ * darn sure the pipe gets closed even if the open failed. We can
+ * survive running with stderr pointing nowhere, but we can't afford
+ * to have extra pipe input descriptors hanging around.
*/
close(fileno(stdout));
close(fileno(stderr));
@@ -174,9 +172,9 @@ SysLoggerMain(int argc, char *argv[])
}
/*
- * Also close our copy of the write end of the pipe. This is needed
- * to ensure we can detect pipe EOF correctly. (But note that in the
- * restart case, the postmaster already did this.)
+ * Also close our copy of the write end of the pipe. This is needed to
+ * ensure we can detect pipe EOF correctly. (But note that in the restart
+ * case, the postmaster already did this.)
*/
#ifndef WIN32
if (syslogPipe[1] >= 0)
@@ -191,9 +189,9 @@ SysLoggerMain(int argc, char *argv[])
/*
* Properly accept or ignore signals the postmaster might send us
*
- * Note: we ignore all termination signals, and instead exit only when
- * all upstream processes are gone, to ensure we don't miss any dying
- * gasps of broken backends...
+ * Note: we ignore all termination signals, and instead exit only when all
+ * upstream processes are gone, to ensure we don't miss any dying gasps of
+ * broken backends...
*/
pqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */
@@ -202,7 +200,7 @@ SysLoggerMain(int argc, char *argv[])
pqsignal(SIGQUIT, SIG_IGN);
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, sigUsr1Handler); /* request log rotation */
+ pqsignal(SIGUSR1, sigUsr1Handler); /* request log rotation */
pqsignal(SIGUSR2, SIG_IGN);
/*
@@ -253,8 +251,8 @@ SysLoggerMain(int argc, char *argv[])
ProcessConfigFile(PGC_SIGHUP);
/*
- * Check if the log directory or filename pattern changed in
- * postgresql.conf. If so, force rotation to make sure we're
+ * Check if the log directory or filename pattern changed in
+ * postgresql.conf. If so, force rotation to make sure we're
* writing the logfiles in the right place.
*/
if (strcmp(Log_directory, currentLogDir) != 0)
@@ -269,6 +267,7 @@ SysLoggerMain(int argc, char *argv[])
currentLogFilename = pstrdup(Log_filename);
rotation_requested = true;
}
+
/*
* If rotation time parameter changed, reset next rotation time,
* but don't immediately force a rotation.
@@ -316,7 +315,7 @@ SysLoggerMain(int argc, char *argv[])
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in logger process: %m")));
+ errmsg("select() failed in logger process: %m")));
}
else if (rc > 0 && FD_ISSET(syslogPipe[0], &rfds))
{
@@ -328,7 +327,7 @@ SysLoggerMain(int argc, char *argv[])
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not read from logger pipe: %m")));
+ errmsg("could not read from logger pipe: %m")));
}
else if (bytesRead > 0)
{
@@ -338,11 +337,10 @@ SysLoggerMain(int argc, char *argv[])
else
{
/*
- * Zero bytes read when select() is saying read-ready
- * means EOF on the pipe: that is, there are no longer any
- * processes with the pipe write end open. Therefore, the
- * postmaster and all backends are shut down, and we are
- * done.
+ * Zero bytes read when select() is saying read-ready means
+ * EOF on the pipe: that is, there are no longer any processes
+ * with the pipe write end open. Therefore, the postmaster
+ * and all backends are shut down, and we are done.
*/
pipe_eof_seen = true;
}
@@ -350,9 +348,9 @@ SysLoggerMain(int argc, char *argv[])
#else /* WIN32 */
/*
- * On Windows we leave it to a separate thread to transfer data
- * and detect pipe EOF. The main thread just wakes up once a
- * second to check for SIGHUP and rotation conditions.
+ * On Windows we leave it to a separate thread to transfer data and
+ * detect pipe EOF. The main thread just wakes up once a second to
+ * check for SIGHUP and rotation conditions.
*/
pgwin32_backend_usleep(1000000);
#endif /* WIN32 */
@@ -364,10 +362,10 @@ SysLoggerMain(int argc, char *argv[])
/*
* Normal exit from the syslogger is here. Note that we
- * deliberately do not close syslogFile before exiting; this
- * is to allow for the possibility of elog messages being
- * generated inside proc_exit. Regular exit() will take care
- * of flushing and closing stdio channels.
+ * deliberately do not close syslogFile before exiting; this is to
+ * allow for the possibility of elog messages being generated
+ * inside proc_exit. Regular exit() will take care of flushing
+ * and closing stdio channels.
*/
proc_exit(0);
}
@@ -390,13 +388,13 @@ SysLogger_Start(void)
* If first time through, create the pipe which will receive stderr
* output.
*
- * If the syslogger crashes and needs to be restarted, we continue to use
- * the same pipe (indeed must do so, since extant backends will be
- * writing into that pipe).
+ * If the syslogger crashes and needs to be restarted, we continue to use the
+ * same pipe (indeed must do so, since extant backends will be writing
+ * into that pipe).
*
- * This means the postmaster must continue to hold the read end of the
- * pipe open, so we can pass it down to the reincarnated syslogger.
- * This is a bit klugy but we have little choice.
+ * This means the postmaster must continue to hold the read end of the pipe
+ * open, so we can pass it down to the reincarnated syslogger. This is a
+ * bit klugy but we have little choice.
*/
#ifndef WIN32
if (syslogPipe[0] < 0)
@@ -404,7 +402,7 @@ SysLogger_Start(void)
if (pgpipe(syslogPipe) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- (errmsg("could not create pipe for syslog: %m"))));
+ (errmsg("could not create pipe for syslog: %m"))));
}
#else
if (!syslogPipe[0])
@@ -418,7 +416,7 @@ SysLogger_Start(void)
if (!CreatePipe(&syslogPipe[0], &syslogPipe[1], &sa, 32768))
ereport(FATAL,
(errcode_for_file_access(),
- (errmsg("could not create pipe for syslog: %m"))));
+ (errmsg("could not create pipe for syslog: %m"))));
}
#endif
@@ -428,8 +426,8 @@ SysLogger_Start(void)
mkdir(Log_directory, 0700);
/*
- * The initial logfile is created right in the postmaster, to verify
- * that the Log_directory is writable.
+ * The initial logfile is created right in the postmaster, to verify that
+ * the Log_directory is writable.
*/
filename = logfile_getname(time(NULL));
@@ -730,9 +728,9 @@ logfile_rotate(bool time_based_rotation)
rotation_requested = false;
/*
- * When doing a time-based rotation, invent the new logfile name based
- * on the planned rotation time, not current time, to avoid "slippage"
- * in the file name when we don't do the rotation immediately.
+ * When doing a time-based rotation, invent the new logfile name based on
+ * the planned rotation time, not current time, to avoid "slippage" in the
+ * file name when we don't do the rotation immediately.
*/
if (time_based_rotation)
filename = logfile_getname(next_rotation_time);
@@ -742,14 +740,14 @@ logfile_rotate(bool time_based_rotation)
/*
* Decide whether to overwrite or append. We can overwrite if (a)
* Log_truncate_on_rotation is set, (b) the rotation was triggered by
- * elapsed time and not something else, and (c) the computed file name
- * is different from what we were previously logging into.
+ * elapsed time and not something else, and (c) the computed file name is
+ * different from what we were previously logging into.
*
* Note: during the first rotation after forking off from the postmaster,
* last_file_name will be NULL. (We don't bother to set it in the
- * postmaster because it ain't gonna work in the EXEC_BACKEND case.)
- * So we will always append in that situation, even though truncating
- * would usually be safe.
+ * postmaster because it ain't gonna work in the EXEC_BACKEND case.) So we
+ * will always append in that situation, even though truncating would
+ * usually be safe.
*/
if (Log_truncate_on_rotation && time_based_rotation &&
last_file_name != NULL && strcmp(filename, last_file_name) != 0)
@@ -767,15 +765,15 @@ logfile_rotate(bool time_based_rotation)
filename)));
/*
- * ENFILE/EMFILE are not too surprising on a busy system; just
- * keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with Log_directory and stop
- * trying to create files.
+ * ENFILE/EMFILE are not too surprising on a busy system; just keep
+ * using the old file till we manage to get a new one. Otherwise,
+ * assume something's wrong with Log_directory and stop trying to
+ * create files.
*/
if (saveerrno != ENFILE && saveerrno != EMFILE)
{
ereport(LOG,
- (errmsg("disabling automatic rotation (use SIGHUP to reenable)")));
+ (errmsg("disabling automatic rotation (use SIGHUP to reenable)")));
Log_RotationAge = 0;
Log_RotationSize = 0;
}
@@ -828,7 +826,7 @@ logfile_getname(pg_time_t timestamp)
tm = pg_localtime(&timestamp, global_timezone);
pg_strftime(filename + len, MAXPGPATH - len, Log_filename, tm);
}
- else
+ else
{
/* no strftime escapes, so append timestamp to new filename */
snprintf(filename + len, MAXPGPATH - len, "%s.%lu",
@@ -855,10 +853,10 @@ set_next_rotation_time(void)
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to local time rather than
+ * fairly loosely. In this version we align to local time rather than
* GMT.
*/
- rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
+ rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
now = time(NULL);
tm = pg_localtime(&now, global_timezone);
now += tm->tm_gmtoff;
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index fc62626d1fa..33a6c792065 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*
* Note that there are some incestuous relationships between this code and
@@ -179,7 +179,7 @@ setcolor(struct colormap * cm,
if (t == fillt || t == cb)
{ /* must allocate a new block */
newt = (union tree *) MALLOC((bottom) ?
- sizeof(struct colors) : sizeof(struct ptrs));
+ sizeof(struct colors) : sizeof(struct ptrs));
if (newt == NULL)
{
CERR(REG_ESPACE);
@@ -256,7 +256,7 @@ newcolor(struct colormap * cm)
}
else
new = (struct colordesc *) REALLOC(cm->cd,
- n * sizeof(struct colordesc));
+ n * sizeof(struct colordesc));
if (new == NULL)
{
CERR(REG_ESPACE);
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index 1abeacc246e..719c4c5ef3b 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_cvec.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_cvec.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*/
@@ -52,8 +52,7 @@ newcvec(int nchrs, /* to hold this many chrs... */
if (cv == NULL)
return NULL;
cv->chrspace = nchrs;
- cv->chrs = (chr *) &cv->mcces[nmcces]; /* chrs just after MCCE
- * ptrs */
+ cv->chrs = (chr *) &cv->mcces[nmcces]; /* chrs just after MCCE ptrs */
cv->mccespace = nmcces;
cv->ranges = cv->chrs + nchrs + nmcces * (MAXMCCE + 1);
cv->rangespace = nranges;
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 55ef530c3e7..df45701e5aa 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_lex.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_lex.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*/
@@ -712,8 +712,7 @@ next(struct vars * v)
* lexescape - parse an ARE backslash escape (backslash already eaten)
* Note slightly nonstandard use of the CCLASS type code.
*/
-static int /* not actually used, but convenient for
- * RETV */
+static int /* not actually used, but convenient for RETV */
lexescape(struct vars * v)
{
chr c;
@@ -816,8 +815,7 @@ lexescape(struct vars * v)
break;
case CHR('x'):
NOTE(REG_UUNPORT);
- c = lexdigits(v, 16, 1, 255); /* REs >255 long outside
- * spec */
+ c = lexdigits(v, 16, 1, 255); /* REs >255 long outside spec */
if (ISERR())
FAILW(REG_EESCAPE);
RETV(PLAIN, c);
@@ -844,8 +842,7 @@ lexescape(struct vars * v)
case CHR('9'):
save = v->now;
v->now--; /* put first digit back */
- c = lexdigits(v, 10, 1, 255); /* REs >255 long outside
- * spec */
+ c = lexdigits(v, 10, 1, 255); /* REs >255 long outside spec */
if (ISERR())
FAILW(REG_EESCAPE);
/* ugly heuristic (first test is "exactly 1 digit?") */
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index 06c5f46a128..75f32730497 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -47,7 +47,7 @@
* permission to use and distribute the software in accordance with the
* terms specified in this license.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.6 2004/05/07 00:24:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.7 2005/10/15 02:49:24 momjian Exp $
*/
/* ASCII character-name table */
@@ -520,10 +520,9 @@ range(struct vars * v, /* context */
}
/*
- * When case-independent, it's hard to decide when cvec ranges are
- * usable, so for now at least, we won't try. We allocate enough
- * space for two case variants plus a little extra for the two title
- * case variants.
+ * When case-independent, it's hard to decide when cvec ranges are usable,
+ * so for now at least, we won't try. We allocate enough space for two
+ * case variants plus a little extra for the two title case variants.
*/
nchrs = (b - a + 1) * 2 + 4;
@@ -656,8 +655,7 @@ cclass(struct vars * v, /* context */
/*
* Now compute the character class contents.
*
- * For the moment, assume that only char codes < 256 can be in these
- * classes.
+ * For the moment, assume that only char codes < 256 can be in these classes.
*/
switch ((enum classes) index)
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index ad081bf71e0..fa68d021bc2 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_nfa.c,v 1.3 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_nfa.c,v 1.4 2005/10/15 02:49:24 momjian Exp $
*
*
* One or two things that technically ought to be in here
@@ -218,8 +218,7 @@ freestate(struct nfa * nfa,
nfa->states = s->next;
}
s->prev = NULL;
- s->next = nfa->free; /* don't delete it, put it on the free
- * list */
+ s->next = nfa->free; /* don't delete it, put it on the free list */
nfa->free = s;
}
@@ -275,10 +274,10 @@ newarc(struct nfa * nfa,
a->from = from;
/*
- * Put the new arc on the beginning, not the end, of the chains. Not
- * only is this easier, it has the very useful side effect that
- * deleting the most-recently-added arc is the cheapest case rather
- * than the most expensive one.
+ * Put the new arc on the beginning, not the end, of the chains. Not only
+ * is this easier, it has the very useful side effect that deleting the
+ * most-recently-added arc is the cheapest case rather than the most
+ * expensive one.
*/
a->inchain = to->ins;
to->ins = a;
@@ -1155,8 +1154,7 @@ cleanup(struct nfa * nfa)
static void
markreachable(struct nfa * nfa,
struct state * s,
- struct state * okay, /* consider only states with this
- * mark */
+ struct state * okay, /* consider only states with this mark */
struct state * mark) /* the value to mark with */
{
struct arc *a;
@@ -1175,8 +1173,7 @@ markreachable(struct nfa * nfa,
static void
markcanreach(struct nfa * nfa,
struct state * s,
- struct state * okay, /* consider only states with this
- * mark */
+ struct state * okay, /* consider only states with this mark */
struct state * mark) /* the value to mark with */
{
struct arc *a;
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 8ba34512458..069244060b8 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.43 2005/05/25 21:40:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.44 2005/10/15 02:49:24 momjian Exp $
*
*/
@@ -208,8 +208,7 @@ struct vars
regex_t *re;
chr *now; /* scan pointer into string */
chr *stop; /* end of string */
- chr *savenow; /* saved now and stop for "subroutine
- * call" */
+ chr *savenow; /* saved now and stop for "subroutine call" */
chr *savestop;
int err; /* error code (0 if none) */
int cflags; /* copy of compile flags */
@@ -251,8 +250,7 @@ struct vars
#define NOERR() {if (ISERR()) return;} /* if error seen, return */
#define NOERRN() {if (ISERR()) return NULL;} /* NOERR with retval */
#define NOERRZ() {if (ISERR()) return 0;} /* NOERR with retval */
-#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false,
- * error */
+#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false, error */
#define NOTE(b) (v->re->re_info |= (b)) /* note visible condition */
#define EMPTYARC(x, y) newarc(v->nfa, EMPTY, 0, x, y)
@@ -306,7 +304,6 @@ pg_regcomp(regex_t *re,
#ifdef REG_DEBUG
FILE *debug = (flags & REG_PROGRESS) ? stdout : (FILE *) NULL;
-
#else
FILE *debug = (FILE *) NULL;
#endif
@@ -572,11 +569,10 @@ makesearch(struct vars * v,
/*
* Now here's the subtle part. Because many REs have no lookback
* constraints, often knowing when you were in the pre state tells you
- * little; it's the next state(s) that are informative. But some of
- * them may have other inarcs, i.e. it may be possible to make actual
- * progress and then return to one of them. We must de-optimize such
- * cases, splitting each such state into progress and no-progress
- * states.
+ * little; it's the next state(s) that are informative. But some of them
+ * may have other inarcs, i.e. it may be possible to make actual progress
+ * and then return to one of them. We must de-optimize such cases,
+ * splitting each such state into progress and no-progress states.
*/
/* first, make a list of the states */
@@ -591,8 +587,8 @@ makesearch(struct vars * v,
{ /* must be split */
if (s->tmp == NULL)
{ /* if not already in the list */
- /* (fixes bugs 505048, 230589, */
- /* 840258, 504785) */
+ /* (fixes bugs 505048, 230589, */
+ /* 840258, 504785) */
s->tmp = slist;
slist = s;
}
@@ -1043,9 +1039,8 @@ parseqatom(struct vars * v,
}
/*
- * hard part: something messy That is, capturing parens, back
- * reference, short/long clash, or an atom with substructure
- * containing one of those.
+ * hard part: something messy That is, capturing parens, back reference,
+ * short/long clash, or an atom with substructure containing one of those.
*/
/* now we'll need a subre for the contents even if they're boring */
@@ -1522,9 +1517,8 @@ brackpart(struct vars * v,
endc = startc;
/*
- * Ranges are unportable. Actually, standard C does guarantee that
- * digits are contiguous, but making that an exception is just too
- * complicated.
+ * Ranges are unportable. Actually, standard C does guarantee that digits
+ * are contiguous, but making that an exception is just too complicated.
*/
if (startc != endc)
NOTE(REG_UUNPORT);
@@ -1600,8 +1594,7 @@ leaders(struct vars * v,
assert(s != v->mccepend);
}
p++;
- assert(*p != 0 && *(p + 1) == 0); /* only 2-char MCCEs for
- * now */
+ assert(*p != 0 && *(p + 1) == 0); /* only 2-char MCCEs for now */
newarc(v->nfa, PLAIN, subcolor(v->cm, *p), s, v->mccepend);
okcolors(v->nfa, v->cm);
}
@@ -2053,7 +2046,7 @@ newlacon(struct vars * v,
else
{
v->lacons = (struct subre *) REALLOC(v->lacons,
- (v->nlacons + 1) * sizeof(struct subre));
+ (v->nlacons + 1) * sizeof(struct subre));
n = v->nlacons++;
}
if (v->lacons == NULL)
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index c612761d873..c769994d12b 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/rege_dfa.c,v 1.5 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/rege_dfa.c,v 1.6 2005/10/15 02:49:24 momjian Exp $
*
*/
@@ -145,8 +145,7 @@ shortest(struct vars * v,
chr *start, /* where the match should start */
chr *min, /* match must end at or after here */
chr *max, /* match must end at or before here */
- chr **coldp, /* store coldstart pointer here, if
- * nonNULL */
+ chr **coldp, /* store coldstart pointer here, if nonNULL */
int *hitstopp) /* record whether hit v->stop, if non-NULL */
{
chr *cp;
@@ -222,8 +221,7 @@ shortest(struct vars * v,
if (ss == NULL)
return NULL;
- if (coldp != NULL) /* report last no-progress state set, if
- * any */
+ if (coldp != NULL) /* report last no-progress state set, if any */
*coldp = lastcold(v, d);
if ((ss->flags & POSTSTATE) && cp > min)
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 7d32c268982..d8adec6cf09 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -27,7 +27,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regexec.c,v 1.26 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regexec.c,v 1.27 2005/10/15 02:49:24 momjian Exp $
*
*/
@@ -75,8 +75,7 @@ struct dfa
struct cnfa *cnfa;
struct colormap *cm;
chr *lastpost; /* location of last cache-flushed success */
- chr *lastnopr; /* location of last cache-flushed
- * NOPROGRESS */
+ chr *lastnopr; /* location of last cache-flushed NOPROGRESS */
struct sset *search; /* replacement-search-pointer memory */
int cptsmalloced; /* were the areas individually malloced? */
char *mallocarea; /* self, or master malloced area, or NULL */
@@ -122,8 +121,7 @@ struct vars
#define ISERR() VISERR(v)
#define VERR(vv,e) (((vv)->err) ? (vv)->err : ((vv)->err = (e)))
#define ERR(e) VERR(v, e) /* record an error */
-#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return
- * it */
+#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return it */
#define OFF(p) ((p) - v->start)
#define LOFF(p) ((long)OFF(p))
@@ -279,8 +277,7 @@ find(struct vars * v,
chr *begin;
chr *end = NULL;
chr *cold;
- chr *open; /* open and close of range of possible
- * starts */
+ chr *open; /* open and close of range of possible starts */
chr *close;
int hitend;
int shorter = (v->g->tree->flags & SHORTER) ? 1 : 0;
@@ -408,8 +405,7 @@ cfindloop(struct vars * v,
chr *begin;
chr *end;
chr *cold;
- chr *open; /* open and close of range of possible
- * starts */
+ chr *open; /* open and close of range of possible starts */
chr *close;
chr *estart;
chr *estop;
@@ -1033,8 +1029,7 @@ caltdissect(struct vars * v,
#define UNTRIED 0 /* not yet tried at all */
#define TRYING 1 /* top matched, trying submatches */
-#define TRIED 2 /* top didn't match or submatches
- * exhausted */
+#define TRIED 2 /* top didn't match or submatches exhausted */
if (t == NULL)
return REG_NOMATCH;
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index c28ea627e50..76b25766465 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.105 2005/06/28 05:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.106 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,8 +101,8 @@ InsertRule(char *rulname,
if (!replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" already exists",
- rulname, get_rel_name(eventrel_oid))));
+ errmsg("rule \"%s\" for relation \"%s\" already exists",
+ rulname, get_rel_name(eventrel_oid))));
/*
* When replacing, we don't need to replace every attribute
@@ -143,8 +143,8 @@ InsertRule(char *rulname,
/*
* Install dependency on rule's relation to ensure it will go away on
* relation deletion. If the rule is ON SELECT, make the dependency
- * implicit --- this prevents deleting a view's SELECT rule. Other
- * kinds of rules can be AUTO.
+ * implicit --- this prevents deleting a view's SELECT rule. Other kinds
+ * of rules can be AUTO.
*/
myself.classId = RewriteRelationId;
myself.objectId = rewriteObjectId;
@@ -155,7 +155,7 @@ InsertRule(char *rulname,
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced,
- (evtype == CMD_SELECT) ? DEPENDENCY_INTERNAL : DEPENDENCY_AUTO);
+ (evtype == CMD_SELECT) ? DEPENDENCY_INTERNAL : DEPENDENCY_AUTO);
/*
* Also install dependencies on objects referenced in action and qual.
@@ -199,11 +199,10 @@ DefineQueryRewrite(RuleStmt *stmt)
/*
* If we are installing an ON SELECT rule, we had better grab
- * AccessExclusiveLock to ensure no SELECTs are currently running on
- * the event relation. For other types of rules, it might be
- * sufficient to grab ShareLock to lock out insert/update/delete
- * actions. But for now, let's just grab AccessExclusiveLock all the
- * time.
+ * AccessExclusiveLock to ensure no SELECTs are currently running on the
+ * event relation. For other types of rules, it might be sufficient to
+ * grab ShareLock to lock out insert/update/delete actions. But for now,
+ * let's just grab AccessExclusiveLock all the time.
*/
event_relation = heap_openrv(event_obj, AccessExclusiveLock);
ev_relid = RelationGetRelid(event_relation);
@@ -253,7 +252,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (list_length(action) == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSTEAD NOTHING rules on SELECT are not implemented"),
+ errmsg("INSTEAD NOTHING rules on SELECT are not implemented"),
errhint("Use views instead.")));
/*
@@ -271,7 +270,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (!is_instead || query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("rules on SELECT must have action INSTEAD SELECT")));
+ errmsg("rules on SELECT must have action INSTEAD SELECT")));
/*
* ... there can be no rule qual, ...
@@ -299,18 +298,17 @@ DefineQueryRewrite(RuleStmt *stmt)
if (i > event_relation->rd_att->natts)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("SELECT rule's target list has too many entries")));
+ errmsg("SELECT rule's target list has too many entries")));
attr = event_relation->rd_att->attrs[i - 1];
attname = NameStr(attr->attname);
/*
- * Disallow dropped columns in the relation. This won't
- * happen in the cases we actually care about (namely creating
- * a view via CREATE TABLE then CREATE RULE). Trying to cope
- * with it is much more trouble than it's worth, because we'd
- * have to modify the rule to insert dummy NULLs at the right
- * positions.
+ * Disallow dropped columns in the relation. This won't happen in
+ * the cases we actually care about (namely creating a view via
+ * CREATE TABLE then CREATE RULE). Trying to cope with it is much
+ * more trouble than it's worth, because we'd have to modify the
+ * rule to insert dummy NULLs at the right positions.
*/
if (attr->attisdropped)
ereport(ERROR,
@@ -328,11 +326,10 @@ DefineQueryRewrite(RuleStmt *stmt)
errmsg("SELECT rule's target entry %d has different type from column \"%s\"", i, attname)));
/*
- * Allow typmods to be different only if one of them is -1,
- * ie, "unspecified". This is necessary for cases like
- * "numeric", where the table will have a filled-in default
- * length but the select rule's expression will probably have
- * typmod = -1.
+ * Allow typmods to be different only if one of them is -1, ie,
+ * "unspecified". This is necessary for cases like "numeric",
+ * where the table will have a filled-in default length but the
+ * select rule's expression will probably have typmod = -1.
*/
tletypmod = exprTypmod((Node *) tle->expr);
if (attr->atttypmod != tletypmod &&
@@ -345,7 +342,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (i != event_relation->rd_att->natts)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("SELECT rule's target list has too few entries")));
+ errmsg("SELECT rule's target list has too few entries")));
/*
* ... there must not be another ON SELECT rule already ...
@@ -359,9 +356,9 @@ DefineQueryRewrite(RuleStmt *stmt)
rule = event_relation->rd_rules->rules[i];
if (rule->event == CMD_SELECT)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("\"%s\" is already a view",
- RelationGetRelationName(event_relation))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("\"%s\" is already a view",
+ RelationGetRelationName(event_relation))));
}
}
@@ -371,30 +368,30 @@ DefineQueryRewrite(RuleStmt *stmt)
if (strcmp(stmt->rulename, ViewSelectRuleName) != 0)
{
/*
- * In versions before 7.3, the expected name was _RETviewname.
- * For backwards compatibility with old pg_dump output, accept
- * that and silently change it to _RETURN. Since this is just
- * a quick backwards-compatibility hack, limit the number of
- * characters checked to a few less than NAMEDATALEN; this
- * saves having to worry about where a multibyte character
- * might have gotten truncated.
+ * In versions before 7.3, the expected name was _RETviewname. For
+ * backwards compatibility with old pg_dump output, accept that
+ * and silently change it to _RETURN. Since this is just a quick
+ * backwards-compatibility hack, limit the number of characters
+ * checked to a few less than NAMEDATALEN; this saves having to
+ * worry about where a multibyte character might have gotten
+ * truncated.
*/
if (strncmp(stmt->rulename, "_RET", 4) != 0 ||
strncmp(stmt->rulename + 4, event_obj->relname,
NAMEDATALEN - 4 - 4) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("view rule for \"%s\" must be named \"%s\"",
- event_obj->relname, ViewSelectRuleName)));
+ errmsg("view rule for \"%s\" must be named \"%s\"",
+ event_obj->relname, ViewSelectRuleName)));
stmt->rulename = pstrdup(ViewSelectRuleName);
}
/*
* Are we converting a relation to a view?
*
- * If so, check that the relation is empty because the storage for
- * the relation is going to be deleted. Also insist that the rel
- * not have any triggers, indexes, or child tables.
+ * If so, check that the relation is empty because the storage for the
+ * relation is going to be deleted. Also insist that the rel not have
+ * any triggers, indexes, or child tables.
*/
if (event_relation->rd_rel->relkind != RELKIND_VIEW)
{
@@ -403,29 +400,29 @@ DefineQueryRewrite(RuleStmt *stmt)
scanDesc = heap_beginscan(event_relation, SnapshotNow, 0, NULL);
if (heap_getnext(scanDesc, ForwardScanDirection) != NULL)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it is not empty",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it is not empty",
+ event_obj->relname)));
heap_endscan(scanDesc);
if (event_relation->rd_rel->reltriggers != 0)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has triggers",
- event_obj->relname),
- errhint("In particular, the table may not be involved in any foreign key relationships.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has triggers",
+ event_obj->relname),
+ errhint("In particular, the table may not be involved in any foreign key relationships.")));
if (event_relation->rd_rel->relhasindex)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has indexes",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has indexes",
+ event_obj->relname)));
if (event_relation->rd_rel->relhassubclass)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has child tables",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has child tables",
+ event_obj->relname)));
RelisBecomingView = true;
}
@@ -438,11 +435,10 @@ DefineQueryRewrite(RuleStmt *stmt)
event_attype = InvalidOid;
/*
- * We want the rule's table references to be checked as though by the
- * rule owner, not the user referencing the rule. Therefore, scan
- * through the rule's rtables and set the checkAsUser field on all
- * rtable entries. We have to look at event_qual as well, in case it
- * contains sublinks.
+ * We want the rule's table references to be checked as though by the rule
+ * owner, not the user referencing the rule. Therefore, scan through the
+ * rule's rtables and set the checkAsUser field on all rtable entries. We
+ * have to look at event_qual as well, in case it contains sublinks.
*/
foreach(l, action)
{
@@ -468,17 +464,15 @@ DefineQueryRewrite(RuleStmt *stmt)
* appropriate, also modify the 'relkind' field to show that the
* relation is now a view.
*
- * Important side effect: an SI notice is broadcast to force all
- * backends (including me!) to update relcache entries with the
- * new rule.
+ * Important side effect: an SI notice is broadcast to force all backends
+ * (including me!) to update relcache entries with the new rule.
*/
SetRelationRuleStatus(ev_relid, true, RelisBecomingView);
}
/*
- * IF the relation is becoming a view, delete the storage files
- * associated with it. NB: we had better have AccessExclusiveLock to
- * do this ...
+ * IF the relation is becoming a view, delete the storage files associated
+ * with it. NB: we had better have AccessExclusiveLock to do this ...
*
* XXX what about getting rid of its TOAST table? For now, we don't.
*/
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 1c58ccd7ca3..3513cf67c4b 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.157 2005/08/01 20:31:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.158 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,7 +53,7 @@ static TargetEntry *process_matched_tle(TargetEntry *src_tle,
const char *attrName);
static Node *get_assignment_input(Node *node);
static void markQueryForLocking(Query *qry, bool forUpdate, bool noWait,
- bool skipOldNew);
+ bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
int varno, Query *parsetree);
static Query *fireRIRrules(Query *parsetree, List *activeRIRs);
@@ -115,17 +115,17 @@ AcquireRewriteLocks(Query *parsetree)
switch (rte->rtekind)
{
case RTE_RELATION:
+
/*
- * Grab the appropriate lock type for the relation, and
- * do not release it until end of transaction. This protects
- * the rewriter and planner against schema changes mid-query.
+ * Grab the appropriate lock type for the relation, and do not
+ * release it until end of transaction. This protects the
+ * rewriter and planner against schema changes mid-query.
*
- * If the relation is the query's result relation, then we
- * need RowExclusiveLock. Otherwise, check to see if the
- * relation is accessed FOR UPDATE/SHARE or not. We can't
- * just grab AccessShareLock because then the executor
- * would be trying to upgrade the lock, leading to possible
- * deadlocks.
+ * If the relation is the query's result relation, then we need
+ * RowExclusiveLock. Otherwise, check to see if the relation
+ * is accessed FOR UPDATE/SHARE or not. We can't just grab
+ * AccessShareLock because then the executor would be trying
+ * to upgrade the lock, leading to possible deadlocks.
*/
if (rt_index == parsetree->resultRelation)
lockmode = RowExclusiveLock;
@@ -139,14 +139,15 @@ AcquireRewriteLocks(Query *parsetree)
break;
case RTE_JOIN:
+
/*
- * Scan the join's alias var list to see if any columns
- * have been dropped, and if so replace those Vars with
- * NULL Consts.
+ * Scan the join's alias var list to see if any columns have
+ * been dropped, and if so replace those Vars with NULL
+ * Consts.
*
- * Since a join has only two inputs, we can expect to
- * see multiple references to the same input RTE; optimize
- * away multiple fetches.
+ * Since a join has only two inputs, we can expect to see
+ * multiple references to the same input RTE; optimize away
+ * multiple fetches.
*/
newaliasvars = NIL;
curinputvarno = 0;
@@ -159,19 +160,19 @@ AcquireRewriteLocks(Query *parsetree)
* If the list item isn't a simple Var, then it must
* represent a merged column, ie a USING column, and so it
* couldn't possibly be dropped, since it's referenced in
- * the join clause. (Conceivably it could also be a
- * NULL constant already? But that's OK too.)
+ * the join clause. (Conceivably it could also be a NULL
+ * constant already? But that's OK too.)
*/
if (IsA(aliasvar, Var))
{
/*
* The elements of an alias list have to refer to
- * earlier RTEs of the same rtable, because that's
- * the order the planner builds things in. So we
- * already processed the referenced RTE, and so it's
- * safe to use get_rte_attribute_is_dropped on it.
- * (This might not hold after rewriting or planning,
- * but it's OK to assume here.)
+ * earlier RTEs of the same rtable, because that's the
+ * order the planner builds things in. So we already
+ * processed the referenced RTE, and so it's safe to
+ * use get_rte_attribute_is_dropped on it. (This might
+ * not hold after rewriting or planning, but it's OK
+ * to assume here.)
*/
Assert(aliasvar->varlevelsup == 0);
if (aliasvar->varno != curinputvarno)
@@ -200,6 +201,7 @@ AcquireRewriteLocks(Query *parsetree)
break;
case RTE_SUBQUERY:
+
/*
* The subquery RTE itself is all right, but we have to
* recurse to process the represented subquery.
@@ -214,8 +216,8 @@ AcquireRewriteLocks(Query *parsetree)
}
/*
- * Recurse into sublink subqueries, too. But we already did the ones
- * in the rtable.
+ * Recurse into sublink subqueries, too. But we already did the ones in
+ * the rtable.
*/
if (parsetree->hasSubLinks)
query_tree_walker(parsetree, acquireLocksOnSubLinks, NULL,
@@ -266,8 +268,8 @@ rewriteRuleAction(Query *parsetree,
Query **sub_action_ptr;
/*
- * Make modifiable copies of rule action and qual (what we're passed
- * are the stored versions in the relcache; don't touch 'em!).
+ * Make modifiable copies of rule action and qual (what we're passed are
+ * the stored versions in the relcache; don't touch 'em!).
*/
rule_action = (Query *) copyObject(rule_action);
rule_qual = (Node *) copyObject(rule_qual);
@@ -283,12 +285,12 @@ rewriteRuleAction(Query *parsetree,
new_varno = PRS2_NEW_VARNO + rt_length;
/*
- * Adjust rule action and qual to offset its varnos, so that we can
- * merge its rtable with the main parsetree's rtable.
+ * Adjust rule action and qual to offset its varnos, so that we can merge
+ * its rtable with the main parsetree's rtable.
*
- * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries
- * will be in the SELECT part, and we have to modify that rather than
- * the top-level INSERT (kluge!).
+ * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries will
+ * be in the SELECT part, and we have to modify that rather than the
+ * top-level INSERT (kluge!).
*/
sub_action = getInsertSelectQuery(rule_action, &sub_action_ptr);
@@ -303,50 +305,47 @@ rewriteRuleAction(Query *parsetree,
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish
- * rewriting, but we leave them all in place for two reasons:
+ * action. Some of the entries may be unused after we finish rewriting,
+ * but we leave them all in place for two reasons:
*
- * We'd have a much harder job to adjust the query's varnos if we
- * selectively removed RT entries.
+ * We'd have a much harder job to adjust the query's varnos if we selectively
+ * removed RT entries.
*
- * If the rule is INSTEAD, then the original query won't be executed at
- * all, and so its rtable must be preserved so that the executor will
- * do the correct permissions checks on it.
+ * If the rule is INSTEAD, then the original query won't be executed at all,
+ * and so its rtable must be preserved so that the executor will do the
+ * correct permissions checks on it.
*
* RT entries that are not referenced in the completed jointree will be
- * ignored by the planner, so they do not affect query semantics. But
- * any permissions checks specified in them will be applied during
- * executor startup (see ExecCheckRTEPerms()). This allows us to
- * check that the caller has, say, insert-permission on a view, when
- * the view is not semantically referenced at all in the resulting
- * query.
+ * ignored by the planner, so they do not affect query semantics. But any
+ * permissions checks specified in them will be applied during executor
+ * startup (see ExecCheckRTEPerms()). This allows us to check that the
+ * caller has, say, insert-permission on a view, when the view is not
+ * semantically referenced at all in the resulting query.
*
- * When a rule is not INSTEAD, the permissions checks done on its copied
- * RT entries will be redundant with those done during execution of
- * the original query, but we don't bother to treat that case
- * differently.
+ * When a rule is not INSTEAD, the permissions checks done on its copied RT
+ * entries will be redundant with those done during execution of the
+ * original query, but we don't bother to treat that case differently.
*
- * NOTE: because planner will destructively alter rtable, we must ensure
- * that rule action's rtable is separate and shares no substructure
- * with the main rtable. Hence do a deep copy here.
+ * NOTE: because planner will destructively alter rtable, we must ensure that
+ * rule action's rtable is separate and shares no substructure with the
+ * main rtable. Hence do a deep copy here.
*/
sub_action->rtable = list_concat((List *) copyObject(parsetree->rtable),
sub_action->rtable);
/*
* Each rule action's jointree should be the main parsetree's jointree
- * plus that rule's jointree, but usually *without* the original
- * rtindex that we're replacing (if present, which it won't be for
- * INSERT). Note that if the rule action refers to OLD, its jointree
- * will add a reference to rt_index. If the rule action doesn't refer
- * to OLD, but either the rule_qual or the user query quals do, then
- * we need to keep the original rtindex in the jointree to provide
- * data for the quals. We don't want the original rtindex to be
- * joined twice, however, so avoid keeping it if the rule action
- * mentions it.
+ * plus that rule's jointree, but usually *without* the original rtindex
+ * that we're replacing (if present, which it won't be for INSERT). Note
+ * that if the rule action refers to OLD, its jointree will add a
+ * reference to rt_index. If the rule action doesn't refer to OLD, but
+ * either the rule_qual or the user query quals do, then we need to keep
+ * the original rtindex in the jointree to provide data for the quals. We
+ * don't want the original rtindex to be joined twice, however, so avoid
+ * keeping it if the rule action mentions it.
*
- * As above, the action's jointree must not share substructure with the
- * main parsetree's.
+ * As above, the action's jointree must not share substructure with the main
+ * parsetree's.
*/
if (sub_action->commandType != CMD_UTILITY)
{
@@ -357,15 +356,15 @@ rewriteRuleAction(Query *parsetree,
keeporig = (!rangeTableEntry_used((Node *) sub_action->jointree,
rt_index, 0)) &&
(rangeTableEntry_used(rule_qual, rt_index, 0) ||
- rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
+ rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
newjointree = adjustJoinTreeList(parsetree, !keeporig, rt_index);
if (newjointree != NIL)
{
/*
- * If sub_action is a setop, manipulating its jointree will do
- * no good at all, because the jointree is dummy. (Perhaps
- * someday we could push the joining and quals down to the
- * member statements of the setop?)
+ * If sub_action is a setop, manipulating its jointree will do no
+ * good at all, because the jointree is dummy. (Perhaps someday
+ * we could push the joining and quals down to the member
+ * statements of the setop?)
*/
if (sub_action->setOperations != NULL)
ereport(ERROR,
@@ -378,9 +377,9 @@ rewriteRuleAction(Query *parsetree,
}
/*
- * Event Qualification forces copying of parsetree and splitting into
- * two queries one w/rule_qual, one w/NOT rule_qual. Also add user
- * query qual onto rule action
+ * Event Qualification forces copying of parsetree and splitting into two
+ * queries one w/rule_qual, one w/NOT rule_qual. Also add user query qual
+ * onto rule action
*/
AddQual(sub_action, rule_qual);
@@ -390,9 +389,9 @@ rewriteRuleAction(Query *parsetree,
* Rewrite new.attribute w/ right hand side of target-list entry for
* appropriate field name in insert/update.
*
- * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just
- * apply it to sub_action; we have to remember to update the sublink
- * inside rule_action, too.
+ * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just apply
+ * it to sub_action; we have to remember to update the sublink inside
+ * rule_action, too.
*/
if ((event == CMD_INSERT || event == CMD_UPDATE) &&
sub_action->commandType != CMD_UTILITY)
@@ -440,8 +439,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
newjointree = list_delete_ptr(newjointree, rtr);
/*
- * foreach is safe because we exit loop after
- * list_delete...
+ * foreach is safe because we exit loop after list_delete...
*/
break;
}
@@ -494,13 +492,13 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
ListCell *temp;
/*
- * We process the normal (non-junk) attributes by scanning the input
- * tlist once and transferring TLEs into an array, then scanning the
- * array to build an output tlist. This avoids O(N^2) behavior for
- * large numbers of attributes.
+ * We process the normal (non-junk) attributes by scanning the input tlist
+ * once and transferring TLEs into an array, then scanning the array to
+ * build an output tlist. This avoids O(N^2) behavior for large numbers
+ * of attributes.
*
- * Junk attributes are tossed into a separate list during the same
- * tlist scan, then appended to the reconstructed tlist.
+ * Junk attributes are tossed into a separate list during the same tlist
+ * scan, then appended to the reconstructed tlist.
*/
numattrs = RelationGetNumberOfAttributes(target_relation);
new_tles = (TargetEntry **) palloc0(numattrs * sizeof(TargetEntry *));
@@ -531,11 +529,11 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
else
{
/*
- * Copy all resjunk tlist entries to junk_tlist, and
- * assign them resnos above the last real resno.
+ * Copy all resjunk tlist entries to junk_tlist, and assign them
+ * resnos above the last real resno.
*
- * Typical junk entries include ORDER BY or GROUP BY expressions
- * (are these actually possible in an INSERT or UPDATE?), system
+ * Typical junk entries include ORDER BY or GROUP BY expressions (are
+ * these actually possible in an INSERT or UPDATE?), system
* attribute references, etc.
*/
@@ -561,9 +559,9 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
continue;
/*
- * Handle the two cases where we need to insert a default
- * expression: it's an INSERT and there's no tlist entry for the
- * column, or the tlist entry is a DEFAULT placeholder node.
+ * Handle the two cases where we need to insert a default expression:
+ * it's an INSERT and there's no tlist entry for the column, or the
+ * tlist entry is a DEFAULT placeholder node.
*/
if ((new_tle == NULL && commandType == CMD_INSERT) ||
(new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)))
@@ -573,12 +571,11 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
new_expr = build_column_default(target_relation, attrno);
/*
- * If there is no default (ie, default is effectively NULL),
- * we can omit the tlist entry in the INSERT case, since the
- * planner can insert a NULL for itself, and there's no point
- * in spending any more rewriter cycles on the entry. But in
- * the UPDATE case we've got to explicitly set the column to
- * NULL.
+ * If there is no default (ie, default is effectively NULL), we
+ * can omit the tlist entry in the INSERT case, since the planner
+ * can insert a NULL for itself, and there's no point in spending
+ * any more rewriter cycles on the entry. But in the UPDATE case
+ * we've got to explicitly set the column to NULL.
*/
if (!new_expr)
{
@@ -640,8 +637,7 @@ process_matched_tle(TargetEntry *src_tle,
if (prior_tle == NULL)
{
/*
- * Normal case where this is the first assignment to the
- * attribute.
+ * Normal case where this is the first assignment to the attribute.
*/
return src_tle;
}
@@ -682,8 +678,7 @@ process_matched_tle(TargetEntry *src_tle,
attrName)));
/*
- * Prior TLE could be a nest of assignments if we do this more than
- * once.
+ * Prior TLE could be a nest of assignments if we do this more than once.
*/
priorbottom = prior_input;
for (;;)
@@ -713,10 +708,10 @@ process_matched_tle(TargetEntry *src_tle,
memcpy(fstore, prior_expr, sizeof(FieldStore));
fstore->newvals =
list_concat(list_copy(((FieldStore *) prior_expr)->newvals),
- list_copy(((FieldStore *) src_expr)->newvals));
+ list_copy(((FieldStore *) src_expr)->newvals));
fstore->fieldnums =
list_concat(list_copy(((FieldStore *) prior_expr)->fieldnums),
- list_copy(((FieldStore *) src_expr)->fieldnums));
+ list_copy(((FieldStore *) src_expr)->fieldnums));
}
else
{
@@ -809,8 +804,7 @@ build_column_default(Relation rel, int attrno)
if (expr == NULL)
{
/*
- * No per-column default, so look for a default for the type
- * itself.
+ * No per-column default, so look for a default for the type itself.
*/
expr = get_typdefault(atttype);
}
@@ -821,8 +815,8 @@ build_column_default(Relation rel, int attrno)
/*
* Make sure the value is coerced to the target column type; this will
* generally be true already, but there seem to be some corner cases
- * involving domain defaults where it might not be true. This should
- * match the parser's processing of non-defaulted expressions --- see
+ * involving domain defaults where it might not be true. This should match
+ * the parser's processing of non-defaulted expressions --- see
* updateTargetListEntry().
*/
exprtype = exprType(expr);
@@ -840,7 +834,7 @@ build_column_default(Relation rel, int attrno)
NameStr(att_tup->attname),
format_type_be(atttype),
format_type_be(exprtype)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
return expr;
}
@@ -913,8 +907,8 @@ ApplyRetrieveRule(Query *parsetree,
elog(ERROR, "cannot handle per-attribute ON SELECT rule");
/*
- * Make a modifiable copy of the view query, and acquire needed locks
- * on the relations it mentions.
+ * Make a modifiable copy of the view query, and acquire needed locks on
+ * the relations it mentions.
*/
rule_action = copyObject(linitial(rule->actions));
@@ -926,8 +920,8 @@ ApplyRetrieveRule(Query *parsetree,
rule_action = fireRIRrules(rule_action, activeRIRs);
/*
- * VIEWs are really easy --- just plug the view query in as a
- * subselect, replacing the relation's original RTE.
+ * VIEWs are really easy --- just plug the view query in as a subselect,
+ * replacing the relation's original RTE.
*/
rte = rt_fetch(rt_index, parsetree->rtable);
@@ -937,8 +931,8 @@ ApplyRetrieveRule(Query *parsetree,
rte->inh = false; /* must not be set for a subquery */
/*
- * We move the view's permission check data down to its rangetable.
- * The checks will actually be done against the *OLD* entry therein.
+ * We move the view's permission check data down to its rangetable. The
+ * checks will actually be done against the *OLD* entry therein.
*/
subrte = rt_fetch(PRS2_OLD_VARNO, rule_action->rtable);
Assert(subrte->relid == relation->rd_id);
@@ -954,9 +948,9 @@ ApplyRetrieveRule(Query *parsetree,
if (list_member_int(parsetree->rowMarks, rt_index))
{
/*
- * Remove the view from the list of rels that will actually be
- * marked FOR UPDATE/SHARE by the executor. It will still be access-
- * checked for write access, though.
+ * Remove the view from the list of rels that will actually be marked
+ * FOR UPDATE/SHARE by the executor. It will still be access- checked
+ * for write access, though.
*/
parsetree->rowMarks = list_delete_int(parsetree->rowMarks, rt_index);
@@ -989,7 +983,7 @@ markQueryForLocking(Query *qry, bool forUpdate, bool noWait, bool skipOldNew)
if (forUpdate != qry->forUpdate)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
+ errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
if (noWait != qry->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1052,8 +1046,8 @@ fireRIRonSubLink(Node *node, List *activeRIRs)
}
/*
- * Do NOT recurse into Query nodes, because fireRIRrules already
- * processed subselects of subselects for us.
+ * Do NOT recurse into Query nodes, because fireRIRrules already processed
+ * subselects of subselects for us.
*/
return expression_tree_walker(node, fireRIRonSubLink,
(void *) activeRIRs);
@@ -1070,8 +1064,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs)
int rt_index;
/*
- * don't try to convert this into a foreach loop, because rtable list
- * can get changed each time through...
+ * don't try to convert this into a foreach loop, because rtable list can
+ * get changed each time through...
*/
rt_index = 0;
while (rt_index < list_length(parsetree->rtable))
@@ -1088,8 +1082,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs)
rte = rt_fetch(rt_index, parsetree->rtable);
/*
- * A subquery RTE can't have associated rules, so there's nothing
- * to do to this level of the query, but we must recurse into the
+ * A subquery RTE can't have associated rules, so there's nothing to
+ * do to this level of the query, but we must recurse into the
* subquery to expand any rule references in it.
*/
if (rte->rtekind == RTE_SUBQUERY)
@@ -1108,8 +1102,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs)
* If the table is not referenced in the query, then we ignore it.
* This prevents infinite expansion loop due to new rtable entries
* inserted by expansion of a rule. A table is referenced if it is
- * part of the join set (a source table), or is referenced by any
- * Var nodes, or is the result table.
+ * part of the join set (a source table), or is referenced by any Var
+ * nodes, or is the result table.
*/
if (rt_index != parsetree->resultRelation &&
!rangeTableEntry_used((Node *) parsetree, rt_index, 0))
@@ -1181,8 +1175,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs)
}
/*
- * Recurse into sublink subqueries, too. But we already did the ones
- * in the rtable.
+ * Recurse into sublink subqueries, too. But we already did the ones in
+ * the rtable.
*/
if (parsetree->hasSubLinks)
query_tree_walker(parsetree, fireRIRonSubLink, (void *) activeRIRs,
@@ -1217,8 +1211,8 @@ CopyAndAddInvertedQual(Query *parsetree,
/*
* In case there are subqueries in the qual, acquire necessary locks and
* fix any deleted JOIN RTE entries. (This is somewhat redundant with
- * rewriteRuleAction, but not entirely ... consider restructuring so
- * that we only need to process the qual this way once.)
+ * rewriteRuleAction, but not entirely ... consider restructuring so that
+ * we only need to process the qual this way once.)
*/
(void) acquireLocksOnSubLinks(new_qual, NULL);
@@ -1302,13 +1296,13 @@ fireRules(Query *parsetree,
if (qsrc == QSRC_QUAL_INSTEAD_RULE)
{
/*
- * If there are INSTEAD rules with qualifications, the
- * original query is still performed. But all the negated rule
- * qualifications of the INSTEAD rules are added so it does
- * its actions only in cases where the rule quals of all
- * INSTEAD rules are false. Think of it as the default action
- * in a case. We save this in *qual_product so RewriteQuery()
- * can add it to the query list after we mangled it up enough.
+ * If there are INSTEAD rules with qualifications, the original
+ * query is still performed. But all the negated rule
+ * qualifications of the INSTEAD rules are added so it does its
+ * actions only in cases where the rule quals of all INSTEAD rules
+ * are false. Think of it as the default action in a case. We save
+ * this in *qual_product so RewriteQuery() can add it to the query
+ * list after we mangled it up enough.
*
* If we have already found an unqualified INSTEAD rule, then
* *qual_product won't be used, so don't bother building it.
@@ -1364,9 +1358,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/*
* If the statement is an update, insert or delete - fire rules on it.
*
- * SELECT rules are handled later when we have all the queries that
- * should get executed. Also, utilities aren't rewritten at all (do
- * we still need that check?)
+ * SELECT rules are handled later when we have all the queries that should
+ * get executed. Also, utilities aren't rewritten at all (do we still
+ * need that check?)
*/
if (event != CMD_SELECT && event != CMD_UTILITY)
{
@@ -1387,10 +1381,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
rt_entry_relation = heap_open(rt_entry->relid, NoLock);
/*
- * If it's an INSERT or UPDATE, rewrite the targetlist into
- * standard form. This will be needed by the planner anyway, and
- * doing it now ensures that any references to NEW.field will
- * behave sanely.
+ * If it's an INSERT or UPDATE, rewrite the targetlist into standard
+ * form. This will be needed by the planner anyway, and doing it now
+ * ensures that any references to NEW.field will behave sanely.
*/
if (event == CMD_INSERT || event == CMD_UPDATE)
rewriteTargetList(parsetree, rt_entry_relation);
@@ -1413,8 +1406,8 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
&qual_product);
/*
- * If we got any product queries, recursively rewrite them ---
- * but first check for recursion!
+ * If we got any product queries, recursively rewrite them --- but
+ * first check for recursion!
*/
if (product_queries != NIL)
{
@@ -1427,9 +1420,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
if (rev->relation == RelationGetRelid(rt_entry_relation) &&
rev->event == event)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("infinite recursion detected in rules for relation \"%s\"",
- RelationGetRelationName(rt_entry_relation))));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("infinite recursion detected in rules for relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation))));
}
rev = (rewrite_event *) palloc(sizeof(rewrite_event));
@@ -1454,13 +1447,12 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
/*
- * For INSERTs, the original query is done first; for UPDATE/DELETE,
- * it is done last. This is needed because update and delete rule
- * actions might not do anything if they are invoked after the update
- * or delete is performed. The command counter increment between the
- * query executions makes the deleted (and maybe the updated) tuples
- * disappear so the scans for them in the rule actions cannot find
- * them.
+ * For INSERTs, the original query is done first; for UPDATE/DELETE, it is
+ * done last. This is needed because update and delete rule actions might
+ * not do anything if they are invoked after the update or delete is
+ * performed. The command counter increment between the query executions
+ * makes the deleted (and maybe the updated) tuples disappear so the scans
+ * for them in the rule actions cannot find them.
*
* If we found any unqualified INSTEAD, the original query is not done at
* all, in any form. Otherwise, we add the modified form if qualified
@@ -1569,19 +1561,18 @@ QueryRewrite(Query *parsetree)
/*
* Step 3
*
- * Determine which, if any, of the resulting queries is supposed to set
- * the command-result tag; and update the canSetTag fields
- * accordingly.
+ * Determine which, if any, of the resulting queries is supposed to set the
+ * command-result tag; and update the canSetTag fields accordingly.
*
* If the original query is still in the list, it sets the command tag.
- * Otherwise, the last INSTEAD query of the same kind as the original
- * is allowed to set the tag. (Note these rules can leave us with no
- * query setting the tag. The tcop code has to cope with this by
- * setting up a default tag based on the original un-rewritten query.)
+ * Otherwise, the last INSTEAD query of the same kind as the original is
+ * allowed to set the tag. (Note these rules can leave us with no query
+ * setting the tag. The tcop code has to cope with this by setting up a
+ * default tag based on the original un-rewritten query.)
*
* The Asserts verify that at most one query in the result list is marked
- * canSetTag. If we aren't checking asserts, we can fall out of the
- * loop as soon as we find the original query.
+ * canSetTag. If we aren't checking asserts, we can fall out of the loop
+ * as soon as we find the original query.
*/
origCmdType = parsetree->commandType;
foundOriginalQuery = false;
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 353e4ca76f3..9e6bc4808e7 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.91 2005/06/04 19:19:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.92 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,8 +53,8 @@ checkExprHasAggs(Node *node)
context.sublevels_up = 0;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
checkExprHasAggs_walker,
@@ -70,8 +70,7 @@ checkExprHasAggs_walker(Node *node, checkExprHasAggs_context *context)
if (IsA(node, Aggref))
{
if (((Aggref *) node)->agglevelsup == context->sublevels_up)
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
/* else fall through to examine argument */
}
if (IsA(node, Query))
@@ -113,8 +112,7 @@ checkExprHasSubLink_walker(Node *node, void *context)
if (node == NULL)
return false;
if (IsA(node, SubLink))
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, checkExprHasSubLink_walker, context);
}
@@ -208,8 +206,8 @@ OffsetVarNodes(Node *node, int offset, int sublevels_up)
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, go straight to query_tree_walker to make sure that
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, go straight to query_tree_walker to make sure that
* sublevels_up doesn't get incremented prematurely.
*/
if (node && IsA(node, Query))
@@ -217,11 +215,11 @@ OffsetVarNodes(Node *node, int offset, int sublevels_up)
Query *qry = (Query *) node;
/*
- * If we are starting at a Query, and sublevels_up is zero, then
- * we must also fix rangetable indexes in the Query itself ---
- * namely resultRelation and rowMarks entries. sublevels_up
- * cannot be zero when recursing into a subquery, so there's no
- * need to have the same logic inside OffsetVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then we
+ * must also fix rangetable indexes in the Query itself --- namely
+ * resultRelation and rowMarks entries. sublevels_up cannot be zero
+ * when recursing into a subquery, so there's no need to have the same
+ * logic inside OffsetVarNodes_walker.
*/
if (sublevels_up == 0)
{
@@ -349,8 +347,8 @@ ChangeVarNodes(Node *node, int rt_index, int new_index, int sublevels_up)
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, go straight to query_tree_walker to make sure that
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, go straight to query_tree_walker to make sure that
* sublevels_up doesn't get incremented prematurely.
*/
if (node && IsA(node, Query))
@@ -358,11 +356,11 @@ ChangeVarNodes(Node *node, int rt_index, int new_index, int sublevels_up)
Query *qry = (Query *) node;
/*
- * If we are starting at a Query, and sublevels_up is zero, then
- * we must also fix rangetable indexes in the Query itself ---
- * namely resultRelation and rowMarks entries. sublevels_up
- * cannot be zero when recursing into a subquery, so there's no
- * need to have the same logic inside ChangeVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then we
+ * must also fix rangetable indexes in the Query itself --- namely
+ * resultRelation and rowMarks entries. sublevels_up cannot be zero
+ * when recursing into a subquery, so there's no need to have the same
+ * logic inside ChangeVarNodes_walker.
*/
if (sublevels_up == 0)
{
@@ -473,8 +471,8 @@ IncrementVarSublevelsUp(Node *node, int delta_sublevels_up,
context.min_sublevels_up = min_sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
IncrementVarSublevelsUp_walker,
@@ -562,8 +560,8 @@ rangeTableEntry_used(Node *node, int rt_index, int sublevels_up)
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
rangeTableEntry_used_walker,
@@ -626,8 +624,8 @@ attribute_used(Node *node, int rt_index, int attno, int sublevels_up)
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
attribute_used_walker,
@@ -671,10 +669,10 @@ getInsertSelectQuery(Query *parsetree, Query ***subquery_ptr)
* they've been pushed down to the SELECT.
*/
if (list_length(parsetree->rtable) >= 2 &&
- strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->aliasname,
- "*OLD*") == 0 &&
- strcmp(rt_fetch(PRS2_NEW_VARNO, parsetree->rtable)->eref->aliasname,
- "*NEW*") == 0)
+ strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->aliasname,
+ "*OLD*") == 0 &&
+ strcmp(rt_fetch(PRS2_NEW_VARNO, parsetree->rtable)->eref->aliasname,
+ "*NEW*") == 0)
return parsetree;
Assert(parsetree->jointree && IsA(parsetree->jointree, FromExpr));
if (list_length(parsetree->jointree->fromlist) != 1)
@@ -687,10 +685,10 @@ getInsertSelectQuery(Query *parsetree, Query ***subquery_ptr)
selectquery->commandType == CMD_SELECT))
elog(ERROR, "expected to find SELECT subquery");
if (list_length(selectquery->rtable) >= 2 &&
- strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->aliasname,
- "*OLD*") == 0 &&
- strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->aliasname,
- "*NEW*") == 0)
+ strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->aliasname,
+ "*OLD*") == 0 &&
+ strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->aliasname,
+ "*NEW*") == 0)
{
if (subquery_ptr)
*subquery_ptr = &(selectrte->subquery);
@@ -717,30 +715,30 @@ AddQual(Query *parsetree, Node *qual)
/*
* There's noplace to put the qual on a utility statement.
*
- * If it's a NOTIFY, silently ignore the qual; this means that the
- * NOTIFY will execute, whether or not there are any qualifying
- * rows. While clearly wrong, this is much more useful than
- * refusing to execute the rule at all, and extra NOTIFY events
- * are harmless for typical uses of NOTIFY.
+ * If it's a NOTIFY, silently ignore the qual; this means that the NOTIFY
+ * will execute, whether or not there are any qualifying rows. While
+ * clearly wrong, this is much more useful than refusing to execute
+ * the rule at all, and extra NOTIFY events are harmless for typical
+ * uses of NOTIFY.
*
* If it isn't a NOTIFY, error out, since unconditional execution of
- * other utility stmts is unlikely to be wanted. (This case is
- * not currently allowed anyway, but keep the test for safety.)
+ * other utility stmts is unlikely to be wanted. (This case is not
+ * currently allowed anyway, but keep the test for safety.)
*/
if (parsetree->utilityStmt && IsA(parsetree->utilityStmt, NotifyStmt))
return;
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conditional utility statements are not implemented")));
+ errmsg("conditional utility statements are not implemented")));
}
if (parsetree->setOperations != NULL)
{
/*
- * There's noplace to put the qual on a setop statement, either.
- * (This could be fixed, but right now the planner simply ignores
- * any qual condition on a setop query.)
+ * There's noplace to put the qual on a setop statement, either. (This
+ * could be fixed, but right now the planner simply ignores any qual
+ * condition on a setop query.)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -759,8 +757,8 @@ AddQual(Query *parsetree, Node *qual)
Assert(!checkExprHasAggs(copy));
/*
- * Make sure query is marked correctly if added qual has sublinks.
- * Need not search qual when query is already marked.
+ * Make sure query is marked correctly if added qual has sublinks. Need
+ * not search qual when query is already marked.
*/
if (!parsetree->hasSubLinks)
parsetree->hasSubLinks = checkExprHasSubLink(copy);
@@ -880,9 +878,9 @@ ResolveNew_mutator(Node *node, ResolveNew_context *context)
/*
* If generating an expansion for a var of a named rowtype
- * (ie, this is a plain relation RTE), then we must
- * include dummy items for dropped columns. If the var is
- * RECORD (ie, this is a JOIN), then omit dropped columns.
+ * (ie, this is a plain relation RTE), then we must include
+ * dummy items for dropped columns. If the var is RECORD (ie,
+ * this is a JOIN), then omit dropped columns.
*/
expandRTE(context->target_rte,
this_varno, this_varlevelsup,
@@ -943,8 +941,8 @@ ResolveNew(Node *node, int target_varno, int sublevels_up,
context.inserted_sublink = false;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_mutator(node,
ResolveNew_mutator,
diff --git a/src/backend/rewrite/rewriteRemove.c b/src/backend/rewrite/rewriteRemove.c
index f06d23cc322..900b3489df0 100644
--- a/src/backend/rewrite/rewriteRemove.c
+++ b/src/backend/rewrite/rewriteRemove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.62 2005/04/14 20:03:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.63 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,10 +117,10 @@ RemoveRewriteRuleById(Oid ruleOid)
elog(ERROR, "could not find tuple for rule %u", ruleOid);
/*
- * We had better grab AccessExclusiveLock so that we know no other
- * rule additions/deletions are going on for this relation. Else we
- * cannot set relhasrules correctly. Besides, we don't want to be
- * changing the ruleset while queries are executing on the rel.
+ * We had better grab AccessExclusiveLock so that we know no other rule
+ * additions/deletions are going on for this relation. Else we cannot set
+ * relhasrules correctly. Besides, we don't want to be changing the
+ * ruleset while queries are executing on the rel.
*/
eventRelationOid = ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class;
event_relation = heap_open(eventRelationOid, AccessExclusiveLock);
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index 8fd3bba7cde..6150c904d7e 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteSupport.c,v 1.61 2005/04/14 20:03:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteSupport.c,v 1.62 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,8 +56,7 @@ SetRelationRuleStatus(Oid relationId, bool relHasRules,
Form_pg_class classForm;
/*
- * Find the tuple to update in pg_class, using syscache for the
- * lookup.
+ * Find the tuple to update in pg_class, using syscache for the lookup.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index d74d356f0e6..fa0ea2dc6d8 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.76 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.77 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -120,8 +120,8 @@ InitBufferPool(void)
buf->buf_id = i;
/*
- * Initially link all the buffers together as unused.
- * Subsequent management of this list is done by freelist.c.
+ * Initially link all the buffers together as unused. Subsequent
+ * management of this list is done by freelist.c.
*/
buf->freeNext = i + 1;
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 99dbbacd298..8d1ad267f7d 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the BufMappingLock, as specified in the
* comments.
*
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.42 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.43 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ BufTableLookup(BufferTag *tagPtr)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold write lock on BufMappingLock
@@ -105,7 +105,7 @@ BufTableInsert(BufferTag *tagPtr, int buf_id)
bool found;
Assert(buf_id >= 0); /* -1 is reserved for not-in-table */
- Assert(tagPtr->blockNum != P_NEW); /* invalid tag */
+ Assert(tagPtr->blockNum != P_NEW); /* invalid tag */
result = (BufferLookupEnt *)
hash_search(SharedBufHash, (void *) tagPtr, HASH_ENTER, &found);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index fb3efcbca96..8341a25e055 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.196 2005/10/12 16:45:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.197 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@
#define BufferGetLSN(bufHdr) (*((XLogRecPtr*) BufHdrGetBlock(bufHdr)))
/* Note: this macro only works on local buffers, not shared ones! */
-#define LocalBufHdrGetBlock(bufHdr) \
+#define LocalBufHdrGetBlock(bufHdr) \
LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
@@ -70,14 +70,15 @@ int bgwriter_lru_maxpages = 5;
int bgwriter_all_maxpages = 5;
-long NDirectFileRead; /* some I/O's are direct file access.
- * bypass bufmgr */
+long NDirectFileRead; /* some I/O's are direct file access. bypass
+ * bufmgr */
long NDirectFileWrite; /* e.g., I/O in psort and hashjoin. */
/* local state for StartBufferIO and related functions */
static volatile BufferDesc *InProgressBuf = NULL;
static bool IsForInput;
+
/* local state for LockBufferForCleanup */
static volatile BufferDesc *PinCountWaitBuf = NULL;
@@ -89,7 +90,7 @@ static bool SyncOneBuffer(int buf_id, bool skip_pinned);
static void WaitIO(volatile BufferDesc *buf);
static bool StartBufferIO(volatile BufferDesc *buf, bool forInput);
static void TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
- int set_flag_bits);
+ int set_flag_bits);
static void buffer_write_error_callback(void *arg);
static volatile BufferDesc *BufferAlloc(Relation reln, BlockNumber blockNum,
bool *foundPtr);
@@ -149,8 +150,8 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
ReadBufferCount++;
/*
- * lookup the buffer. IO_IN_PROGRESS is set if the requested
- * block is not currently in memory.
+ * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
+ * not currently in memory.
*/
bufHdr = BufferAlloc(reln, blockNum, &found);
if (found)
@@ -173,17 +174,16 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
/*
* if we have gotten to this point, we have allocated a buffer for the
- * page but its contents are not yet valid. IO_IN_PROGRESS is set for
- * it, if it's a shared buffer.
+ * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
+ * if it's a shared buffer.
*
- * Note: if smgrextend fails, we will end up with a buffer that is
- * allocated but not marked BM_VALID. P_NEW will still select the
- * same block number (because the relation didn't get any longer on
- * disk) and so future attempts to extend the relation will find the
- * same buffer (if it's not been recycled) but come right back here to
- * try smgrextend again.
+ * Note: if smgrextend fails, we will end up with a buffer that is allocated
+ * but not marked BM_VALID. P_NEW will still select the same block number
+ * (because the relation didn't get any longer on disk) and so future
+ * attempts to extend the relation will find the same buffer (if it's not
+ * been recycled) but come right back here to try smgrextend again.
*/
- Assert(!(bufHdr->flags & BM_VALID)); /* spinlock not needed */
+ Assert(!(bufHdr->flags & BM_VALID)); /* spinlock not needed */
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
@@ -201,25 +201,24 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
if (!PageHeaderIsValid((PageHeader) bufBlock))
{
/*
- * During WAL recovery, the first access to any data page
- * should overwrite the whole page from the WAL; so a
- * clobbered page header is not reason to fail. Hence, when
- * InRecovery we may always act as though zero_damaged_pages
- * is ON.
+ * During WAL recovery, the first access to any data page should
+ * overwrite the whole page from the WAL; so a clobbered page
+ * header is not reason to fail. Hence, when InRecovery we may
+ * always act as though zero_damaged_pages is ON.
*/
if (zero_damaged_pages || InRecovery)
{
ereport(WARNING,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page header in block %u of relation \"%s\"; zeroing out page",
- blockNum, RelationGetRelationName(reln))));
+ blockNum, RelationGetRelationName(reln))));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation \"%s\"",
- blockNum, RelationGetRelationName(reln))));
+ errmsg("invalid page header in block %u of relation \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
@@ -277,8 +276,8 @@ BufferAlloc(Relation reln,
{
/*
* Found it. Now, pin the buffer so no one can steal it from the
- * buffer pool, and check to see if the correct data has been
- * loaded into the buffer.
+ * buffer pool, and check to see if the correct data has been loaded
+ * into the buffer.
*/
buf = &BufferDescriptors[buf_id];
@@ -292,17 +291,17 @@ BufferAlloc(Relation reln,
if (!valid)
{
/*
- * We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
- * have to wait for any active read attempt to finish, and
- * then set up our own read attempt if the page is still not
- * BM_VALID. StartBufferIO does it all.
+ * We can only get here if (a) someone else is still reading in
+ * the page, or (b) a previous read attempt failed. We have to
+ * wait for any active read attempt to finish, and then set up our
+ * own read attempt if the page is still not BM_VALID.
+ * StartBufferIO does it all.
*/
if (StartBufferIO(buf, true))
{
/*
- * If we get here, previous attempts to read the buffer
- * must have failed ... but we shall bravely try again.
+ * If we get here, previous attempts to read the buffer must
+ * have failed ... but we shall bravely try again.
*/
*foundPtr = FALSE;
}
@@ -313,7 +312,7 @@ BufferAlloc(Relation reln,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock BufMappingLock while doing the work.
+ * buffer. Remember to unlock BufMappingLock while doing the work.
*/
LWLockRelease(BufMappingLock);
@@ -321,10 +320,10 @@ BufferAlloc(Relation reln,
for (;;)
{
/*
- * Select a victim buffer. The buffer is returned with its
- * header spinlock still held! Also the BufFreelistLock is
- * still held, since it would be bad to hold the spinlock
- * while possibly waking up other processes.
+ * Select a victim buffer. The buffer is returned with its header
+ * spinlock still held! Also the BufFreelistLock is still held, since
+ * it would be bad to hold the spinlock while possibly waking up other
+ * processes.
*/
buf = StrategyGetBuffer();
@@ -341,8 +340,8 @@ BufferAlloc(Relation reln,
/*
* If the buffer was dirty, try to write it out. There is a race
- * condition here, in that someone might dirty it after we released
- * it above, or even while we are writing it out (since our share-lock
+ * condition here, in that someone might dirty it after we released it
+ * above, or even while we are writing it out (since our share-lock
* won't prevent hint-bit updates). We will recheck the dirty bit
* after re-locking the buffer header.
*/
@@ -350,14 +349,14 @@ BufferAlloc(Relation reln,
{
/*
* We need a share-lock on the buffer contents to write it out
- * (else we might write invalid data, eg because someone else
- * is compacting the page contents while we write). We must use
- * a conditional lock acquisition here to avoid deadlock. Even
+ * (else we might write invalid data, eg because someone else is
+ * compacting the page contents while we write). We must use a
+ * conditional lock acquisition here to avoid deadlock. Even
* though the buffer was not pinned (and therefore surely not
* locked) when StrategyGetBuffer returned it, someone else could
- * have pinned and exclusive-locked it by the time we get here.
- * If we try to get the lock unconditionally, we'd block waiting
- * for them; if they later block waiting for us, deadlock ensues.
+ * have pinned and exclusive-locked it by the time we get here. If
+ * we try to get the lock unconditionally, we'd block waiting for
+ * them; if they later block waiting for us, deadlock ensues.
* (This has been observed to happen when two backends are both
* trying to split btree index pages, and the second one just
* happens to be trying to split the page the first one got from
@@ -371,8 +370,8 @@ BufferAlloc(Relation reln,
else
{
/*
- * Someone else has pinned the buffer, so give it up and
- * loop back to get another one.
+ * Someone else has pinned the buffer, so give it up and loop
+ * back to get another one.
*/
UnpinBuffer(buf, true, false /* evidently recently used */ );
continue;
@@ -380,8 +379,8 @@ BufferAlloc(Relation reln,
}
/*
- * Acquire exclusive mapping lock in preparation for changing
- * the buffer's association.
+ * Acquire exclusive mapping lock in preparation for changing the
+ * buffer's association.
*/
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
@@ -389,20 +388,19 @@ BufferAlloc(Relation reln,
* Try to make a hashtable entry for the buffer under its new tag.
* This could fail because while we were writing someone else
* allocated another buffer for the same block we want to read in.
- * Note that we have not yet removed the hashtable entry for the
- * old tag.
+ * Note that we have not yet removed the hashtable entry for the old
+ * tag.
*/
buf_id = BufTableInsert(&newTag, buf->buf_id);
if (buf_id >= 0)
{
/*
- * Got a collision. Someone has already done what we were about
- * to do. We'll just handle this as if it were found in
- * the buffer pool in the first place. First, give up the
- * buffer we were planning to use. Don't allow it to be
- * thrown in the free list (we don't want to hold both
- * global locks at once).
+ * Got a collision. Someone has already done what we were about to
+ * do. We'll just handle this as if it were found in the buffer
+ * pool in the first place. First, give up the buffer we were
+ * planning to use. Don't allow it to be thrown in the free list
+ * (we don't want to hold both global locks at once).
*/
UnpinBuffer(buf, true, false);
@@ -421,7 +419,7 @@ BufferAlloc(Relation reln,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -446,9 +444,9 @@ BufferAlloc(Relation reln,
/*
* Somebody could have pinned or re-dirtied the buffer while we were
- * doing the I/O and making the new hashtable entry. If so, we
- * can't recycle this buffer; we must undo everything we've done and
- * start over with a new victim buffer.
+ * doing the I/O and making the new hashtable entry. If so, we can't
+ * recycle this buffer; we must undo everything we've done and start
+ * over with a new victim buffer.
*/
if (buf->refcount == 1 && !(buf->flags & BM_DIRTY))
break;
@@ -462,9 +460,9 @@ BufferAlloc(Relation reln,
/*
* Okay, it's finally safe to rename the buffer.
*
- * Clearing BM_VALID here is necessary, clearing the dirtybits
- * is just paranoia. We also clear the usage_count since any
- * recency of use of the old content is no longer relevant.
+ * Clearing BM_VALID here is necessary, clearing the dirtybits is just
+ * paranoia. We also clear the usage_count since any recency of use of
+ * the old content is no longer relevant.
*/
oldTag = buf->tag;
oldFlags = buf->flags;
@@ -482,9 +480,8 @@ BufferAlloc(Relation reln,
/*
* Buffer contents are currently invalid. Try to get the io_in_progress
- * lock. If StartBufferIO returns false, then someone else managed
- * to read it before we did, so there's nothing left for BufferAlloc()
- * to do.
+ * lock. If StartBufferIO returns false, then someone else managed to
+ * read it before we did, so there's nothing left for BufferAlloc() to do.
*/
if (StartBufferIO(buf, true))
*foundPtr = FALSE;
@@ -505,7 +502,7 @@ BufferAlloc(Relation reln,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -523,9 +520,10 @@ InvalidateBuffer(volatile BufferDesc *buf)
UnlockBufHdr(buf);
retry:
+
/*
- * Acquire exclusive mapping lock in preparation for changing
- * the buffer's association.
+ * Acquire exclusive mapping lock in preparation for changing the buffer's
+ * association.
*/
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
@@ -541,13 +539,13 @@ retry:
}
/*
- * We assume the only reason for it to be pinned is that someone else
- * is flushing the page out. Wait for them to finish. (This could be
- * an infinite loop if the refcount is messed up... it would be nice
- * to time out after awhile, but there seems no way to be sure how
- * many loops may be needed. Note that if the other guy has pinned
- * the buffer but not yet done StartBufferIO, WaitIO will fall through
- * and we'll effectively be busy-looping here.)
+ * We assume the only reason for it to be pinned is that someone else is
+ * flushing the page out. Wait for them to finish. (This could be an
+ * infinite loop if the refcount is messed up... it would be nice to time
+ * out after awhile, but there seems no way to be sure how many loops may
+ * be needed. Note that if the other guy has pinned the buffer but not
+ * yet done StartBufferIO, WaitIO will fall through and we'll effectively
+ * be busy-looping here.)
*/
if (buf->refcount != 0)
{
@@ -561,8 +559,8 @@ retry:
}
/*
- * Clear out the buffer's tag and flags. We must do this to ensure
- * that linear scans of the buffer array don't think the buffer is valid.
+ * Clear out the buffer's tag and flags. We must do this to ensure that
+ * linear scans of the buffer array don't think the buffer is valid.
*/
oldFlags = buf->flags;
CLEAR_BUFFERTAG(buf->tag);
@@ -666,7 +664,7 @@ WriteNoReleaseBuffer(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -718,7 +716,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -731,8 +729,8 @@ PinBuffer(volatile BufferDesc *buf)
{
/*
* Use NoHoldoff here because we don't want the unlock to be a
- * potential place to honor a QueryCancel request.
- * (The caller should be holding off interrupts anyway.)
+ * potential place to honor a QueryCancel request. (The caller should
+ * be holding off interrupts anyway.)
*/
LockBufHdr_NoHoldoff(buf);
buf->refcount++;
@@ -799,7 +797,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool trashOK)
PrivateRefCount[b]--;
if (PrivateRefCount[b] == 0)
{
- bool trash_buffer = false;
+ bool trash_buffer = false;
/* I'd better not still hold any locks on the buffer */
Assert(!LWLockHeldByMe(buf->content_lock));
@@ -818,7 +816,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool trashOK)
if (buf->usage_count < BM_MAX_USAGE_COUNT)
buf->usage_count++;
}
- else if (trashOK &&
+ else if (trashOK &&
buf->refcount == 0 &&
buf->usage_count == 0)
trash_buffer = true;
@@ -827,7 +825,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool trashOK)
buf->refcount == 1)
{
/* we just released the last pin other than the waiter's */
- int wait_backend_pid = buf->wait_backend_pid;
+ int wait_backend_pid = buf->wait_backend_pid;
buf->flags &= ~BM_PIN_COUNT_WAITER;
UnlockBufHdr_NoHoldoff(buf);
@@ -837,9 +835,9 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool trashOK)
UnlockBufHdr_NoHoldoff(buf);
/*
- * If VACUUM is releasing an otherwise-unused buffer, send it to
- * the freelist for near-term reuse. We put it at the tail so that
- * it won't be used before any invalid buffers that may exist.
+ * If VACUUM is releasing an otherwise-unused buffer, send it to the
+ * freelist for near-term reuse. We put it at the tail so that it
+ * won't be used before any invalid buffers that may exist.
*/
if (trash_buffer)
StrategyFreeBuffer(buf, false);
@@ -897,19 +895,19 @@ BgBufferSync(void)
* To minimize work at checkpoint time, we want to try to keep all the
* buffers clean; this motivates a scan that proceeds sequentially through
* all buffers. But we are also charged with ensuring that buffers that
- * will be recycled soon are clean when needed; these buffers are the
- * ones just ahead of the StrategySyncStart point. We make a separate
- * scan through those.
+ * will be recycled soon are clean when needed; these buffers are the ones
+ * just ahead of the StrategySyncStart point. We make a separate scan
+ * through those.
*/
/*
- * This loop runs over all buffers, including pinned ones. The
- * starting point advances through the buffer pool on successive calls.
+ * This loop runs over all buffers, including pinned ones. The starting
+ * point advances through the buffer pool on successive calls.
*
- * Note that we advance the static counter *before* trying to write.
- * This ensures that, if we have a persistent write failure on a dirty
- * buffer, we'll still be able to make progress writing other buffers.
- * (The bgwriter will catch the error and just call us again later.)
+ * Note that we advance the static counter *before* trying to write. This
+ * ensures that, if we have a persistent write failure on a dirty buffer,
+ * we'll still be able to make progress writing other buffers. (The
+ * bgwriter will catch the error and just call us again later.)
*/
if (bgwriter_all_percent > 0.0 && bgwriter_all_maxpages > 0)
{
@@ -958,7 +956,7 @@ BgBufferSync(void)
* If skip_pinned is true, we don't write currently-pinned buffers, nor
* buffers marked recently used, as these are not replacement candidates.
*
- * Returns true if buffer was written, else false. (This could be in error
+ * Returns true if buffer was written, else false. (This could be in error
* if FlushBuffers finds the buffer clean after locking it, but we don't
* care all that much.)
*
@@ -972,12 +970,11 @@ SyncOneBuffer(int buf_id, bool skip_pinned)
/*
* Check whether buffer needs writing.
*
- * We can make this check without taking the buffer content lock
- * so long as we mark pages dirty in access methods *before* logging
- * changes with XLogInsert(): if someone marks the buffer dirty
- * just after our check we don't worry because our checkpoint.redo
- * points before log record for upcoming changes and so we are not
- * required to write such dirty buffer.
+ * We can make this check without taking the buffer content lock so long as
+ * we mark pages dirty in access methods *before* logging changes with
+ * XLogInsert(): if someone marks the buffer dirty just after our check we
+ * don't worry because our checkpoint.redo points before log record for
+ * upcoming changes and so we are not required to write such dirty buffer.
*/
LockBufHdr(bufHdr);
if (!(bufHdr->flags & BM_VALID) || !(bufHdr->flags & BM_DIRTY))
@@ -993,8 +990,8 @@ SyncOneBuffer(int buf_id, bool skip_pinned)
}
/*
- * Pin it, share-lock it, write it. (FlushBuffer will do nothing
- * if the buffer is clean by the time we've locked it.)
+ * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
+ * buffer is clean by the time we've locked it.)
*/
PinBuffer_Locked(bufHdr);
LWLockAcquire(bufHdr->content_lock, LW_SHARED);
@@ -1031,10 +1028,10 @@ ShowBufferUsage(void)
localhitrate = (float) LocalBufferHitCount *100.0 / ReadLocalBufferCount;
appendStringInfo(&str,
- "!\tShared blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
- ReadBufferCount - BufferHitCount, BufferFlushCount, hitrate);
+ "!\tShared blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
+ ReadBufferCount - BufferHitCount, BufferFlushCount, hitrate);
appendStringInfo(&str,
- "!\tLocal blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
+ "!\tLocal blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
ReadLocalBufferCount - LocalBufferHitCount, LocalBufferFlushCount, localhitrate);
appendStringInfo(&str,
"!\tDirect blocks: %10ld read, %10ld written\n",
@@ -1259,8 +1256,8 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
/*
* Acquire the buffer's io_in_progress lock. If StartBufferIO returns
- * false, then someone else flushed the buffer before we could, so
- * we need not do anything.
+ * false, then someone else flushed the buffer before we could, so we need
+ * not do anything.
*/
if (!StartBufferIO(buf, false))
return;
@@ -1277,16 +1274,16 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
/*
* Force XLOG flush up to buffer's LSN. This implements the basic WAL
- * rule that log updates must hit disk before any of the data-file
- * changes they describe do.
+ * rule that log updates must hit disk before any of the data-file changes
+ * they describe do.
*/
recptr = BufferGetLSN(buf);
XLogFlush(recptr);
/*
* Now it's safe to write buffer to disk. Note that no one else should
- * have been able to write it while we were busy with log flushing
- * because we have the io_in_progress lock.
+ * have been able to write it while we were busy with log flushing because
+ * we have the io_in_progress lock.
*/
/* To check if block content changes while flushing. - vadim 01/17/97 */
@@ -1302,8 +1299,8 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
BufferFlushCount++;
/*
- * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set)
- * and end the io_in_progress state.
+ * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
+ * end the io_in_progress state.
*/
TerminateBufferIO(buf, true, 0);
@@ -1351,7 +1348,7 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -1360,7 +1357,7 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -1406,7 +1403,7 @@ DropRelFileNodeBuffers(RelFileNode rnode, bool istemp,
LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
bufHdr->tag.blockNum >= firstDelBlock)
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr);
}
@@ -1439,7 +1436,7 @@ DropBuffers(Oid dbid)
bufHdr = &BufferDescriptors[i];
LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid)
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr);
}
@@ -1703,9 +1700,8 @@ UnlockBuffers(void)
LockBufHdr_NoHoldoff(buf);
/*
- * Don't complain if flag bit not set; it could have been
- * reset but we got a cancel/die interrupt before getting the
- * signal.
+ * Don't complain if flag bit not set; it could have been reset but we
+ * got a cancel/die interrupt before getting the signal.
*/
if ((buf->flags & BM_PIN_COUNT_WAITER) != 0 &&
buf->wait_backend_pid == MyProcPid)
@@ -1744,10 +1740,10 @@ LockBuffer(Buffer buffer, int mode)
LWLockAcquire(buf->content_lock, LW_EXCLUSIVE);
/*
- * This is not the best place to mark buffer dirty (eg indices do
- * not always change buffer they lock in excl mode). But please
- * remember that it's critical to set dirty bit *before* logging
- * changes with XLogInsert() - see comments in SyncOneBuffer().
+ * This is not the best place to mark buffer dirty (eg indices do not
+ * always change buffer they lock in excl mode). But please remember
+ * that it's critical to set dirty bit *before* logging changes with
+ * XLogInsert() - see comments in SyncOneBuffer().
*/
LockBufHdr_NoHoldoff(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
@@ -1776,10 +1772,10 @@ ConditionalLockBuffer(Buffer buffer)
if (LWLockConditionalAcquire(buf->content_lock, LW_EXCLUSIVE))
{
/*
- * This is not the best place to mark buffer dirty (eg indices do
- * not always change buffer they lock in excl mode). But please
- * remember that it's critical to set dirty bit *before* logging
- * changes with XLogInsert() - see comments in SyncOneBuffer().
+ * This is not the best place to mark buffer dirty (eg indices do not
+ * always change buffer they lock in excl mode). But please remember
+ * that it's critical to set dirty bit *before* logging changes with
+ * XLogInsert() - see comments in SyncOneBuffer().
*/
LockBufHdr_NoHoldoff(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
@@ -1880,18 +1876,17 @@ WaitIO(volatile BufferDesc *buf)
/*
* Changed to wait until there's no IO - Inoue 01/13/2000
*
- * Note this is *necessary* because an error abort in the process doing
- * I/O could release the io_in_progress_lock prematurely. See
- * AbortBufferIO.
+ * Note this is *necessary* because an error abort in the process doing I/O
+ * could release the io_in_progress_lock prematurely. See AbortBufferIO.
*/
for (;;)
{
BufFlags sv_flags;
/*
- * It may not be necessary to acquire the spinlock to check the
- * flag here, but since this test is essential for correctness,
- * we'd better play it safe.
+ * It may not be necessary to acquire the spinlock to check the flag
+ * here, but since this test is essential for correctness, we'd better
+ * play it safe.
*/
LockBufHdr(buf);
sv_flags = buf->flags;
@@ -2027,11 +2022,10 @@ AbortBufferIO(void)
if (buf)
{
/*
- * Since LWLockReleaseAll has already been called, we're not
- * holding the buffer's io_in_progress_lock. We have to re-acquire
- * it so that we can use TerminateBufferIO. Anyone who's executing
- * WaitIO on the buffer will be in a busy spin until we succeed in
- * doing this.
+ * Since LWLockReleaseAll has already been called, we're not holding
+ * the buffer's io_in_progress_lock. We have to re-acquire it so that
+ * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
+ * buffer will be in a busy spin until we succeed in doing this.
*/
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 4739512ad36..e204b0d0094 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.53 2005/10/12 16:45:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.54 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,11 +28,11 @@ typedef struct
int nextVictimBuffer;
int firstFreeBuffer; /* Head of list of unused buffers */
- int lastFreeBuffer; /* Tail of list of unused buffers */
+ int lastFreeBuffer; /* Tail of list of unused buffers */
/*
- * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1
- * (that is, when the list is empty)
+ * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is,
+ * when the list is empty)
*/
} BufferStrategyControl;
@@ -79,10 +79,10 @@ StrategyGetBuffer(void)
buf->freeNext = FREENEXT_NOT_IN_LIST;
/*
- * If the buffer is pinned or has a nonzero usage_count,
- * we cannot use it; discard it and retry. (This can only happen
- * if VACUUM put a valid buffer in the freelist and then someone
- * else used it before we got to it.)
+ * If the buffer is pinned or has a nonzero usage_count, we cannot use
+ * it; discard it and retry. (This can only happen if VACUUM put a
+ * valid buffer in the freelist and then someone else used it before
+ * we got to it.)
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
@@ -100,8 +100,8 @@ StrategyGetBuffer(void)
StrategyControl->nextVictimBuffer = 0;
/*
- * If the buffer is pinned or has a nonzero usage_count,
- * we cannot use it; decrement the usage_count and keep scanning.
+ * If the buffer is pinned or has a nonzero usage_count, we cannot use
+ * it; decrement the usage_count and keep scanning.
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
@@ -114,11 +114,11 @@ StrategyGetBuffer(void)
else if (--trycounter == 0)
{
/*
- * We've scanned all the buffers without making any state
- * changes, so all the buffers are pinned (or were when we
- * looked at them). We could hope that someone will free
- * one eventually, but it's probably better to fail than to
- * risk getting stuck in an infinite loop.
+ * We've scanned all the buffers without making any state changes,
+ * so all the buffers are pinned (or were when we looked at them).
+ * We could hope that someone will free one eventually, but it's
+ * probably better to fail than to risk getting stuck in an
+ * infinite loop.
*/
UnlockBufHdr(buf);
elog(ERROR, "no unpinned buffers available");
@@ -143,8 +143,8 @@ StrategyFreeBuffer(volatile BufferDesc *buf, bool at_head)
LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
/*
- * It is possible that we are told to put something in the freelist
- * that is already in it; don't screw up the list if so.
+ * It is possible that we are told to put something in the freelist that
+ * is already in it; don't screw up the list if so.
*/
if (buf->freeNext == FREENEXT_NOT_IN_LIST)
{
@@ -181,8 +181,8 @@ StrategySyncStart(void)
int result;
/*
- * We could probably dispense with the locking here, but just to be
- * safe ...
+ * We could probably dispense with the locking here, but just to be safe
+ * ...
*/
LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
result = StrategyControl->nextVictimBuffer;
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 6dce5086562..ca80255e15e 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.69 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.70 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ typedef struct
} LocalBufferLookupEnt;
/* Note: this macro only works on local buffers, not shared ones! */
-#define LocalBufHdrGetBlock(bufHdr) \
+#define LocalBufHdrGetBlock(bufHdr) \
LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
int NLocBuffer = 0; /* until buffers are initialized */
@@ -107,8 +107,8 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
#endif
/*
- * Need to get a new buffer. We use a clock sweep algorithm
- * (essentially the same as what freelist.c does now...)
+ * Need to get a new buffer. We use a clock sweep algorithm (essentially
+ * the same as what freelist.c does now...)
*/
trycounter = NLocBuffer;
for (;;)
@@ -140,8 +140,8 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
}
/*
- * this buffer is not referenced but it might still be dirty. if
- * that's the case, write it out before reusing it!
+ * this buffer is not referenced but it might still be dirty. if that's
+ * the case, write it out before reusing it!
*/
if (bufHdr->flags & BM_DIRTY)
{
@@ -183,7 +183,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &bufHdr->tag,
HASH_REMOVE, NULL);
- if (!hresult) /* shouldn't happen */
+ if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* mark buffer invalid just in case hash insert fails */
CLEAR_BUFFERTAG(bufHdr->tag);
@@ -192,7 +192,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found);
- if (found) /* shouldn't happen */
+ if (found) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
hresult->id = b;
@@ -271,10 +271,10 @@ InitLocalBuffers(void)
BufferDesc *buf = &LocalBufferDescriptors[i];
/*
- * negative to indicate local buffer. This is tricky: shared
- * buffers start with 0. We have to start with -2. (Note that the
- * routine BufferDescriptorGetBuffer adds 1 to buf_id so our first
- * buffer id is -1.)
+ * negative to indicate local buffer. This is tricky: shared buffers
+ * start with 0. We have to start with -2. (Note that the routine
+ * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
+ * is -1.)
*/
buf->buf_id = -i - 2;
}
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 29a01f02c92..95f2885c619 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.21 2004/12/31 22:00:51 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.22 2005/10/15 02:49:25 momjian Exp $
*
* NOTES:
*
@@ -59,8 +59,8 @@ struct BufFile
long *offsets; /* palloc'd array with numFiles entries */
/*
- * offsets[i] is the current seek position of files[i]. We use this
- * to avoid making redundant FileSeek calls.
+ * offsets[i] is the current seek position of files[i]. We use this to
+ * avoid making redundant FileSeek calls.
*/
bool isTemp; /* can only add files if this is TRUE */
@@ -68,9 +68,8 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * "current pos" is position of start of buffer within the logical
- * file. Position as seen by user of BufFile is (curFile, curOffset +
- * pos).
+ * "current pos" is position of start of buffer within the logical file.
+ * Position as seen by user of BufFile is (curFile, curOffset + pos).
*/
int curFile; /* file index (0..n) part of current pos */
int curOffset; /* offset part of current pos */
@@ -125,7 +124,7 @@ extendBufFile(BufFile *file)
file->files = (File *) repalloc(file->files,
(file->numFiles + 1) * sizeof(File));
file->offsets = (long *) repalloc(file->offsets,
- (file->numFiles + 1) * sizeof(long));
+ (file->numFiles + 1) * sizeof(long));
file->files[file->numFiles] = pfile;
file->offsets[file->numFiles] = 0L;
file->numFiles++;
@@ -270,8 +269,8 @@ BufFileDumpBuffer(BufFile *file)
}
/*
- * Enforce per-file size limit only for temp files, else just try
- * to write as much as asked...
+ * Enforce per-file size limit only for temp files, else just try to
+ * write as much as asked...
*/
bytestowrite = file->nbytes - wpos;
if (file->isTemp)
@@ -302,11 +301,10 @@ BufFileDumpBuffer(BufFile *file)
file->dirty = false;
/*
- * At this point, curOffset has been advanced to the end of the
- * buffer, ie, its original value + nbytes. We need to make it point
- * to the logical file position, ie, original value + pos, in case
- * that is less (as could happen due to a small backwards seek in a
- * dirty buffer!)
+ * At this point, curOffset has been advanced to the end of the buffer,
+ * ie, its original value + nbytes. We need to make it point to the
+ * logical file position, ie, original value + pos, in case that is less
+ * (as could happen due to a small backwards seek in a dirty buffer!)
*/
file->curOffset -= (file->nbytes - file->pos);
if (file->curOffset < 0) /* handle possible segment crossing */
@@ -317,8 +315,7 @@ BufFileDumpBuffer(BufFile *file)
}
/*
- * Now we can set the buffer empty without changing the logical
- * position
+ * Now we can set the buffer empty without changing the logical position
*/
file->pos = 0;
file->nbytes = 0;
@@ -467,8 +464,8 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
/*
* Relative seek considers only the signed offset, ignoring
- * fileno. Note that large offsets (> 1 gig) risk overflow in
- * this add...
+ * fileno. Note that large offsets (> 1 gig) risk overflow in this
+ * add...
*/
newFile = file->curFile;
newOffset = (file->curOffset + file->pos) + offset;
@@ -507,8 +504,8 @@ BufFileSeek(BufFile *file, int fileno, long offset, int whence)
/*
* At this point and no sooner, check for seek past last segment. The
- * above flush could have created a new segment, so checking sooner
- * would not work (at least not with this code).
+ * above flush could have created a new segment, so checking sooner would
+ * not work (at least not with this code).
*/
if (file->isTemp)
{
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 11ca95e833e..2db12ebd11b 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.120 2005/08/08 03:11:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.121 2005/10/15 02:49:25 momjian Exp $
*
* NOTES:
*
@@ -123,7 +123,7 @@ typedef struct vfd
{
signed short fd; /* current FD, or VFD_CLOSED if none */
unsigned short fdstate; /* bitflags for VFD's state */
- SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
+ SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
File nextFree; /* link to next free VFD, if in freelist */
File lruMoreRecently; /* doubly linked recency-of-use list */
File lruLessRecently;
@@ -268,7 +268,7 @@ pg_fsync_writethrough(int fd)
#ifdef WIN32
return _commit(fd);
#elif defined(__darwin__)
- return (fcntl(fd, F_FULLFSYNC, 0) == -1) ? -1 : 0;
+ return (fcntl(fd, F_FULLFSYNC, 0) == -1) ? -1 : 0;
#else
return -1;
#endif
@@ -305,7 +305,7 @@ pg_fdatasync(int fd)
void
InitFileAccess(void)
{
- Assert(SizeVfdCache == 0); /* call me only once */
+ Assert(SizeVfdCache == 0); /* call me only once */
/* initialize cache header entry */
VfdCache = (Vfd *) malloc(sizeof(Vfd));
@@ -330,7 +330,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -382,9 +382,9 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups.
- * We assume that the system limit is highestfd+1 (remember 0 is a
- * legal FD number) and so already_open is highestfd+1 - usable_fds.
+ * Return results. usable_fds is just the number of successful dups. We
+ * assume that the system limit is highestfd+1 (remember 0 is a legal FD
+ * number) and so already_open is highestfd+1 - usable_fds.
*/
*usable_fds = used;
*already_open = highestfd + 1 - used;
@@ -466,7 +466,7 @@ tryAgain:
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto tryAgain;
@@ -587,9 +587,9 @@ LruInsert(File file)
}
/*
- * The open could still fail for lack of file descriptors, eg due
- * to overall system file table being full. So, be prepared to
- * release another FD if necessary...
+ * The open could still fail for lack of file descriptors, eg due to
+ * overall system file table being full. So, be prepared to release
+ * another FD if necessary...
*/
vfdP->fd = BasicOpenFile(vfdP->fileName, vfdP->fileFlags,
vfdP->fileMode);
@@ -631,8 +631,8 @@ ReleaseLruFile(void)
if (nfile > 0)
{
/*
- * There are opened files and so there should be at least one used
- * vfd in the ring.
+ * There are opened files and so there should be at least one used vfd
+ * in the ring.
*/
Assert(VfdCache[0].lruMoreRecently != 0);
LruDelete(VfdCache[0].lruMoreRecently);
@@ -649,14 +649,14 @@ AllocateVfd(void)
DO_DB(elog(LOG, "AllocateVfd. Size %d", SizeVfdCache));
- Assert(SizeVfdCache > 0); /* InitFileAccess not called? */
+ Assert(SizeVfdCache > 0); /* InitFileAccess not called? */
if (VfdCache[0].nextFree == 0)
{
/*
- * The free list is empty so it is time to increase the size of
- * the array. We choose to double it each time this happens.
- * However, there's not much point in starting *real* small.
+ * The free list is empty so it is time to increase the size of the
+ * array. We choose to double it each time this happens. However,
+ * there's not much point in starting *real* small.
*/
Size newCacheSize = SizeVfdCache * 2;
Vfd *newVfdCache;
@@ -745,9 +745,8 @@ FileAccess(File file)
file, VfdCache[file].fileName));
/*
- * Is the file open? If not, open it and put it at the head of the
- * LRU ring (possibly closing the least recently used file to get an
- * FD).
+ * Is the file open? If not, open it and put it at the head of the LRU
+ * ring (possibly closing the least recently used file to get an FD).
*/
if (FileIsNotOpen(file))
@@ -759,9 +758,8 @@ FileAccess(File file)
else if (VfdCache[0].lruLessRecently != file)
{
/*
- * We now know that the file is open and that it is not the last
- * one accessed, so we need to move it to the head of the Lru
- * ring.
+ * We now know that the file is open and that it is not the last one
+ * accessed, so we need to move it to the head of the Lru ring.
*/
Delete(file);
@@ -889,8 +887,8 @@ OpenTemporaryFile(bool interXact)
MyProcPid, tempFileCounter++);
/*
- * Open the file. Note: we don't use O_EXCL, in case there is an
- * orphaned temp file that can be reused.
+ * Open the file. Note: we don't use O_EXCL, in case there is an orphaned
+ * temp file that can be reused.
*/
file = FileNameOpenFile(tempfilepath,
O_RDWR | O_CREAT | O_TRUNC | PG_BINARY,
@@ -900,12 +898,12 @@ OpenTemporaryFile(bool interXact)
char *dirpath;
/*
- * We might need to create the pg_tempfiles subdirectory, if no
- * one has yet done so.
+ * We might need to create the pg_tempfiles subdirectory, if no one
+ * has yet done so.
*
- * Don't check for error from mkdir; it could fail if someone else
- * just did the same thing. If it doesn't work then we'll bomb
- * out on the second create attempt, instead.
+ * Don't check for error from mkdir; it could fail if someone else just
+ * did the same thing. If it doesn't work then we'll bomb out on the
+ * second create attempt, instead.
*/
dirpath = make_database_relative(PG_TEMP_FILES_DIR);
mkdir(dirpath, S_IRWXU);
@@ -1190,9 +1188,9 @@ AllocateFile(char *name, char *mode)
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedFiles[]; the test against max_safe_fds prevents
- * AllocateFile from hogging every one of the available FDs, which'd
- * lead to infinite looping.
+ * allocatedFiles[]; the test against max_safe_fds prevents AllocateFile
+ * from hogging every one of the available FDs, which'd lead to infinite
+ * looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
@@ -1216,7 +1214,7 @@ TryAgain:
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
@@ -1305,9 +1303,9 @@ AllocateDir(const char *dirname)
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedDescs[]; the test against max_safe_fds prevents
- * AllocateDir from hogging every one of the available FDs, which'd
- * lead to infinite looping.
+ * allocatedDescs[]; the test against max_safe_fds prevents AllocateDir
+ * from hogging every one of the available FDs, which'd lead to infinite
+ * looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
@@ -1331,7 +1329,7 @@ TryAgain:
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
@@ -1345,7 +1343,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
@@ -1378,9 +1376,10 @@ ReadDir(DIR *dir, const char *dirname)
return dent;
#ifdef WIN32
+
/*
- * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- * not in released version
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
@@ -1542,9 +1541,9 @@ CleanupTempFiles(bool isProcExit)
if ((fdstate & FD_TEMPORARY) && VfdCache[i].fileName != NULL)
{
/*
- * If we're in the process of exiting a backend process,
- * close all temporary files. Otherwise, only close
- * temporary files local to the current transaction.
+ * If we're in the process of exiting a backend process, close
+ * all temporary files. Otherwise, only close temporary files
+ * local to the current transaction.
*/
if (isProcExit || (fdstate & FD_XACT_TEMPORARY))
FileClose(i);
@@ -1596,8 +1595,8 @@ RemovePgTempFiles(void)
FreeDir(db_dir);
/*
- * In EXEC_BACKEND case there is a pgsql_tmp directory at the top
- * level of DataDir as well.
+ * In EXEC_BACKEND case there is a pgsql_tmp directory at the top level of
+ * DataDir as well.
*/
#ifdef EXEC_BACKEND
RemovePgTempFilesInDir(PG_TEMP_FILES_DIR);
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 11fc45ea8e7..1bc1d60d4ad 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.48 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.49 2005/10/15 02:49:25 momjian Exp $
*
*
* NOTES:
@@ -222,7 +222,7 @@ static HTAB *FreeSpaceMapRelHash; /* points to (what used to be)
static void CheckFreeSpaceMapStatistics(int elevel, int numRels,
- double needed);
+ double needed);
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
@@ -295,7 +295,7 @@ InitFreeSpaceMap(void)
if (!FreeSpaceMapRelHash)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
if (found)
return;
@@ -307,14 +307,14 @@ InitFreeSpaceMap(void)
if (nchunks <= MaxFSMRelations)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
- CHUNKPAGES)));
+ errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
+ CHUNKPAGES)));
FreeSpaceMap->arena = (char *) ShmemAlloc((Size) nchunks * CHUNKBYTES);
if (FreeSpaceMap->arena == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
FreeSpaceMap->totalChunks = nchunks;
FreeSpaceMap->usedChunks = 0;
@@ -371,10 +371,10 @@ GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
fsmrel = create_fsm_rel(rel);
/*
- * Update the moving average of space requests. This code implements
- * an exponential moving average with an equivalent period of about 63
- * requests. Ignore silly requests, however, to ensure that the
- * average stays sane.
+ * Update the moving average of space requests. This code implements an
+ * exponential moving average with an equivalent period of about 63
+ * requests. Ignore silly requests, however, to ensure that the average
+ * stays sane.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
@@ -478,10 +478,10 @@ RecordRelationFreeSpace(RelFileNode *rel,
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetPageWithFreeSpace
- * for it. Inactive rels thus will not clutter the map simply by
- * being vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetPageWithFreeSpace for
+ * it. Inactive rels thus will not clutter the map simply by being
+ * vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
@@ -494,8 +494,8 @@ RecordRelationFreeSpace(RelFileNode *rel,
curAllocPages = curAlloc * CHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress.
*/
newLocation = (FSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -567,10 +567,9 @@ RecordIndexFreeSpace(RelFileNode *rel,
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetFreeIndexPage for
- * it. Inactive rels thus will not clutter the map simply by being
- * vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetFreeIndexPage for it.
+ * Inactive rels thus will not clutter the map simply by being vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
@@ -584,9 +583,9 @@ RecordIndexFreeSpace(RelFileNode *rel,
curAllocPages = curAlloc * INDEXCHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we merely
- * forget extra pages.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress. But compression is easy: we merely forget extra
+ * pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -708,34 +707,34 @@ PrintFreeSpaceMapStatistics(int elevel)
ereport(elevel,
(errmsg("free space map contains %d pages in %d relations",
storedPages, numRels),
- errdetail("A total of %.0f page slots are in use (including overhead).\n"
- "%.0f page slots are required to track all free space.\n"
- "Current limits are: %d page slots, %d relations, using %.0f KB.",
- Min(needed, MaxFSMPages),
- needed, MaxFSMPages, MaxFSMRelations,
- (double) FreeSpaceShmemSize() / 1024.0)));
+ errdetail("A total of %.0f page slots are in use (including overhead).\n"
+ "%.0f page slots are required to track all free space.\n"
+ "Current limits are: %d page slots, %d relations, using %.0f KB.",
+ Min(needed, MaxFSMPages),
+ needed, MaxFSMPages, MaxFSMRelations,
+ (double) FreeSpaceShmemSize() / 1024.0)));
CheckFreeSpaceMapStatistics(NOTICE, numRels, needed);
/* Print to server logs too because is deals with a config variable. */
CheckFreeSpaceMapStatistics(LOG, numRels, needed);
}
-
+
static void
CheckFreeSpaceMapStatistics(int elevel, int numRels, double needed)
{
- if (numRels == MaxFSMRelations)
+ if (numRels == MaxFSMRelations)
ereport(elevel,
- (errmsg("max_fsm_relations(%d) equals the number of relations checked",
- MaxFSMRelations),
- errhint("You have >= %d relations.\n"
- "Consider increasing the configuration parameter \"max_fsm_relations\".",
- numRels)));
+ (errmsg("max_fsm_relations(%d) equals the number of relations checked",
+ MaxFSMRelations),
+ errhint("You have >= %d relations.\n"
+ "Consider increasing the configuration parameter \"max_fsm_relations\".",
+ numRels)));
else if (needed > MaxFSMPages)
ereport(elevel,
- (errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
- needed, MaxFSMPages),
- errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
- "to a value over %.0f.", needed)));
+ (errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
+ needed, MaxFSMPages),
+ errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
+ "to a value over %.0f.", needed)));
}
/*
@@ -753,7 +752,7 @@ DumpFreeSpaceMap(int code, Datum arg)
FSMRelation *fsmrel;
/* Try to create file */
- unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
+ unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
fp = AllocateFile(FSM_CACHE_FILENAME, PG_BINARY_W);
if (fp == NULL)
@@ -917,11 +916,11 @@ LoadFreeSpaceMap(void)
}
/*
- * Okay, create the FSM entry and insert data into it. Since the
- * rels were stored in reverse usage order, at the end of the loop
- * they will be correctly usage-ordered in memory; and if
- * MaxFSMRelations is less than it used to be, we will correctly
- * drop the least recently used ones.
+ * Okay, create the FSM entry and insert data into it. Since the rels
+ * were stored in reverse usage order, at the end of the loop they
+ * will be correctly usage-ordered in memory; and if MaxFSMRelations
+ * is less than it used to be, we will correctly drop the least
+ * recently used ones.
*/
fsmrel = create_fsm_rel(&relheader.key);
fsmrel->avgRequest = relheader.avgRequest;
@@ -936,8 +935,8 @@ LoadFreeSpaceMap(void)
/*
* If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we
- * merely forget extra pages.
+ * otherwise must compress. But compression is easy: we merely
+ * forget extra pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -1105,10 +1104,10 @@ realloc_fsm_rel(FSMRelation *fsmrel, int nPages, bool isIndex)
myAlloc = fsm_calc_target_allocation(myRequest);
/*
- * Need to reallocate space if (a) my target allocation is more than
- * my current allocation, AND (b) my actual immediate need
- * (myRequest+1 chunks) is more than my current allocation. Otherwise
- * just store the new data in-place.
+ * Need to reallocate space if (a) my target allocation is more than my
+ * current allocation, AND (b) my actual immediate need (myRequest+1
+ * chunks) is more than my current allocation. Otherwise just store the
+ * new data in-place.
*/
curAlloc = fsm_current_allocation(fsmrel);
if (myAlloc > curAlloc && (myRequest + 1) > curAlloc && nPages > 0)
@@ -1241,8 +1240,7 @@ find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
if (spaceAvail >= spaceNeeded)
{
/*
- * Found what we want --- adjust the entry, and update
- * nextPage.
+ * Found what we want --- adjust the entry, and update nextPage.
*/
FSMPageSetSpace(page, spaceAvail - spaceNeeded);
fsmrel->nextPage = pageIndex + 1;
@@ -1266,10 +1264,10 @@ find_index_free_space(FSMRelation *fsmrel)
BlockNumber result;
/*
- * If isIndex isn't set, it could be that RecordIndexFreeSpace() has
- * never yet been called on this relation, and we're still looking at
- * the default setting from create_fsm_rel(). If so, just act as
- * though there's no space.
+ * If isIndex isn't set, it could be that RecordIndexFreeSpace() has never
+ * yet been called on this relation, and we're still looking at the
+ * default setting from create_fsm_rel(). If so, just act as though
+ * there's no space.
*/
if (!fsmrel->isIndex)
{
@@ -1279,10 +1277,10 @@ find_index_free_space(FSMRelation *fsmrel)
}
/*
- * For indexes, there's no need for the nextPage state variable; we
- * just remove and return the first available page. (We could save
- * cycles here by returning the last page, but it seems better to
- * encourage re-use of lower-numbered pages.)
+ * For indexes, there's no need for the nextPage state variable; we just
+ * remove and return the first available page. (We could save cycles here
+ * by returning the last page, but it seems better to encourage re-use of
+ * lower-numbered pages.)
*/
if (fsmrel->storedPages <= 0)
return InvalidBlockNumber; /* no pages available */
@@ -1318,10 +1316,10 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
else
{
/*
- * No existing entry; ignore the call. We used to add the page to
- * the FSM --- but in practice, if the page hasn't got enough
- * space to satisfy the caller who's kicking it back to us, then
- * it's probably uninteresting to everyone else as well.
+ * No existing entry; ignore the call. We used to add the page to the
+ * FSM --- but in practice, if the page hasn't got enough space to
+ * satisfy the caller who's kicking it back to us, then it's probably
+ * uninteresting to everyone else as well.
*/
}
}
@@ -1454,25 +1452,23 @@ compact_fsm_storage(void)
/*
* It's possible that we have to move data down, not up, if the
- * allocations of previous rels expanded. This normally means
- * that our allocation expanded too (or at least got no worse),
- * and ditto for later rels. So there should be room to move all
- * our data down without dropping any --- but we might have to
- * push down following rels to acquire the room. We don't want to
- * do the push more than once, so pack everything against the end
- * of the arena if so.
+ * allocations of previous rels expanded. This normally means that
+ * our allocation expanded too (or at least got no worse), and ditto
+ * for later rels. So there should be room to move all our data down
+ * without dropping any --- but we might have to push down following
+ * rels to acquire the room. We don't want to do the push more than
+ * once, so pack everything against the end of the arena if so.
*
* In corner cases where we are on the short end of a roundoff choice
* that we were formerly on the long end of, it's possible that we
- * have to move down and compress our data too. In fact, even
- * after pushing down the following rels, there might not be as
- * much space as we computed for this rel above --- that would
- * imply that some following rel(s) are also on the losing end of
- * roundoff choices. We could handle this fairly by doing the
- * per-rel compactions out-of-order, but that seems like way too
- * much complexity to deal with a very infrequent corner case.
- * Instead, we simply drop pages from the end of the current rel's
- * data until it fits.
+ * have to move down and compress our data too. In fact, even after
+ * pushing down the following rels, there might not be as much space
+ * as we computed for this rel above --- that would imply that some
+ * following rel(s) are also on the losing end of roundoff choices. We
+ * could handle this fairly by doing the per-rel compactions
+ * out-of-order, but that seems like way too much complexity to deal
+ * with a very infrequent corner case. Instead, we simply drop pages
+ * from the end of the current rel's data until it fits.
*/
if (newChunkIndex > oldChunkIndex)
{
@@ -1508,12 +1504,11 @@ compact_fsm_storage(void)
newAlloc = limitChunkIndex - newChunkIndex;
/*
- * If newAlloc < 0 at this point, we are moving the
- * rel's firstChunk into territory currently assigned
- * to a later rel. This is okay so long as we do not
- * copy any data. The rels will be back in
- * nondecreasing firstChunk order at completion of the
- * compaction pass.
+ * If newAlloc < 0 at this point, we are moving the rel's
+ * firstChunk into territory currently assigned to a later
+ * rel. This is okay so long as we do not copy any data.
+ * The rels will be back in nondecreasing firstChunk order
+ * at completion of the compaction pass.
*/
if (newAlloc < 0)
newAlloc = 0;
@@ -1530,9 +1525,9 @@ compact_fsm_storage(void)
else if (newAllocPages < fsmrel->storedPages)
{
/*
- * Need to compress the page data. For an index,
- * "compression" just means dropping excess pages; otherwise
- * we try to keep the ones with the most space.
+ * Need to compress the page data. For an index, "compression"
+ * just means dropping excess pages; otherwise we try to keep the
+ * ones with the most space.
*/
if (fsmrel->isIndex)
{
@@ -1863,7 +1858,7 @@ DumpFreeSpace(void)
relNum++;
fprintf(stderr, "Map %d: rel %u/%u/%u isIndex %d avgRequest %u lastPageCount %d nextPage %d\nMap= ",
relNum,
- fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
+ fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
(int) fsmrel->isIndex, fsmrel->avgRequest,
fsmrel->lastPageCount, fsmrel->nextPage);
if (fsmrel->isIndex)
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 2976bf654de..39e8d3e527a 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.90 2004/12/31 22:00:56 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.91 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,8 +74,8 @@ void
proc_exit(int code)
{
/*
- * Once we set this flag, we are committed to exit. Any ereport()
- * will NOT send control back to the main loop, but right back here.
+ * Once we set this flag, we are committed to exit. Any ereport() will
+ * NOT send control back to the main loop, but right back here.
*/
proc_exit_inprogress = true;
@@ -100,15 +100,14 @@ proc_exit(int code)
/*
* call all the callbacks registered before calling exit().
*
- * Note that since we decrement on_proc_exit_index each time, if a
- * callback calls ereport(ERROR) or ereport(FATAL) then it won't be
- * invoked again when control comes back here (nor will the
- * previously-completed callbacks). So, an infinite loop should not
- * be possible.
+ * Note that since we decrement on_proc_exit_index each time, if a callback
+ * calls ereport(ERROR) or ereport(FATAL) then it won't be invoked again
+ * when control comes back here (nor will the previously-completed
+ * callbacks). So, an infinite loop should not be possible.
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
- on_proc_exit_list[on_proc_exit_index].arg);
+ on_proc_exit_list[on_proc_exit_index].arg);
elog(DEBUG3, "exit(%d)", code);
exit(code);
@@ -128,12 +127,12 @@ shmem_exit(int code)
/*
* call all the registered callbacks.
*
- * As with proc_exit(), we remove each callback from the list before
- * calling it, to avoid infinite loop in case of error.
+ * As with proc_exit(), we remove each callback from the list before calling
+ * it, to avoid infinite loop in case of error.
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
- on_shmem_exit_list[on_shmem_exit_index].arg);
+ on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 48ef94a3ecb..997c38a45c8 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.78 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.79 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,13 +66,12 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
/*
* Size of the Postgres shared-memory block is estimated via
- * moderately-accurate estimates for the big hogs, plus 100K for
- * the stuff that's too small to bother with estimating.
+ * moderately-accurate estimates for the big hogs, plus 100K for the
+ * stuff that's too small to bother with estimating.
*
- * We take some care during this phase to ensure that the total
- * size request doesn't overflow size_t. If this gets through,
- * we don't need to be so careful during the actual allocation
- * phase.
+ * We take some care during this phase to ensure that the total size
+ * request doesn't overflow size_t. If this gets through, we don't
+ * need to be so careful during the actual allocation phase.
*/
size = 100000;
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
@@ -115,9 +114,9 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
else
{
/*
- * We are reattaching to an existing shared memory segment.
- * This should only be reached in the EXEC_BACKEND case, and
- * even then only with makePrivate == false.
+ * We are reattaching to an existing shared memory segment. This
+ * should only be reached in the EXEC_BACKEND case, and even then only
+ * with makePrivate == false.
*/
#ifdef EXEC_BACKEND
Assert(!makePrivate);
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index a916688717c..98a742f24f4 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.19 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.20 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -112,9 +112,9 @@ PostmasterIsAlive(bool amDirectChild)
{
/*
* Use kill() to see if the postmaster is still alive. This can
- * sometimes give a false positive result, since the postmaster's
- * PID may get recycled, but it is good enough for existing uses
- * by indirect children.
+ * sometimes give a false positive result, since the postmaster's PID
+ * may get recycled, but it is good enough for existing uses by
+ * indirect children.
*/
return (kill(PostmasterPid, 0) == 0);
}
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index f2950bece30..1387ec6bd9d 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -16,14 +16,14 @@
* prepared transactions. The xid and subxids fields of these are valid,
* as is the procLocks list. They can be distinguished from regular backend
* PGPROCs at need by checking for pid == 0.
- *
+ *
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.6 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.7 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,8 +44,8 @@ typedef struct ProcArrayStruct
int maxProcs; /* allocated size of procs array */
/*
- * We declare procs[] as 1 entry because C wants a fixed-size array,
- * but actually it is maxProcs entries long.
+ * We declare procs[] as 1 entry because C wants a fixed-size array, but
+ * actually it is maxProcs entries long.
*/
PGPROC *procs[1]; /* VARIABLE LENGTH ARRAY */
} ProcArrayStruct;
@@ -67,14 +67,12 @@ static long xc_slow_answer = 0;
#define xc_slow_answer_inc() (xc_slow_answer++)
static void DisplayXidCache(void);
-
#else /* !XIDCACHE_DEBUG */
#define xc_by_recent_xmin_inc() ((void) 0)
#define xc_by_main_xid_inc() ((void) 0)
#define xc_by_child_xid_inc() ((void) 0)
#define xc_slow_answer_inc() ((void) 0)
-
#endif /* XIDCACHE_DEBUG */
@@ -88,7 +86,7 @@ ProcArrayShmemSize(void)
size = offsetof(ProcArrayStruct, procs);
size = add_size(size, mul_size(sizeof(PGPROC *),
- add_size(MaxBackends, max_prepared_xacts)));
+ add_size(MaxBackends, max_prepared_xacts)));
return size;
}
@@ -128,9 +126,9 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is
- * a fixed supply of PGPROC structs too, and so we should have
- * failed earlier.)
+ * Ooops, no room. (This really shouldn't happen, since there is a
+ * fixed supply of PGPROC structs too, and so we should have failed
+ * earlier.)
*/
LWLockRelease(ProcArrayLock);
ereport(FATAL,
@@ -213,8 +211,8 @@ TransactionIdIsInProgress(TransactionId xid)
bool locked;
/*
- * Don't bother checking a transaction older than RecentXmin; it
- * could not possibly still be running.
+ * Don't bother checking a transaction older than RecentXmin; it could not
+ * possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
{
@@ -249,8 +247,8 @@ TransactionIdIsInProgress(TransactionId xid)
}
/*
- * We can ignore main Xids that are younger than the target
- * Xid, since the target could not possibly be their child.
+ * We can ignore main Xids that are younger than the target Xid, since
+ * the target could not possibly be their child.
*/
if (TransactionIdPrecedes(xid, pxid))
continue;
@@ -272,11 +270,11 @@ TransactionIdIsInProgress(TransactionId xid)
}
/*
- * Save the main Xid for step 3. We only need to remember
- * main Xids that have uncached children. (Note: there is no
- * race condition here because the overflowed flag cannot be
- * cleared, only set, while we hold ProcArrayLock. So we can't
- * miss an Xid that we need to worry about.)
+ * Save the main Xid for step 3. We only need to remember main Xids
+ * that have uncached children. (Note: there is no race condition
+ * here because the overflowed flag cannot be cleared, only set, while
+ * we hold ProcArrayLock. So we can't miss an Xid that we need to
+ * worry about.)
*/
if (proc->subxids.overflowed)
xids[nxids++] = pxid;
@@ -295,11 +293,10 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* Step 3: have to check pg_subtrans.
*
- * At this point, we know it's either a subtransaction of one of the Xids
- * in xids[], or it's not running. If it's an already-failed
- * subtransaction, we want to say "not running" even though its parent
- * may still be running. So first, check pg_clog to see if it's been
- * aborted.
+ * At this point, we know it's either a subtransaction of one of the Xids in
+ * xids[], or it's not running. If it's an already-failed subtransaction,
+ * we want to say "not running" even though its parent may still be
+ * running. So first, check pg_clog to see if it's been aborted.
*/
xc_slow_answer_inc();
@@ -307,10 +304,9 @@ TransactionIdIsInProgress(TransactionId xid)
goto result_known;
/*
- * It isn't aborted, so check whether the transaction tree it belongs
- * to is still running (or, more precisely, whether it was running
- * when this routine started -- note that we already released
- * ProcArrayLock).
+ * It isn't aborted, so check whether the transaction tree it belongs to
+ * is still running (or, more precisely, whether it was running when this
+ * routine started -- note that we already released ProcArrayLock).
*/
topxid = SubTransGetTopmostTransaction(xid);
Assert(TransactionIdIsValid(topxid));
@@ -350,8 +346,8 @@ TransactionIdIsActive(TransactionId xid)
int i;
/*
- * Don't bother checking a transaction older than RecentXmin; it
- * could not possibly still be running.
+ * Don't bother checking a transaction older than RecentXmin; it could not
+ * possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
return false;
@@ -413,9 +409,9 @@ GetOldestXmin(bool allDbs)
/*
* Normally we start the min() calculation with our own XID. But if
* called by checkpointer, we will not be inside a transaction, so use
- * next XID as starting point for min() calculation. (Note that if
- * there are no xacts running at all, that will be the subtrans
- * truncation point!)
+ * next XID as starting point for min() calculation. (Note that if there
+ * are no xacts running at all, that will be the subtrans truncation
+ * point!)
*/
if (IsTransactionState())
result = GetTopTransactionId();
@@ -463,7 +459,7 @@ GetOldestXmin(bool allDbs)
* This ensures that the set of transactions seen as "running" by the
* current xact will not change after it takes the snapshot.
*
- * Note that only top-level XIDs are included in the snapshot. We can
+ * Note that only top-level XIDs are included in the snapshot. We can
* still apply the xmin and xmax limits to subtransaction XIDs, but we
* need to work a bit harder to see if XIDs in [xmin..xmax) are running.
*
@@ -474,7 +470,7 @@ GetOldestXmin(bool allDbs)
* RecentXmin: the xmin computed for the most recent snapshot. XIDs
* older than this are known not running any more.
* RecentGlobalXmin: the global xmin (oldest TransactionXmin across all
- * running transactions). This is the same computation done by
+ * running transactions). This is the same computation done by
* GetOldestXmin(TRUE).
*----------
*/
@@ -496,14 +492,14 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
TransactionIdIsValid(MyProc->xmin));
/*
- * Allocating space for maxProcs xids is usually overkill;
- * numProcs would be sufficient. But it seems better to do the
- * malloc while not holding the lock, so we can't look at numProcs.
+ * Allocating space for maxProcs xids is usually overkill; numProcs would
+ * be sufficient. But it seems better to do the malloc while not holding
+ * the lock, so we can't look at numProcs.
*
* This does open a possibility for avoiding repeated malloc/free: since
- * maxProcs does not change at runtime, we can simply reuse the
- * previous xip array if any. (This relies on the fact that all
- * callers pass static SnapshotData structs.)
+ * maxProcs does not change at runtime, we can simply reuse the previous
+ * xip array if any. (This relies on the fact that all callers pass
+ * static SnapshotData structs.)
*/
if (snapshot->xip == NULL)
{
@@ -563,13 +559,12 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
TransactionId xid = proc->xid;
/*
- * Ignore my own proc (dealt with my xid above), procs not
- * running a transaction, and xacts started since we read the
- * next transaction ID. There's no need to store XIDs above
- * what we got from ReadNewTransactionId, since we'll treat
- * them as running anyway. We also assume that such xacts
- * can't compute an xmin older than ours, so they needn't be
- * considered in computing globalxmin.
+ * Ignore my own proc (dealt with my xid above), procs not running a
+ * transaction, and xacts started since we read the next transaction
+ * ID. There's no need to store XIDs above what we got from
+ * ReadNewTransactionId, since we'll treat them as running anyway. We
+ * also assume that such xacts can't compute an xmin older than ours,
+ * so they needn't be considered in computing globalxmin.
*/
if (proc == MyProc ||
!TransactionIdIsNormal(xid) ||
@@ -594,9 +589,9 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
LWLockRelease(ProcArrayLock);
/*
- * Update globalxmin to include actual process xids. This is a
- * slightly different way of computing it than GetOldestXmin uses, but
- * should give the same result.
+ * Update globalxmin to include actual process xids. This is a slightly
+ * different way of computing it than GetOldestXmin uses, but should give
+ * the same result.
*/
if (TransactionIdPrecedes(xmin, globalxmin))
globalxmin = xmin;
@@ -696,14 +691,14 @@ BackendPidGetProc(int pid)
* Returns 0 if not found or it's a prepared transaction. Note that
* it is up to the caller to be sure that the question remains
* meaningful for long enough for the answer to be used ...
- *
+ *
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*/
int
BackendXidGetPid(TransactionId xid)
{
- int result = 0;
+ int result = 0;
ProcArrayStruct *arrayP = procArray;
int index;
@@ -754,9 +749,8 @@ CountActiveBackends(void)
/*
* Note: for speed, we don't acquire ProcArrayLock. This is a little bit
- * bogus, but since we are only testing fields for zero or nonzero,
- * it should be OK. The result is only used for heuristic purposes
- * anyway...
+ * bogus, but since we are only testing fields for zero or nonzero, it
+ * should be OK. The result is only used for heuristic purposes anyway...
*/
for (index = 0; index < arrayP->numProcs; index++)
{
@@ -854,17 +848,16 @@ XidCacheRemoveRunningXids(TransactionId xid, int nxids, TransactionId *xids)
/*
* We must hold ProcArrayLock exclusively in order to remove transactions
- * from the PGPROC array. (See notes in GetSnapshotData.) It's
- * possible this could be relaxed since we know this routine is only
- * used to abort subtransactions, but pending closer analysis we'd
- * best be conservative.
+ * from the PGPROC array. (See notes in GetSnapshotData.) It's possible
+ * this could be relaxed since we know this routine is only used to abort
+ * subtransactions, but pending closer analysis we'd best be conservative.
*/
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
/*
- * Under normal circumstances xid and xids[] will be in increasing
- * order, as will be the entries in subxids. Scan backwards to avoid
- * O(N^2) behavior when removing a lot of xids.
+ * Under normal circumstances xid and xids[] will be in increasing order,
+ * as will be the entries in subxids. Scan backwards to avoid O(N^2)
+ * behavior when removing a lot of xids.
*/
for (i = nxids - 1; i >= 0; i--)
{
@@ -878,11 +871,13 @@ XidCacheRemoveRunningXids(TransactionId xid, int nxids, TransactionId *xids)
break;
}
}
+
/*
- * Ordinarily we should have found it, unless the cache has overflowed.
- * However it's also possible for this routine to be invoked multiple
- * times for the same subtransaction, in case of an error during
- * AbortSubTransaction. So instead of Assert, emit a debug warning.
+ * Ordinarily we should have found it, unless the cache has
+ * overflowed. However it's also possible for this routine to be
+ * invoked multiple times for the same subtransaction, in case of an
+ * error during AbortSubTransaction. So instead of Assert, emit a
+ * debug warning.
*/
if (j < 0 && !MyProc->subxids.overflowed)
elog(WARNING, "did not find subXID %u in MyProc", anxid);
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index d7498389b56..443c153c90a 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.87 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,13 +71,13 @@ SHMEM_OFFSET ShmemBase; /* start address of shared memory */
static SHMEM_OFFSET ShmemEnd; /* end+1 address of shared memory */
-slock_t *ShmemLock; /* spinlock for shared memory and LWLock allocation */
+slock_t *ShmemLock; /* spinlock for shared memory and LWLock
+ * allocation */
NON_EXEC_STATIC slock_t *ShmemIndexLock; /* spinlock for ShmemIndex */
-NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually
- * allocated for
- * ShmemIndex */
+NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually allocated
+ * for ShmemIndex */
static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
@@ -205,11 +205,10 @@ InitShmemIndex(void)
bool found;
/*
- * Since ShmemInitHash calls ShmemInitStruct, which expects the
- * ShmemIndex hashtable to exist already, we have a bit of a
- * circularity problem in initializing the ShmemIndex itself. The
- * special "ShmemIndex" hash table name will tell ShmemInitStruct
- * to fake it.
+ * Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
+ * hashtable to exist already, we have a bit of a circularity problem in
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * table name will tell ShmemInitStruct to fake it.
*/
/* create the shared memory shmem index */
@@ -274,9 +273,9 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
void *location;
/*
- * Hash tables allocated in shared memory have a fixed directory; it
- * can't grow or other backends wouldn't be able to find it. So, make
- * sure we make it big enough to start with.
+ * Hash tables allocated in shared memory have a fixed directory; it can't
+ * grow or other backends wouldn't be able to find it. So, make sure we
+ * make it big enough to start with.
*
* The shared memory allocator must be specified too.
*/
@@ -286,19 +285,19 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
/* look it up in the shmem index */
location = ShmemInitStruct(name,
- sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
+ sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
&found);
/*
- * shmem index is corrupted. Let someone else give the error
- * message since they have more information
+ * shmem index is corrupted. Let someone else give the error message
+ * since they have more information
*/
if (location == NULL)
return NULL;
/*
- * if it already exists, attach to it rather than allocate and
- * initialize new space
+ * if it already exists, attach to it rather than allocate and initialize
+ * new space
*/
if (found)
hash_flags |= HASH_ATTACH;
@@ -348,11 +347,11 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
else
{
/*
- * If the shmem index doesn't exist, we are bootstrapping: we
- * must be trying to init the shmem index itself.
+ * If the shmem index doesn't exist, we are bootstrapping: we must
+ * be trying to init the shmem index itself.
*
- * Notice that the ShmemIndexLock is held until the shmem index
- * has been completely initialized.
+ * Notice that the ShmemIndexLock is held until the shmem index has
+ * been completely initialized.
*/
*foundPtr = FALSE;
ShmemIndexAlloc = ShmemAlloc(size);
@@ -375,9 +374,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
if (*foundPtr)
{
/*
- * Structure is in the shmem index so someone else has allocated
- * it already. The size better be the same as the size we are
- * trying to initialize to or there is a name conflict (or worse).
+ * Structure is in the shmem index so someone else has allocated it
+ * already. The size better be the same as the size we are trying to
+ * initialize to or there is a name conflict (or worse).
*/
if (result->size != size)
{
@@ -402,7 +401,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
ereport(WARNING,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("could not allocate shared memory segment \"%s\"", name)));
+ errmsg("could not allocate shared memory segment \"%s\"", name)));
*foundPtr = FALSE;
return NULL;
}
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 0d7b01f7966..b5efb510d7d 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.77 2005/08/20 23:26:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.78 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,7 +109,7 @@ SendSharedInvalidMessage(SharedInvalidationMessage *msg)
*/
void
ReceiveSharedInvalidMessages(
- void (*invalFunction) (SharedInvalidationMessage *msg),
+ void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
{
SharedInvalidationMessage data;
@@ -119,20 +119,20 @@ ReceiveSharedInvalidMessages(
for (;;)
{
/*
- * We can discard any pending catchup event, since we will not
- * exit this loop until we're fully caught up.
+ * We can discard any pending catchup event, since we will not exit
+ * this loop until we're fully caught up.
*/
catchupInterruptOccurred = 0;
/*
- * We can run SIGetDataEntry in parallel with other backends
- * running SIGetDataEntry for themselves, since each instance will
- * modify only fields of its own backend's ProcState, and no
- * instance will look at fields of other backends' ProcStates. We
- * express this by grabbing SInvalLock in shared mode. Note that
- * this is not exactly the normal (read-only) interpretation of a
- * shared lock! Look closely at the interactions before allowing
- * SInvalLock to be grabbed in shared mode for any other reason!
+ * We can run SIGetDataEntry in parallel with other backends running
+ * SIGetDataEntry for themselves, since each instance will modify only
+ * fields of its own backend's ProcState, and no instance will look at
+ * fields of other backends' ProcStates. We express this by grabbing
+ * SInvalLock in shared mode. Note that this is not exactly the
+ * normal (read-only) interpretation of a shared lock! Look closely at
+ * the interactions before allowing SInvalLock to be grabbed in shared
+ * mode for any other reason!
*/
LWLockAcquire(SInvalLock, LW_SHARED);
getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
@@ -195,19 +195,18 @@ CatchupInterruptHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it
- * off while messing with the catchup state. (We would have to
- * save and restore it anyway, because PGSemaphore operations
- * inside ProcessCatchupEvent() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it off
+ * while messing with the catchup state. (We would have to save and
+ * restore it anyway, because PGSemaphore operations inside
+ * ProcessCatchupEvent() might reset it.)
*/
ImmediateInterruptOK = false;
/*
* I'm not sure whether some flavors of Unix might allow another
- * SIGUSR1 occurrence to recursively interrupt this routine. To
- * cope with the possibility, we do the same sort of dance that
- * EnableCatchupInterrupt must do --- see that routine for
- * comments.
+ * SIGUSR1 occurrence to recursively interrupt this routine. To cope
+ * with the possibility, we do the same sort of dance that
+ * EnableCatchupInterrupt must do --- see that routine for comments.
*/
catchupInterruptEnabled = 0; /* disable any recursive signal */
catchupInterruptOccurred = 1; /* do at least one iteration */
@@ -225,8 +224,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if
- * needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
@@ -235,8 +233,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
else
{
/*
- * In this path it is NOT SAFE to do much of anything, except
- * this:
+ * In this path it is NOT SAFE to do much of anything, except this:
*/
catchupInterruptOccurred = 1;
}
@@ -258,27 +255,25 @@ void
EnableCatchupInterrupt(void)
{
/*
- * This code is tricky because we are communicating with a signal
- * handler that could interrupt us at any point. If we just checked
- * catchupInterruptOccurred and then set catchupInterruptEnabled, we
- * could fail to respond promptly to a signal that happens in between
- * those two steps. (A very small time window, perhaps, but Murphy's
- * Law says you can hit it...) Instead, we first set the enable flag,
- * then test the occurred flag. If we see an unserviced interrupt has
- * occurred, we re-clear the enable flag before going off to do the
- * service work. (That prevents re-entrant invocation of
- * ProcessCatchupEvent() if another interrupt occurs.) If an interrupt
- * comes in between the setting and clearing of
- * catchupInterruptEnabled, then it will have done the service work
- * and left catchupInterruptOccurred zero, so we have to check again
- * after clearing enable. The whole thing has to be in a loop in case
- * another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no
+ * This code is tricky because we are communicating with a signal handler
+ * that could interrupt us at any point. If we just checked
+ * catchupInterruptOccurred and then set catchupInterruptEnabled, we could
+ * fail to respond promptly to a signal that happens in between those two
+ * steps. (A very small time window, perhaps, but Murphy's Law says you
+ * can hit it...) Instead, we first set the enable flag, then test the
+ * occurred flag. If we see an unserviced interrupt has occurred, we
+ * re-clear the enable flag before going off to do the service work.
+ * (That prevents re-entrant invocation of ProcessCatchupEvent() if
+ * another interrupt occurs.) If an interrupt comes in between the setting
+ * and clearing of catchupInterruptEnabled, then it will have done the
+ * service work and left catchupInterruptOccurred zero, so we have to
+ * check again after clearing enable. The whole thing has to be in a loop
+ * in case another interrupt occurs while we're servicing the first. Once
+ * we get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this
- * code. Hopefully, they all understand what "volatile" means these
- * days.
+ * NB: an overenthusiastic optimizing compiler could easily break this code.
+ * Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
@@ -330,17 +325,17 @@ ProcessCatchupEvent(void)
notify_enabled = DisableNotifyInterrupt();
/*
- * What we need to do here is cause ReceiveSharedInvalidMessages() to
- * run, which will do the necessary work and also reset the
- * catchupInterruptOccurred flag. If we are inside a transaction we
- * can just call AcceptInvalidationMessages() to do this. If we
- * aren't, we start and immediately end a transaction; the call to
+ * What we need to do here is cause ReceiveSharedInvalidMessages() to run,
+ * which will do the necessary work and also reset the
+ * catchupInterruptOccurred flag. If we are inside a transaction we can
+ * just call AcceptInvalidationMessages() to do this. If we aren't, we
+ * start and immediately end a transaction; the call to
* AcceptInvalidationMessages() happens down inside transaction start.
*
- * It is awfully tempting to just call AcceptInvalidationMessages()
- * without the rest of the xact start/stop overhead, and I think that
- * would actually work in the normal case; but I am not sure that
- * things would clean up nicely if we got an error partway through.
+ * It is awfully tempting to just call AcceptInvalidationMessages() without
+ * the rest of the xact start/stop overhead, and I think that would
+ * actually work in the normal case; but I am not sure that things would
+ * clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 612f437322a..3d11c0d29b3 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.60 2005/08/20 23:26:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.61 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,8 +198,8 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidationMessage *data)
{
/*
* Don't panic just yet: slowest backend might have consumed some
- * messages but not yet have done SIDelExpiredDataEntries() to
- * advance minMsgNum. So, make sure minMsgNum is up-to-date.
+ * messages but not yet have done SIDelExpiredDataEntries() to advance
+ * minMsgNum. So, make sure minMsgNum is up-to-date.
*/
SIDelExpiredDataEntries(segP);
numMsgs = segP->maxMsgNum - segP->minMsgNum;
@@ -213,9 +213,9 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidationMessage *data)
/*
* Try to prevent table overflow. When the table is 70% full send a
- * WAKEN_CHILDREN request to the postmaster. The postmaster will send
- * a SIGUSR1 signal to all the backends, which will cause sinval.c to
- * read any pending SI entries.
+ * WAKEN_CHILDREN request to the postmaster. The postmaster will send a
+ * SIGUSR1 signal to all the backends, which will cause sinval.c to read
+ * any pending SI entries.
*
* This should never happen if all the backends are actively executing
* queries, but if a backend is sitting idle then it won't be starting
@@ -302,9 +302,9 @@ SIGetDataEntry(SISeg *segP, int backendId,
stateP->nextMsgNum++;
/*
- * There may be other backends that haven't read the message, so we
- * cannot delete it here. SIDelExpiredDataEntries() should be called
- * to remove dead messages.
+ * There may be other backends that haven't read the message, so we cannot
+ * delete it here. SIDelExpiredDataEntries() should be called to remove
+ * dead messages.
*/
return 1; /* got a message */
}
@@ -338,8 +338,8 @@ SIDelExpiredDataEntries(SISeg *segP)
segP->minMsgNum = min;
/*
- * When minMsgNum gets really large, decrement all message counters so
- * as to forestall overflow of the counters.
+ * When minMsgNum gets really large, decrement all message counters so as
+ * to forestall overflow of the counters.
*/
if (min >= MSGNUMWRAPAROUND)
{
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 9762c769163..74409f3cd0a 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.112 2005/08/12 01:35:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.113 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,8 +82,8 @@ close_lo_relation(bool isCommit)
if (lo_heap_r || lo_index_r)
{
/*
- * Only bother to close if committing; else abort cleanup will
- * handle it
+ * Only bother to close if committing; else abort cleanup will handle
+ * it
*/
if (isCommit)
{
@@ -176,9 +176,9 @@ Oid
inv_create(Oid lobjId)
{
/*
- * Allocate an OID to be the LO's identifier, unless we were told
- * what to use. We can use the index on pg_largeobject for checking
- * OID uniqueness, even though it has additional columns besides OID.
+ * Allocate an OID to be the LO's identifier, unless we were told what to
+ * use. We can use the index on pg_largeobject for checking OID
+ * uniqueness, even though it has additional columns besides OID.
*/
if (!OidIsValid(lobjId))
{
@@ -188,8 +188,8 @@ inv_create(Oid lobjId)
}
/*
- * Create the LO by writing an empty first page for it in
- * pg_largeobject (will fail if duplicate)
+ * Create the LO by writing an empty first page for it in pg_largeobject
+ * (will fail if duplicate)
*/
LargeObjectCreate(lobjId);
@@ -305,8 +305,8 @@ inv_getsize(LargeObjectDesc *obj_desc)
/*
* Because the pg_largeobject index is on both loid and pageno, but we
* constrain only loid, a backwards scan should visit all pages of the
- * large object in reverse pageno order. So, it's sufficient to
- * examine the first valid tuple (== last valid page).
+ * large object in reverse pageno order. So, it's sufficient to examine
+ * the first valid tuple (== last valid page).
*/
while ((tuple = index_getnext(sd, BackwardScanDirection)) != NULL)
{
@@ -423,8 +423,8 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
/*
* We assume the indexscan will deliver pages in order. However,
- * there may be missing pages if the LO contains unwritten
- * "holes". We want missing sections to read out as zeroes.
+ * there may be missing pages if the LO contains unwritten "holes". We
+ * want missing sections to read out as zeroes.
*/
pageoff = ((uint32) data->pageno) * LOBLKSIZE;
if (pageoff > obj_desc->offset)
@@ -536,9 +536,8 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
while (nwritten < nbytes)
{
/*
- * If possible, get next pre-existing page of the LO. We assume
- * the indexscan will deliver these in order --- but there may be
- * holes.
+ * If possible, get next pre-existing page of the LO. We assume the
+ * indexscan will deliver these in order --- but there may be holes.
*/
if (neednextpage)
{
@@ -551,8 +550,8 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
}
/*
- * If we have a pre-existing page, see if it is the page we want
- * to write, or a later one.
+ * If we have a pre-existing page, see if it is the page we want to
+ * write, or a later one.
*/
if (olddata != NULL && olddata->pageno == pageno)
{
@@ -660,8 +659,8 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
CatalogCloseIndexes(indstate);
/*
- * Advance command counter so that my tuple updates will be seen by
- * later large-object operations in this transaction.
+ * Advance command counter so that my tuple updates will be seen by later
+ * large-object operations in this transaction.
*/
CommandCounterIncrement();
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 7edabff6dd4..06de6071f14 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.34 2005/04/29 22:28:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.35 2005/10/15 02:49:26 momjian Exp $
*
* Interface:
*
@@ -130,15 +130,15 @@ InitDeadLockChecking(void)
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
/*
- * FindLockCycle needs at most MaxBackends entries in visitedProcs[]
- * and deadlockDetails[].
+ * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
+ * deadlockDetails[].
*/
visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
/*
- * TopoSort needs to consider at most MaxBackends wait-queue entries,
- * and it needn't run concurrently with FindLockCycle.
+ * TopoSort needs to consider at most MaxBackends wait-queue entries, and
+ * it needn't run concurrently with FindLockCycle.
*/
topoProcs = visitedProcs; /* re-use this space */
beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
@@ -146,33 +146,32 @@ InitDeadLockChecking(void)
/*
* We need to consider rearranging at most MaxBackends/2 wait queues
- * (since it takes at least two waiters in a queue to create a soft
- * edge), and the expanded form of the wait queues can't involve more
- * than MaxBackends total waiters.
+ * (since it takes at least two waiters in a queue to create a soft edge),
+ * and the expanded form of the wait queues can't involve more than
+ * MaxBackends total waiters.
*/
waitOrders = (WAIT_ORDER *)
palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
/*
- * Allow at most MaxBackends distinct constraints in a configuration.
- * (Is this enough? In practice it seems it should be, but I don't
- * quite see how to prove it. If we run out, we might fail to find a
- * workable wait queue rearrangement even though one exists.) NOTE
- * that this number limits the maximum recursion depth of
- * DeadLockCheckRecurse. Making it really big might potentially allow
- * a stack-overflow problem.
+ * Allow at most MaxBackends distinct constraints in a configuration. (Is
+ * this enough? In practice it seems it should be, but I don't quite see
+ * how to prove it. If we run out, we might fail to find a workable wait
+ * queue rearrangement even though one exists.) NOTE that this number
+ * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
+ * really big might potentially allow a stack-overflow problem.
*/
maxCurConstraints = MaxBackends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
/*
* Allow up to 3*MaxBackends constraints to be saved without having to
- * re-run TestConfiguration. (This is probably more than enough, but
- * we can survive if we run low on space by doing excess runs of
- * TestConfiguration to re-compute constraint lists each time needed.)
- * The last MaxBackends entries in possibleConstraints[] are reserved
- * as output workspace for FindLockCycle.
+ * re-run TestConfiguration. (This is probably more than enough, but we
+ * can survive if we run low on space by doing excess runs of
+ * TestConfiguration to re-compute constraint lists each time needed.) The
+ * last MaxBackends entries in possibleConstraints[] are reserved as
+ * output workspace for FindLockCycle.
*/
maxPossibleConstraints = MaxBackends * 4;
possibleConstraints =
@@ -361,9 +360,9 @@ TestConfiguration(PGPROC *startProc)
return -1;
/*
- * Check for cycles involving startProc or any of the procs mentioned
- * in constraints. We check startProc last because if it has a soft
- * cycle still to be dealt with, we want to deal with that first.
+ * Check for cycles involving startProc or any of the procs mentioned in
+ * constraints. We check startProc last because if it has a soft cycle
+ * still to be dealt with, we want to deal with that first.
*/
for (i = 0; i < nCurConstraints; i++)
{
@@ -447,8 +446,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
if (i == 0)
{
/*
- * record total length of cycle --- outer levels will now
- * fill deadlockDetails[]
+ * record total length of cycle --- outer levels will now fill
+ * deadlockDetails[]
*/
Assert(depth <= MaxBackends);
nDeadlockDetails = depth;
@@ -457,8 +456,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
/*
- * Otherwise, we have a cycle but it does not include the
- * start point, so say "no deadlock".
+ * Otherwise, we have a cycle but it does not include the start
+ * point, so say "no deadlock".
*/
return false;
}
@@ -480,8 +479,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are
- * "hard" edges in the waits-for graph.
+ * Scan for procs that already hold conflicting locks. These are "hard"
+ * edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -520,15 +519,14 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
- offsetof(PROCLOCK, lockLink));
+ offsetof(PROCLOCK, lockLink));
}
/*
* Scan for procs that are ahead of this one in the lock's wait queue.
- * Those that have conflicting requests soft-block this one. This
- * must be done after the hard-block search, since if another proc
- * both hard- and soft-blocks this one, we want to call it a hard
- * edge.
+ * Those that have conflicting requests soft-block this one. This must be
+ * done after the hard-block search, since if another proc both hard- and
+ * soft-blocks this one, we want to call it a hard edge.
*
* If there is a proposed re-ordering of the lock's wait order, use that
* rather than the current wait order.
@@ -569,8 +567,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
@@ -610,8 +607,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
@@ -655,8 +651,8 @@ ExpandConstraints(EDGE *constraints,
/*
* Scan constraint list backwards. This is because the last-added
- * constraint is the only one that could fail, and so we want to test
- * it for inconsistency first.
+ * constraint is the only one that could fail, and so we want to test it
+ * for inconsistency first.
*/
for (i = nConstraints; --i >= 0;)
{
@@ -679,8 +675,8 @@ ExpandConstraints(EDGE *constraints,
Assert(nWaitOrderProcs <= MaxBackends);
/*
- * Do the topo sort. TopoSort need not examine constraints after
- * this one, since they must be for different locks.
+ * Do the topo sort. TopoSort need not examine constraints after this
+ * one, since they must be for different locks.
*/
if (!TopoSort(lock, constraints, i + 1,
waitOrders[nWaitOrders].procs))
@@ -739,15 +735,14 @@ TopoSort(LOCK *lock,
}
/*
- * Scan the constraints, and for each proc in the array, generate a
- * count of the number of constraints that say it must be before
- * something else, plus a list of the constraints that say it must be
- * after something else. The count for the j'th proc is stored in
- * beforeConstraints[j], and the head of its list in
- * afterConstraints[j]. Each constraint stores its list link in
- * constraints[i].link (note any constraint will be in just one list).
- * The array index for the before-proc of the i'th constraint is
- * remembered in constraints[i].pred.
+ * Scan the constraints, and for each proc in the array, generate a count
+ * of the number of constraints that say it must be before something else,
+ * plus a list of the constraints that say it must be after something
+ * else. The count for the j'th proc is stored in beforeConstraints[j],
+ * and the head of its list in afterConstraints[j]. Each constraint
+ * stores its list link in constraints[i].link (note any constraint will
+ * be in just one list). The array index for the before-proc of the i'th
+ * constraint is remembered in constraints[i].pred.
*/
MemSet(beforeConstraints, 0, queue_size * sizeof(int));
MemSet(afterConstraints, 0, queue_size * sizeof(int));
@@ -933,7 +928,7 @@ DeadLockReport(void)
DescribeLockTag(&buf2, &info->locktag);
appendStringInfo(&buf,
- _("Process %d waits for %s on %s; blocked by process %d."),
+ _("Process %d waits for %s on %s; blocked by process %d."),
info->pid,
GetLockmodeName(info->lockmode),
buf2.data,
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 7a4ef9f7554..8ffeced9979 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.78 2005/08/01 20:31:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.79 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -145,11 +145,11 @@ LockRelation(Relation relation, LOCKMODE lockmode)
lockmode, false, false);
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
@@ -185,11 +185,11 @@ ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
return false;
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
@@ -429,7 +429,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans
+ * released implicitly at transaction end. But we do use it for subtrans
* IDs.)
*/
void
@@ -451,7 +451,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
@@ -477,8 +477,8 @@ XactLockTableWait(TransactionId xid)
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
@@ -514,8 +514,8 @@ ConditionalXactLockTableWait(TransactionId xid)
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c11070a1309..245b8eeee23 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.157 2005/08/20 23:26:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.158 2005/10/15 02:49:26 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
@@ -46,7 +46,7 @@
/* This configuration variable is used to set the lock table size */
int max_locks_per_xact; /* set by guc.c */
-#define NLOCKENTS() \
+#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
@@ -155,12 +155,11 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
if (LOCK_DEBUG_ENABLED((LOCK *) MAKE_PTR(proclockP->tag.lock)))
elog(LOG,
- "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
+ "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
PROCLOCK_LOCKMETHOD(*(proclockP)),
proclockP->tag.proc, (int) proclockP->holdMask);
}
-
#else /* not LOCK_DEBUG */
#define LOCK_PRINT(where, lock, type)
@@ -171,11 +170,11 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
static void RemoveLocalLock(LOCALLOCK *locallock);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
- ResourceOwner owner);
+ ResourceOwner owner);
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
- PROCLOCK *proclock, LockMethod lockMethodTable);
+ PROCLOCK *proclock, LockMethod lockMethodTable);
static void CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock,
- PROCLOCK *proclock, bool wakeupNeeded);
+ PROCLOCK *proclock, bool wakeupNeeded);
/*
@@ -320,14 +319,13 @@ LockMethodTableInit(const char *tabName,
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/*
- * allocate a non-shared hash table for LOCALLOCK structs. This is
- * used to store lock counts and resource owner information.
+ * allocate a non-shared hash table for LOCALLOCK structs. This is used
+ * to store lock counts and resource owner information.
*
- * The non-shared table could already exist in this process (this occurs
- * when the postmaster is recreating shared memory after a backend
- * crash). If so, delete and recreate it. (We could simply leave it,
- * since it ought to be empty in the postmaster, but for safety let's
- * zap it.)
+ * The non-shared table could already exist in this process (this occurs when
+ * the postmaster is recreating shared memory after a backend crash). If
+ * so, delete and recreate it. (We could simply leave it, since it ought
+ * to be empty in the postmaster, but for safety let's zap it.)
*/
if (LockMethodLocalHash[lockmethodid])
hash_destroy(LockMethodLocalHash[lockmethodid]);
@@ -499,7 +497,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
locallock->lockOwners = NULL;
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
- locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
+ locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
}
else
{
@@ -518,8 +516,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
}
/*
- * If we already hold the lock, we can just increase the count
- * locally.
+ * If we already hold the lock, we can just increase the count locally.
*/
if (locallock->nLocks > 0)
{
@@ -537,8 +534,8 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* Find or create a lock with this tag.
*
- * Note: if the locallock object already existed, it might have a pointer
- * to the lock already ... but we probably should not assume that that
+ * Note: if the locallock object already existed, it might have a pointer to
+ * the lock already ... but we probably should not assume that that
* pointer is valid, since a lock object with no locks can go away
* anytime.
*/
@@ -551,7 +548,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->lock = lock;
@@ -581,7 +578,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
@@ -612,7 +609,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->proclock = proclock;
@@ -636,29 +633,28 @@ LockAcquire(LOCKMETHODID lockmethodid,
#ifdef CHECK_DEADLOCK_RISK
/*
- * Issue warning if we already hold a lower-level lock on this
- * object and do not hold a lock of the requested level or higher.
- * This indicates a deadlock-prone coding practice (eg, we'd have
- * a deadlock if another backend were following the same code path
- * at about the same time).
+ * Issue warning if we already hold a lower-level lock on this object
+ * and do not hold a lock of the requested level or higher. This
+ * indicates a deadlock-prone coding practice (eg, we'd have a
+ * deadlock if another backend were following the same code path at
+ * about the same time).
*
- * This is not enabled by default, because it may generate log
- * entries about user-level coding practices that are in fact safe
- * in context. It can be enabled to help find system-level
- * problems.
+ * This is not enabled by default, because it may generate log entries
+ * about user-level coding practices that are in fact safe in context.
+ * It can be enabled to help find system-level problems.
*
* XXX Doing numeric comparison on the lockmodes is a hack; it'd be
* better to use a table. For now, though, this works.
*/
{
- int i;
+ int i;
for (i = lockMethodTable->numLockModes; i > 0; i--)
{
if (proclock->holdMask & LOCKBIT_ON(i))
{
if (i >= (int) lockmode)
- break; /* safe: we have a lock >= req level */
+ break; /* safe: we have a lock >= req level */
elog(LOG, "deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
@@ -673,16 +669,16 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately. The other counts don't increment till we get the lock.
+ * requests, whether granted or waiting, so increment those immediately.
+ * The other counts don't increment till we get the lock.
*/
lock->nRequested++;
lock->requested[lockmode]++;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
/*
- * We shouldn't already hold the desired lock; else locallock table
- * is broken.
+ * We shouldn't already hold the desired lock; else locallock table is
+ * broken.
*/
if (proclock->holdMask & LOCKBIT_ON(lockmode))
elog(ERROR, "lock %s on object %u/%u/%u is already held",
@@ -691,9 +687,9 @@ LockAcquire(LOCKMETHODID lockmethodid,
lock->tag.locktag_field3);
/*
- * If lock requested conflicts with locks requested by waiters, must
- * join wait queue. Otherwise, check for conflict with already-held
- * locks. (That's last because most complex check.)
+ * If lock requested conflicts with locks requested by waiters, must join
+ * wait queue. Otherwise, check for conflict with already-held locks.
+ * (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
status = STATUS_FOUND;
@@ -713,8 +709,8 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* We can't acquire the lock immediately. If caller specified no
- * blocking, remove useless table entries and return NOT_AVAIL
- * without waiting.
+ * blocking, remove useless table entries and return NOT_AVAIL without
+ * waiting.
*/
if (dontWait)
{
@@ -753,8 +749,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* NOTE: do not do any material change of state between here and
* return. All required changes in locktable state must have been
- * done when the lock was granted to us --- see notes in
- * WaitOnLock.
+ * done when the lock was granted to us --- see notes in WaitOnLock.
*/
/*
@@ -820,13 +815,13 @@ LockCheckConflicts(LockMethod lockMethodTable,
int i;
/*
- * first check for global conflicts: If no locks conflict with my
- * request, then I get the lock.
+ * first check for global conflicts: If no locks conflict with my request,
+ * then I get the lock.
*
- * Checking for conflict: lock->grantMask represents the types of
- * currently held locks. conflictTable[lockmode] has a bit set for
- * each type of lock that conflicts with request. Bitwise compare
- * tells if there is a conflict.
+ * Checking for conflict: lock->grantMask represents the types of currently
+ * held locks. conflictTable[lockmode] has a bit set for each type of
+ * lock that conflicts with request. Bitwise compare tells if there is a
+ * conflict.
*/
if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask))
{
@@ -835,15 +830,15 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock.
- * We have to construct a conflict mask that does not reflect our own
- * locks, but only lock types held by other processes.
+ * Rats. Something conflicts. But it could still be my own lock. We have
+ * to construct a conflict mask that does not reflect our own locks, but
+ * only lock types held by other processes.
*/
myLocks = proclock->holdMask;
otherLocks = 0;
for (i = 1; i <= numLockModes; i++)
{
- int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
+ int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
if (lock->granted[i] > myHolding)
otherLocks |= LOCKBIT_ON(i);
@@ -851,8 +846,8 @@ LockCheckConflicts(LockMethod lockMethodTable,
/*
* now check again for conflicts. 'otherLocks' describes the types of
- * locks held by other processes. If one of these conflicts with the
- * kind of lock that I want, there is a conflict and I have to sleep.
+ * locks held by other processes. If one of these conflicts with the kind
+ * of lock that I want, there is a conflict and I have to sleep.
*/
if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
{
@@ -891,7 +886,7 @@ GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
}
/*
- * UnGrantLock -- opposite of GrantLock.
+ * UnGrantLock -- opposite of GrantLock.
*
* Updates the lock and proclock data structures to show that the lock
* is no longer held nor requested by the current holder.
@@ -903,7 +898,7 @@ static bool
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
PROCLOCK *proclock, LockMethod lockMethodTable)
{
- bool wakeupNeeded = false;
+ bool wakeupNeeded = false;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
@@ -926,13 +921,13 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
/*
- * We need only run ProcLockWakeup if the released lock conflicts with
- * at least one of the lock types requested by waiter(s). Otherwise
- * whatever conflict made them wait must still exist. NOTE: before
- * MVCC, we could skip wakeup if lock->granted[lockmode] was still
- * positive. But that's not true anymore, because the remaining
- * granted locks might belong to some waiter, who could now be
- * awakened because he doesn't conflict with his own locks.
+ * We need only run ProcLockWakeup if the released lock conflicts with at
+ * least one of the lock types requested by waiter(s). Otherwise whatever
+ * conflict made them wait must still exist. NOTE: before MVCC, we could
+ * skip wakeup if lock->granted[lockmode] was still positive. But that's
+ * not true anymore, because the remaining granted locks might belong to
+ * some waiter, who could now be awakened because he doesn't conflict with
+ * his own locks.
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
wakeupNeeded = true;
@@ -947,7 +942,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -961,8 +956,8 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
bool wakeupNeeded)
{
/*
- * If this was my last hold on this lock, delete my entry in the
- * proclock table.
+ * If this was my last hold on this lock, delete my entry in the proclock
+ * table.
*/
if (proclock->holdMask == 0)
{
@@ -978,8 +973,8 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
if (lock->nRequested == 0)
{
/*
- * The caller just released the last lock, so garbage-collect the
- * lock object.
+ * The caller just released the last lock, so garbage-collect the lock
+ * object.
*/
LOCK_PRINT("CleanUpLock: deleting", lock, 0);
Assert(SHMQueueEmpty(&(lock->procLocks)));
@@ -991,7 +986,7 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
else if (wakeupNeeded)
{
/* There are waiters on this lock, so wake them up. */
- ProcLockWakeup(LockMethods[lockmethodid], lock);
+ ProcLockWakeup(LockMethods[lockmethodid], lock);
}
}
@@ -1075,16 +1070,15 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
/*
* NOTE: Think not to put any shared-state cleanup after the call to
- * ProcSleep, in either the normal or failure path. The lock state
- * must be fully set by the lock grantor, or by CheckDeadLock if we
- * give up waiting for the lock. This is necessary because of the
- * possibility that a cancel/die interrupt will interrupt ProcSleep
- * after someone else grants us the lock, but before we've noticed it.
- * Hence, after granting, the locktable state must fully reflect the
- * fact that we own the lock; we can't do additional work on return.
- * Contrariwise, if we fail, any cleanup must happen in xact abort
- * processing, not here, to ensure it will also happen in the
- * cancel/die case.
+ * ProcSleep, in either the normal or failure path. The lock state must
+ * be fully set by the lock grantor, or by CheckDeadLock if we give up
+ * waiting for the lock. This is necessary because of the possibility
+ * that a cancel/die interrupt will interrupt ProcSleep after someone else
+ * grants us the lock, but before we've noticed it. Hence, after granting,
+ * the locktable state must fully reflect the fact that we own the lock;
+ * we can't do additional work on return. Contrariwise, if we fail, any
+ * cleanup must happen in xact abort processing, not here, to ensure it
+ * will also happen in the cancel/die case.
*/
if (ProcSleep(lockMethodTable,
@@ -1093,8 +1087,7 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
locallock->proclock) != STATUS_OK)
{
/*
- * We failed as a result of a deadlock, see CheckDeadLock(). Quit
- * now.
+ * We failed as a result of a deadlock, see CheckDeadLock(). Quit now.
*/
awaitedLock = NULL;
LOCK_PRINT("WaitOnLock: aborting on lock",
@@ -1102,8 +1095,8 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
LWLockRelease(lockMethodTable->masterLock);
/*
- * Now that we aren't holding the LockMgrLock, we can give an
- * error report including details about the detected deadlock.
+ * Now that we aren't holding the LockMgrLock, we can give an error
+ * report including details about the detected deadlock.
*/
DeadLockReport();
/* not reached */
@@ -1163,15 +1156,15 @@ RemoveFromWaitQueue(PGPROC *proc)
* Delete the proclock immediately if it represents no already-held locks.
* (This must happen now because if the owner of the lock decides to
* release it, and the requested/granted counts then go to zero,
- * LockRelease expects there to be no remaining proclocks.)
- * Then see if any other waiters for the lock can be woken up now.
+ * LockRelease expects there to be no remaining proclocks.) Then see if
+ * any other waiters for the lock can be woken up now.
*/
CleanUpLock(lockmethodid, waitLock, proclock, true);
}
/*
* LockRelease -- look up 'locktag' in lock table 'lockmethodid' and
- * release one 'lockmode' lock on it. Release a session lock if
+ * release one 'lockmode' lock on it. Release a session lock if
* 'sessionLock' is true, else release a regular transaction lock.
*
* Side Effects: find any waiting processes that are now wakable,
@@ -1219,8 +1212,7 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
HASH_FIND, NULL);
/*
- * let the caller print its own error message, too. Do not
- * ereport(ERROR).
+ * let the caller print its own error message, too. Do not ereport(ERROR).
*/
if (!locallock || locallock->nLocks <= 0)
{
@@ -1268,8 +1260,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
}
/*
- * Decrease the total local count. If we're still holding the lock,
- * we're done.
+ * Decrease the total local count. If we're still holding the lock, we're
+ * done.
*/
locallock->nLocks--;
@@ -1285,8 +1277,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
/*
* We don't need to re-find the lock or proclock, since we kept their
- * addresses in the locallock table, and they couldn't have been
- * removed while we were holding a lock on them.
+ * addresses in the locallock table, and they couldn't have been removed
+ * while we were holding a lock on them.
*/
lock = locallock->lock;
LOCK_PRINT("LockRelease: found", lock, lockmode);
@@ -1294,8 +1286,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
PROCLOCK_PRINT("LockRelease: found", proclock);
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
@@ -1356,10 +1348,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and get rid of those.
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * entries, then we scan the process's proclocks and get rid of those. We
+ * do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
@@ -1368,8 +1360,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
@@ -1381,9 +1373,9 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
continue;
/*
- * If we are asked to release all locks, we can just zap the
- * entry. Otherwise, must scan to see if there are session locks.
- * We assume there is at most one lockOwners entry for session locks.
+ * If we are asked to release all locks, we can just zap the entry.
+ * Otherwise, must scan to see if there are session locks. We assume
+ * there is at most one lockOwners entry for session locks.
*/
if (!allLocks)
{
@@ -1431,7 +1423,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
@@ -1581,8 +1573,8 @@ LockReassignCurrentOwner(void)
continue;
/*
- * Scan to see if there are any locks belonging to current owner
- * or its parent
+ * Scan to see if there are any locks belonging to current owner or
+ * its parent
*/
lockOwners = locallock->lockOwners;
for (i = locallock->numLockOwners - 1; i >= 0; i--)
@@ -1644,7 +1636,7 @@ AtPrepare_Locks(void)
{
TwoPhaseLockRecord record;
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
- int i;
+ int i;
/* Ignore items that are not of the lockmethod to be processed */
if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
@@ -1722,12 +1714,12 @@ PostPrepare_Locks(TransactionId xid)
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and transfer them
- * to the target proc.
+ * entries, then we scan the process's proclocks and transfer them to the
+ * target proc.
*
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * We do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
@@ -1736,8 +1728,8 @@ PostPrepare_Locks(TransactionId xid)
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
@@ -1771,7 +1763,7 @@ PostPrepare_Locks(TransactionId xid)
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
@@ -1797,13 +1789,13 @@ PostPrepare_Locks(TransactionId xid)
holdMask = proclock->holdMask;
/*
- * We cannot simply modify proclock->tag.proc to reassign ownership
- * of the lock, because that's part of the hash key and the proclock
+ * We cannot simply modify proclock->tag.proc to reassign ownership of
+ * the lock, because that's part of the hash key and the proclock
* would then be in the wrong hash chain. So, unlink and delete the
- * old proclock; create a new one with the right contents; and link
- * it into place. We do it in this order to be certain we won't
- * run out of shared memory (the way dynahash.c works, the deleted
- * object is certain to be available for reallocation).
+ * old proclock; create a new one with the right contents; and link it
+ * into place. We do it in this order to be certain we won't run out
+ * of shared memory (the way dynahash.c works, the deleted object is
+ * certain to be available for reallocation).
*/
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
@@ -1823,7 +1815,7 @@ PostPrepare_Locks(TransactionId xid)
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
- ereport(PANIC, /* should not happen */
+ ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
@@ -1881,11 +1873,11 @@ LockShmemSize(void)
size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
/*
- * Note we count only one pair of hash tables, since the userlocks
- * table actually overlays the main one.
+ * Note we count only one pair of hash tables, since the userlocks table
+ * actually overlays the main one.
*
- * Since the lockHash entry count above is only an estimate, add 10%
- * safety margin.
+ * Since the lockHash entry count above is only an estimate, add 10% safety
+ * margin.
*/
size = add_size(size, size / 10);
@@ -2000,7 +1992,7 @@ DumpLocks(PGPROC *proc)
LOCK_PRINT("DumpLocks", lock, 0);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
}
}
@@ -2046,7 +2038,6 @@ DumpAllLocks(void)
elog(LOG, "DumpAllLocks: proclock->tag.lock = NULL");
}
}
-
#endif /* LOCK_DEBUG */
/*
@@ -2066,7 +2057,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
LOCK *lock;
@@ -2102,7 +2093,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
@@ -2131,7 +2122,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
@@ -2162,7 +2153,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
@@ -2185,8 +2176,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately.
+ * requests, whether granted or waiting, so increment those immediately.
*/
lock->nRequested++;
lock->requested[lockmode]++;
@@ -2220,7 +2210,7 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
PROCLOCKTAG proclocktag;
@@ -2256,7 +2246,7 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
/*
* Re-find the proclock object (ditto).
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash[lockmethodid],
@@ -2266,8 +2256,8 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
elog(PANIC, "failed to re-find shared proclock object");
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index ce0606a3c4e..5526c77a676 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.33 2005/10/12 16:55:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.34 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,10 +44,10 @@ typedef struct LWLock
/*
* All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.) We force the array stride to
+ * (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
@@ -101,7 +101,6 @@ LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
if (Trace_lwlocks)
elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
-
#else /* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
@@ -117,10 +116,10 @@ NumLWLocks(void)
int numLocks;
/*
- * Possibly this logic should be spread out among the affected
- * modules, the same way that shmem space estimation is done. But for
- * now, there are few enough users of LWLocks that we can get away
- * with just keeping the knowledge here.
+ * Possibly this logic should be spread out among the affected modules,
+ * the same way that shmem space estimation is done. But for now, there
+ * are few enough users of LWLocks that we can get away with just keeping
+ * the knowledge here.
*/
/* Predefined LWLocks */
@@ -136,8 +135,8 @@ NumLWLocks(void)
numLocks += NUM_SLRU_BUFFERS;
/*
- * multixact.c needs one per MultiXact buffer, but there are
- * two SLRU areas for MultiXact
+ * multixact.c needs one per MultiXact buffer, but there are two SLRU
+ * areas for MultiXact
*/
numLocks += 2 * NUM_SLRU_BUFFERS;
@@ -226,6 +225,7 @@ LWLockId
LWLockAssign(void)
{
LWLockId result;
+
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
@@ -261,8 +261,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/*
* We can't wait if we haven't got a PGPROC. This should only occur
- * during bootstrap or shared memory initialization. Put an Assert
- * here to catch unsafe coding practices.
+ * during bootstrap or shared memory initialization. Put an Assert here
+ * to catch unsafe coding practices.
*/
Assert(!(proc == NULL && IsUnderPostmaster));
@@ -271,9 +271,9 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
@@ -282,17 +282,16 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* LWLockRelease.
*
* NOTE: it might seem better to have LWLockRelease actually grant us the
- * lock, rather than retrying and possibly having to go back to sleep.
- * But in practice that is no good because it means a process swap for
- * every lock acquisition when two or more processes are contending
- * for the same lock. Since LWLocks are normally used to protect
- * not-very-long sections of computation, a process needs to be able
- * to acquire and release the same lock many times during a single CPU
- * time slice, even in the presence of contention. The efficiency of
- * being able to do that outweighs the inefficiency of sometimes
- * wasting a process dispatch cycle because the lock is not free when
- * a released waiter finally gets to run. See pgsql-hackers archives
- * for 29-Dec-01.
+ * lock, rather than retrying and possibly having to go back to sleep. But
+ * in practice that is no good because it means a process swap for every
+ * lock acquisition when two or more processes are contending for the same
+ * lock. Since LWLocks are normally used to protect not-very-long
+ * sections of computation, a process needs to be able to acquire and
+ * release the same lock many times during a single CPU time slice, even
+ * in the presence of contention. The efficiency of being able to do that
+ * outweighs the inefficiency of sometimes wasting a process dispatch
+ * cycle because the lock is not free when a released waiter finally gets
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
@@ -334,8 +333,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* Add myself to wait queue.
*
* If we don't have a PGPROC structure, there's no way to wait. This
- * should never occur, since MyProc should only be null during
- * shared memory initialization.
+ * should never occur, since MyProc should only be null during shared
+ * memory initialization.
*/
if (proc == NULL)
elog(FATAL, "cannot wait without a PGPROC structure");
@@ -356,13 +355,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* Wait until awakened.
*
* Since we share the process wait semaphore with the regular lock
- * manager and ProcWaitForSignal, and we may need to acquire an
- * LWLock while one of those is pending, it is possible that we
- * get awakened for a reason other than being signaled by
- * LWLockRelease. If so, loop back and wait again. Once we've
- * gotten the LWLock, re-increment the sema by the number of
- * additional signals received, so that the lock manager or signal
- * manager will see the received signal when it next waits.
+ * manager and ProcWaitForSignal, and we may need to acquire an LWLock
+ * while one of those is pending, it is possible that we get awakened
+ * for a reason other than being signaled by LWLockRelease. If so,
+ * loop back and wait again. Once we've gotten the LWLock,
+ * re-increment the sema by the number of additional signals received,
+ * so that the lock manager or signal manager will see the received
+ * signal when it next waits.
*/
LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
@@ -414,9 +413,9 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
@@ -477,8 +476,8 @@ LWLockRelease(LWLockId lockid)
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
/*
- * Remove lock from list of locks held. Usually, but not always, it
- * will be the latest-acquired lock; so search array backwards.
+ * Remove lock from list of locks held. Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
*/
for (i = num_held_lwlocks; --i >= 0;)
{
@@ -504,10 +503,10 @@ LWLockRelease(LWLockId lockid)
}
/*
- * See if I need to awaken any waiters. If I released a non-last
- * shared hold, there cannot be anything to do. Also, do not awaken
- * any waiters if someone has already awakened waiters that haven't
- * yet acquired the lock.
+ * See if I need to awaken any waiters. If I released a non-last shared
+ * hold, there cannot be anything to do. Also, do not awaken any waiters
+ * if someone has already awakened waiters that haven't yet acquired the
+ * lock.
*/
head = lock->head;
if (head != NULL)
@@ -515,9 +514,9 @@ LWLockRelease(LWLockId lockid)
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
- * Remove the to-be-awakened PGPROCs from the queue. If the
- * front waiter wants exclusive lock, awaken him only.
- * Otherwise awaken as many waiters as want shared access.
+ * Remove the to-be-awakened PGPROCs from the queue. If the front
+ * waiter wants exclusive lock, awaken him only. Otherwise awaken
+ * as many waiters as want shared access.
*/
proc = head;
if (!proc->lwExclusive)
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 6005cb7ee53..1c26a5934ba 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.166 2005/10/13 06:24:05 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.167 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,8 +166,7 @@ InitProcGlobal(void)
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
/*
- * Pre-create the PGPROC structures and create a semaphore for
- * each.
+ * Pre-create the PGPROC structures and create a semaphore for each.
*/
procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
if (!procs)
@@ -207,8 +206,8 @@ InitProcess(void)
volatile PROC_HDR *procglobal = ProcGlobal;
/*
- * ProcGlobal should be set by a previous call to InitProcGlobal (if
- * we are a backend, we inherit this by fork() from the postmaster).
+ * ProcGlobal should be set by a previous call to InitProcGlobal (if we
+ * are a backend, we inherit this by fork() from the postmaster).
*/
if (procglobal == NULL)
elog(PANIC, "proc header uninitialized");
@@ -217,11 +216,11 @@ InitProcess(void)
elog(ERROR, "you already exist");
/*
- * Try to get a proc struct from the free list. If this fails, we
- * must be out of PGPROC structures (not to mention semaphores).
+ * Try to get a proc struct from the free list. If this fails, we must be
+ * out of PGPROC structures (not to mention semaphores).
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
@@ -238,9 +237,9 @@ InitProcess(void)
else
{
/*
- * If we reach here, all the PGPROCs are in use. This is one of
- * the possible places to detect "too many backends", so give the
- * standard error message.
+ * If we reach here, all the PGPROCs are in use. This is one of the
+ * possible places to detect "too many backends", so give the standard
+ * error message.
*/
SpinLockRelease(ProcStructLock);
ereport(FATAL,
@@ -278,14 +277,14 @@ InitProcess(void)
on_shmem_exit(ProcKill, 0);
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Now that we have a PGPROC, we could try to acquire locks, so
- * initialize the deadlock checker.
+ * Now that we have a PGPROC, we could try to acquire locks, so initialize
+ * the deadlock checker.
*/
InitDeadLockChecking();
}
@@ -322,8 +321,8 @@ InitDummyProcess(int proctype)
* Just for paranoia's sake, we use the ProcStructLock to protect
* assignment and releasing of DummyProcs entries.
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
@@ -347,8 +346,8 @@ InitDummyProcess(int proctype)
SpinLockRelease(ProcStructLock);
/*
- * Initialize all fields of MyProc, except MyProc->sem which was set
- * up by InitProcGlobal.
+ * Initialize all fields of MyProc, except MyProc->sem which was set up by
+ * InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -369,8 +368,8 @@ InitDummyProcess(int proctype)
on_shmem_exit(DummyProcKill, Int32GetDatum(proctype));
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
}
@@ -385,6 +384,7 @@ HaveNFreeProcs(int n)
{
SHMEM_OFFSET offset;
PGPROC *proc;
+
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
@@ -436,9 +436,9 @@ LockWaitCancel(void)
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
- * granted us the lock, or perhaps they detected a deadlock. If
- * they did grant us the lock, we'd better remember it in our
- * local lock table.
+ * granted us the lock, or perhaps they detected a deadlock. If they
+ * did grant us the lock, we'd better remember it in our local lock
+ * table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
@@ -451,17 +451,17 @@ LockWaitCancel(void)
/*
* Reset the proc wait semaphore to zero. This is necessary in the
* scenario where someone else granted us the lock we wanted before we
- * were able to remove ourselves from the wait-list. The semaphore
- * will have been bumped to 1 by the would-be grantor, and since we
- * are no longer going to wait on the sema, we have to force it back
- * to zero. Otherwise, our next attempt to wait for a lock will fall
- * through prematurely.
+ * were able to remove ourselves from the wait-list. The semaphore will
+ * have been bumped to 1 by the would-be grantor, and since we are no
+ * longer going to wait on the sema, we have to force it back to zero.
+ * Otherwise, our next attempt to wait for a lock will fall through
+ * prematurely.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Return true even if we were kicked off the lock before we were able
- * to remove ourselves.
+ * Return true even if we were kicked off the lock before we were able to
+ * remove ourselves.
*/
return true;
}
@@ -508,8 +508,8 @@ ProcKill(int code, Datum arg)
Assert(MyProc != NULL);
/*
- * Release any LW locks I am holding. There really shouldn't be any,
- * but it's cheap to check again before we cut the knees off the LWLock
+ * Release any LW locks I am holding. There really shouldn't be any, but
+ * it's cheap to check again before we cut the knees off the LWLock
* facility by releasing our PGPROC ...
*/
LWLockReleaseAll();
@@ -640,20 +640,19 @@ ProcSleep(LockMethod lockMethodTable,
/*
* Determine where to add myself in the wait queue.
*
- * Normally I should go at the end of the queue. However, if I already
- * hold locks that conflict with the request of any previous waiter,
- * put myself in the queue just in front of the first such waiter.
- * This is not a necessary step, since deadlock detection would move
- * me to before that waiter anyway; but it's relatively cheap to
- * detect such a conflict immediately, and avoid delaying till
- * deadlock timeout.
+ * Normally I should go at the end of the queue. However, if I already hold
+ * locks that conflict with the request of any previous waiter, put myself
+ * in the queue just in front of the first such waiter. This is not a
+ * necessary step, since deadlock detection would move me to before that
+ * waiter anyway; but it's relatively cheap to detect such a conflict
+ * immediately, and avoid delaying till deadlock timeout.
*
- * Special case: if I find I should go in front of some waiter, check to
- * see if I conflict with already-held locks or the requests before
- * that waiter. If not, then just grant myself the requested lock
- * immediately. This is the same as the test for immediate grant in
- * LockAcquire, except we are only considering the part of the wait
- * queue before my insertion point.
+ * Special case: if I find I should go in front of some waiter, check to see
+ * if I conflict with already-held locks or the requests before that
+ * waiter. If not, then just grant myself the requested lock immediately.
+ * This is the same as the test for immediate grant in LockAcquire, except
+ * we are only considering the part of the wait queue before my insertion
+ * point.
*/
if (myHeldLocks != 0)
{
@@ -669,12 +668,11 @@ ProcSleep(LockMethod lockMethodTable,
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean
- * up correctly is to call RemoveFromWaitQueue(), but
- * we can't do that until we are *on* the wait queue.
- * So, set a flag to check below, and break out of
- * loop. Also, record deadlock info for later
- * message.
+ * Yes, so we have a deadlock. Easiest way to clean up
+ * correctly is to call RemoveFromWaitQueue(), but we
+ * can't do that until we are *on* the wait queue. So, set
+ * a flag to check below, and break out of loop. Also,
+ * record deadlock info for later message.
*/
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
early_deadlock = true;
@@ -702,8 +700,8 @@ ProcSleep(LockMethod lockMethodTable,
}
/*
- * If we fall out of loop normally, proc points to waitQueue head,
- * so we will insert at tail of queue as desired.
+ * If we fall out of loop normally, proc points to waitQueue head, so
+ * we will insert at tail of queue as desired.
*/
}
else
@@ -713,8 +711,7 @@ ProcSleep(LockMethod lockMethodTable,
}
/*
- * Insert self into queue, ahead of the given proc (or at tail of
- * queue).
+ * Insert self into queue, ahead of the given proc (or at tail of queue).
*/
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
waitQueue->size++;
@@ -729,9 +726,9 @@ ProcSleep(LockMethod lockMethodTable,
MyProc->waitStatus = STATUS_ERROR; /* initialize result for error */
/*
- * If we detected deadlock, give up without waiting. This must agree
- * with CheckDeadLock's recovery code, except that we shouldn't
- * release the semaphore since we haven't tried to lock it yet.
+ * If we detected deadlock, give up without waiting. This must agree with
+ * CheckDeadLock's recovery code, except that we shouldn't release the
+ * semaphore since we haven't tried to lock it yet.
*/
if (early_deadlock)
{
@@ -746,39 +743,38 @@ ProcSleep(LockMethod lockMethodTable,
* Release the locktable's masterLock.
*
* NOTE: this may also cause us to exit critical-section state, possibly
- * allowing a cancel/die interrupt to be accepted. This is OK because
- * we have recorded the fact that we are waiting for a lock, and so
+ * allowing a cancel/die interrupt to be accepted. This is OK because we
+ * have recorded the fact that we are waiting for a lock, and so
* LockWaitCancel will clean up if cancel/die happens.
*/
LWLockRelease(masterLock);
/*
- * Set timer so we can wake up after awhile and check for a deadlock.
- * If a deadlock is detected, the handler releases the process's
- * semaphore and sets MyProc->waitStatus = STATUS_ERROR, allowing us
- * to know that we must report failure rather than success.
+ * Set timer so we can wake up after awhile and check for a deadlock. If a
+ * deadlock is detected, the handler releases the process's semaphore and
+ * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
+ * must report failure rather than success.
*
- * By delaying the check until we've waited for a bit, we can avoid
- * running the rather expensive deadlock-check code in most cases.
+ * By delaying the check until we've waited for a bit, we can avoid running
+ * the rather expensive deadlock-check code in most cases.
*/
if (!enable_sig_alarm(DeadlockTimeout, false))
elog(FATAL, "could not set timer for process wakeup");
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the
- * semaphore implementation. Note also that if CheckDeadLock is
- * invoked but does not detect a deadlock, PGSemaphoreLock() will
- * continue to wait. There used to be a loop here, but it was useless
- * code...
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. Note also that if CheckDeadLock is invoked but does
+ * not detect a deadlock, PGSemaphoreLock() will continue to wait. There
+ * used to be a loop here, but it was useless code...
*
- * We pass interruptOK = true, which eliminates a window in which
- * cancel/die interrupts would be held off undesirably. This is a
- * promise that we don't mind losing control to a cancel/die interrupt
- * here. We don't, because we have no shared-state-change work to do
- * after being granted the lock (the grantor did it all). We do have
- * to worry about updating the locallock table, but if we lose control
- * to an error, LockWaitCancel will fix that up.
+ * We pass interruptOK = true, which eliminates a window in which cancel/die
+ * interrupts would be held off undesirably. This is a promise that we
+ * don't mind losing control to a cancel/die interrupt here. We don't,
+ * because we have no shared-state-change work to do after being granted
+ * the lock (the grantor did it all). We do have to worry about updating
+ * the locallock table, but if we lose control to an error, LockWaitCancel
+ * will fix that up.
*/
PGSemaphoreLock(&MyProc->sem, true);
@@ -789,9 +785,9 @@ ProcSleep(LockMethod lockMethodTable,
elog(FATAL, "could not disable timer for process wakeup");
/*
- * Re-acquire the locktable's masterLock. We have to do this to hold
- * off cancel/die interrupts before we can mess with waitingForLock
- * (else we might have a missed or duplicated locallock update).
+ * Re-acquire the locktable's masterLock. We have to do this to hold off
+ * cancel/die interrupts before we can mess with waitingForLock (else we
+ * might have a missed or duplicated locallock update).
*/
LWLockAcquire(masterLock, LW_EXCLUSIVE);
@@ -879,8 +875,8 @@ ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
LOCKMODE lockmode = proc->waitLockMode;
/*
- * Waken if (a) doesn't conflict with requests of earlier waiters,
- * and (b) doesn't conflict with already-held locks.
+ * Waken if (a) doesn't conflict with requests of earlier waiters, and
+ * (b) doesn't conflict with already-held locks.
*/
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
LockCheckConflicts(lockMethodTable,
@@ -894,16 +890,15 @@ ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
proc = ProcWakeup(proc, STATUS_OK);
/*
- * ProcWakeup removes proc from the lock's waiting process
- * queue and returns the next proc in chain; don't use proc's
- * next-link, because it's been cleared.
+ * ProcWakeup removes proc from the lock's waiting process queue
+ * and returns the next proc in chain; don't use proc's next-link,
+ * because it's been cleared.
*/
}
else
{
/*
- * Cannot wake this guy. Remember his request for later
- * checks.
+ * Cannot wake this guy. Remember his request for later checks.
*/
aheadRequests |= LOCKBIT_ON(lockmode);
proc = (PGPROC *) MAKE_PTR(proc->links.next);
@@ -928,22 +923,21 @@ CheckDeadLock(void)
* Acquire locktable lock. Note that the deadlock check interrupt had
* better not be enabled anywhere that this process itself holds the
* locktable lock, else this will wait forever. Also note that
- * LWLockAcquire creates a critical section, so that this routine
- * cannot be interrupted by cancel/die interrupts.
+ * LWLockAcquire creates a critical section, so that this routine cannot
+ * be interrupted by cancel/die interrupts.
*/
LWLockAcquire(LockMgrLock, LW_EXCLUSIVE);
/*
* Check to see if we've been awoken by anyone in the interim.
*
- * If we have we can return and resume our transaction -- happy day.
- * Before we are awoken the process releasing the lock grants it to us
- * so we know that we don't have to wait anymore.
+ * If we have we can return and resume our transaction -- happy day. Before
+ * we are awoken the process releasing the lock grants it to us so we know
+ * that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
- * This is quicker than checking our semaphore's state, since no
- * kernel call is needed, and it is safe because we hold the locktable
- * lock.
+ * This is quicker than checking our semaphore's state, since no kernel
+ * call is needed, and it is safe because we hold the locktable lock.
*/
if (MyProc->links.prev == INVALID_OFFSET ||
MyProc->links.next == INVALID_OFFSET)
@@ -972,8 +966,8 @@ CheckDeadLock(void)
RemoveFromWaitQueue(MyProc);
/*
- * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will
- * report an error after we return from the signal handler.
+ * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will report an
+ * error after we return from the signal handler.
*/
MyProc->waitStatus = STATUS_ERROR;
@@ -984,14 +978,14 @@ CheckDeadLock(void)
PGSemaphoreUnlock(&MyProc->sem);
/*
- * We're done here. Transaction abort caused by the error that
- * ProcSleep will raise will cause any other locks we hold to be
- * released, thus allowing other processes to wake up; we don't need
- * to do that here. NOTE: an exception is that releasing locks we hold
- * doesn't consider the possibility of waiters that were blocked
- * behind us on the lock we just failed to get, and might now be
- * wakable because we're not in front of them anymore. However,
- * RemoveFromWaitQueue took care of waking up any such processes.
+ * We're done here. Transaction abort caused by the error that ProcSleep
+ * will raise will cause any other locks we hold to be released, thus
+ * allowing other processes to wake up; we don't need to do that here.
+ * NOTE: an exception is that releasing locks we hold doesn't consider the
+ * possibility of waiters that were blocked behind us on the lock we just
+ * failed to get, and might now be wakable because we're not in front of
+ * them anymore. However, RemoveFromWaitQueue took care of waking up any
+ * such processes.
*/
LWLockRelease(LockMgrLock);
}
@@ -1061,7 +1055,6 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
#ifndef __BEOS__
struct itimerval timeval;
-
#else
bigtime_t time_interval;
#endif
@@ -1092,16 +1085,16 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
/*
* Begin deadlock timeout with statement-level timeout active
*
- * Here, we want to interrupt at the closer of the two timeout times.
- * If fin_time >= statement_fin_time then we need not touch the
- * existing timer setting; else set up to interrupt at the
- * deadlock timeout time.
+ * Here, we want to interrupt at the closer of the two timeout times. If
+ * fin_time >= statement_fin_time then we need not touch the existing
+ * timer setting; else set up to interrupt at the deadlock timeout
+ * time.
*
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
- * because the signal handler will do only what it should do
- * according to the state variables. The deadlock checker may get
- * run earlier than normal, but that does no harm.
+ * because the signal handler will do only what it should do according
+ * to the state variables. The deadlock checker may get run earlier
+ * than normal, but that does no harm.
*/
deadlock_timeout_active = true;
if (fin_time.tv_sec > statement_fin_time.tv_sec ||
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index 1fb069d4f37..f1c92d70dac 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.39 2005/10/11 20:41:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,47 +50,45 @@ void
s_lock(volatile slock_t *lock, const char *file, int line)
{
/*
- * We loop tightly for awhile, then delay using pg_usleep() and try
- * again. Preferably, "awhile" should be a small multiple of the
- * maximum time we expect a spinlock to be held. 100 iterations seems
- * about right as an initial guess. However, on a uniprocessor the
- * loop is a waste of cycles, while in a multi-CPU scenario it's usually
- * better to spin a bit longer than to call the kernel, so we try to
- * adapt the spin loop count depending on whether we seem to be in
- * a uniprocessor or multiprocessor.
+ * We loop tightly for awhile, then delay using pg_usleep() and try again.
+ * Preferably, "awhile" should be a small multiple of the maximum time we
+ * expect a spinlock to be held. 100 iterations seems about right as an
+ * initial guess. However, on a uniprocessor the loop is a waste of
+ * cycles, while in a multi-CPU scenario it's usually better to spin a bit
+ * longer than to call the kernel, so we try to adapt the spin loop count
+ * depending on whether we seem to be in a uniprocessor or multiprocessor.
*
- * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
- * be wrong; there are platforms where that can result in a "stuck
- * spinlock" failure. This has been seen particularly on Alphas; it
- * seems that the first TAS after returning from kernel space will always
- * fail on that hardware.
+ * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd be
+ * wrong; there are platforms where that can result in a "stuck spinlock"
+ * failure. This has been seen particularly on Alphas; it seems that the
+ * first TAS after returning from kernel space will always fail on that
+ * hardware.
*
- * Once we do decide to block, we use randomly increasing pg_usleep()
- * delays. The first delay is 1 msec, then the delay randomly
- * increases to about one second, after which we reset to 1 msec and
- * start again. The idea here is that in the presence of heavy
- * contention we need to increase the delay, else the spinlock holder
- * may never get to run and release the lock. (Consider situation
- * where spinlock holder has been nice'd down in priority by the
- * scheduler --- it will not get scheduled until all would-be
- * acquirers are sleeping, so if we always use a 1-msec sleep, there
- * is a real possibility of starvation.) But we can't just clamp the
- * delay to an upper bound, else it would take a long time to make a
- * reasonable number of tries.
+ * Once we do decide to block, we use randomly increasing pg_usleep() delays.
+ * The first delay is 1 msec, then the delay randomly increases to about
+ * one second, after which we reset to 1 msec and start again. The idea
+ * here is that in the presence of heavy contention we need to increase
+ * the delay, else the spinlock holder may never get to run and release
+ * the lock. (Consider situation where spinlock holder has been nice'd
+ * down in priority by the scheduler --- it will not get scheduled until
+ * all would-be acquirers are sleeping, so if we always use a 1-msec
+ * sleep, there is a real possibility of starvation.) But we can't just
+ * clamp the delay to an upper bound, else it would take a long time to
+ * make a reasonable number of tries.
*
- * We time out and declare error after NUM_DELAYS delays (thus, exactly
- * that many tries). With the given settings, this will usually take
- * 2 or so minutes. It seems better to fix the total number of tries
- * (and thus the probability of unintended failure) than to fix the
- * total time spent.
+ * We time out and declare error after NUM_DELAYS delays (thus, exactly that
+ * many tries). With the given settings, this will usually take 2 or so
+ * minutes. It seems better to fix the total number of tries (and thus
+ * the probability of unintended failure) than to fix the total time
+ * spent.
*
- * The pg_usleep() delays are measured in milliseconds because 1 msec
- * is a common resolution limit at the OS level for newer platforms.
- * On older platforms the resolution limit is usually 10 msec, in
- * which case the total delay before timeout will be a bit more.
+ * The pg_usleep() delays are measured in milliseconds because 1 msec is a
+ * common resolution limit at the OS level for newer platforms. On older
+ * platforms the resolution limit is usually 10 msec, in which case the
+ * total delay before timeout will be a bit more.
*/
-#define MIN_SPINS_PER_DELAY 10
-#define MAX_SPINS_PER_DELAY 1000
+#define MIN_SPINS_PER_DELAY 10
+#define MAX_SPINS_PER_DELAY 1000
#define NUM_DELAYS 1000
#define MIN_DELAY_MSEC 1
#define MAX_DELAY_MSEC 1000
@@ -110,7 +108,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
if (++delays > NUM_DELAYS)
s_lock_stuck(lock, file, line);
- if (cur_delay == 0) /* first time to delay? */
+ if (cur_delay == 0) /* first time to delay? */
cur_delay = MIN_DELAY_MSEC;
pg_usleep(cur_delay * 1000L);
@@ -122,7 +120,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
+ (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
@@ -133,18 +131,18 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/*
* If we were able to acquire the lock without delaying, it's a good
- * indication we are in a multiprocessor. If we had to delay, it's
- * a sign (but not a sure thing) that we are in a uniprocessor.
- * Hence, we decrement spins_per_delay slowly when we had to delay,
- * and increase it rapidly when we didn't. It's expected that
- * spins_per_delay will converge to the minimum value on a uniprocessor
- * and to the maximum value on a multiprocessor.
+ * indication we are in a multiprocessor. If we had to delay, it's a sign
+ * (but not a sure thing) that we are in a uniprocessor. Hence, we
+ * decrement spins_per_delay slowly when we had to delay, and increase it
+ * rapidly when we didn't. It's expected that spins_per_delay will
+ * converge to the minimum value on a uniprocessor and to the maximum
+ * value on a multiprocessor.
*
- * Note: spins_per_delay is local within our current process.
- * We want to average these observations across multiple backends,
- * since it's relatively rare for this function to even get entered,
- * and so a single backend might not live long enough to converge on
- * a good value. That is handled by the two routines below.
+ * Note: spins_per_delay is local within our current process. We want to
+ * average these observations across multiple backends, since it's
+ * relatively rare for this function to even get entered, and so a single
+ * backend might not live long enough to converge on a good value. That
+ * is handled by the two routines below.
*/
if (cur_delay == 0)
{
@@ -180,15 +178,14 @@ int
update_spins_per_delay(int shared_spins_per_delay)
{
/*
- * We use an exponential moving average with a relatively slow
- * adaption rate, so that noise in any one backend's result won't
- * affect the shared value too much. As long as both inputs are
- * within the allowed range, the result must be too, so we need not
- * worry about clamping the result.
+ * We use an exponential moving average with a relatively slow adaption
+ * rate, so that noise in any one backend's result won't affect the shared
+ * value too much. As long as both inputs are within the allowed range,
+ * the result must be too, so we need not worry about clamping the result.
*
- * We deliberately truncate rather than rounding; this is so that
- * single adjustments inside a backend can affect the shared estimate
- * (see the asymmetric adjustment rules above).
+ * We deliberately truncate rather than rounding; this is so that single
+ * adjustments inside a backend can affect the shared estimate (see the
+ * asymmetric adjustment rules above).
*/
return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
}
@@ -227,7 +224,7 @@ tas_dummy()
__asm__ __volatile__(
#if defined(__NetBSD__) && defined(__ELF__)
/* no underscore for label and % for registers */
- "\
+ "\
.global tas \n\
tas: \n\
movel %sp@(0x4),%a0 \n\
@@ -239,7 +236,7 @@ _success: \n\
moveq #0,%d0 \n\
rts \n"
#else
- "\
+ "\
.global _tas \n\
_tas: \n\
movel sp@(0x4),a0 \n\
@@ -251,11 +248,10 @@ _success: \n\
moveq #0,d0 \n\
rts \n"
#endif /* __NetBSD__ && __ELF__ */
-);
+ );
}
#endif /* __m68k__ && !__linux__ */
-
#else /* not __GNUC__ */
/*
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 4e7e47afcd9..dfec2a77694 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.16 2004/12/31 22:01:05 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.17 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,6 @@ SpinlockSemas(void)
{
return 0;
}
-
#else /* !HAVE_SPINLOCKS */
/*
@@ -52,11 +51,11 @@ int
SpinlockSemas(void)
{
/*
- * It would be cleaner to distribute this logic into the affected
- * modules, similar to the way shmem space estimation is handled.
+ * It would be cleaner to distribute this logic into the affected modules,
+ * similar to the way shmem space estimation is handled.
*
- * For now, though, we just need a few spinlocks (10 should be plenty)
- * plus one for each LWLock.
+ * For now, though, we just need a few spinlocks (10 should be plenty) plus
+ * one for each LWLock.
*/
return NumLWLocks() + 10;
}
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 25ab0d09022..fd19fd8736d 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.66 2005/09/22 16:45:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.67 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,7 +133,7 @@ PageAddItem(Page page,
ereport(PANIC,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
/*
* Select offsetNumber to place the new item at
@@ -184,8 +184,8 @@ PageAddItem(Page page,
/*
* Compute new lower and upper pointers for page, see if it'll fit.
*
- * Note: do arithmetic as signed ints, to avoid mistakes if, say,
- * alignedSize > pd_upper.
+ * Note: do arithmetic as signed ints, to avoid mistakes if, say, alignedSize
+ * > pd_upper.
*/
if (offsetNumber == limit || needshuffle)
lower = phdr->pd_lower + sizeof(ItemIdData);
@@ -200,8 +200,7 @@ PageAddItem(Page page,
return InvalidOffsetNumber;
/*
- * OK to insert the item. First, shuffle the existing pointers if
- * needed.
+ * OK to insert the item. First, shuffle the existing pointers if needed.
*/
itemId = PageGetItemId(phdr, offsetNumber);
@@ -318,11 +317,11 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
Offset upper;
/*
- * It's worth the trouble to be more paranoid here than in most
- * places, because we are about to reshuffle data in (what is usually)
- * a shared disk buffer. If we aren't careful then corrupted
- * pointers, lengths, etc could cause us to clobber adjacent disk
- * buffers, spreading the data loss further. So, check everything.
+ * It's worth the trouble to be more paranoid here than in most places,
+ * because we are about to reshuffle data in (what is usually) a shared
+ * disk buffer. If we aren't careful then corrupted pointers, lengths,
+ * etc could cause us to clobber adjacent disk buffers, spreading the data
+ * loss further. So, check everything.
*/
if (pd_lower < SizeOfPageHeaderData ||
pd_lower > pd_upper ||
@@ -389,8 +388,8 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(itemIdSortData),
@@ -470,7 +469,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
nline = PageGetMaxOffsetNumber(page);
if ((int) offnum <= 0 || (int) offnum > nline)
@@ -491,10 +490,10 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
offset, (unsigned int) size)));
/*
- * First, we want to get rid of the pd_linp entry for the index tuple.
- * We copy all subsequent linp's back one slot in the array. We don't
- * use PageGetItemId, because we are manipulating the _array_, not
- * individual linp's.
+ * First, we want to get rid of the pd_linp entry for the index tuple. We
+ * copy all subsequent linp's back one slot in the array. We don't use
+ * PageGetItemId, because we are manipulating the _array_, not individual
+ * linp's.
*/
nbytes = phdr->pd_lower -
((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
@@ -506,11 +505,10 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
/*
* Now move everything between the old upper bound (beginning of tuple
- * space) and the beginning of the deleted tuple forward, so that
- * space in the middle of the page is left free. If we've just
- * deleted the tuple at the beginning of tuple space, then there's no
- * need to do the copy (and bcopy on some architectures SEGV's if
- * asked to move zero bytes).
+ * space) and the beginning of the deleted tuple forward, so that space in
+ * the middle of the page is left free. If we've just deleted the tuple
+ * at the beginning of tuple space, then there's no need to do the copy
+ * (and bcopy on some architectures SEGV's if asked to move zero bytes).
*/
/* beginning of tuple space */
@@ -526,8 +524,8 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
/*
* Finally, we need to adjust the linp entries that remain.
*
- * Anything that used to be before the deleted tuple's data was moved
- * forward by the size of the deleted tuple.
+ * Anything that used to be before the deleted tuple's data was moved forward
+ * by the size of the deleted tuple.
*/
if (!PageIsEmpty(page))
{
@@ -549,7 +547,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
@@ -599,12 +597,12 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- pd_lower, pd_upper, pd_special)));
+ pd_lower, pd_upper, pd_special)));
/*
- * Scan the item pointer array and build a list of just the ones we
- * are going to keep. Notice we do not modify the page yet, since
- * we are still validity-checking.
+ * Scan the item pointer array and build a list of just the ones we are
+ * going to keep. Notice we do not modify the page yet, since we are
+ * still validity-checking.
*/
nline = PageGetMaxOffsetNumber(page);
itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nline);
@@ -632,7 +630,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
}
else
{
- itemidptr->offsetindex = nused; /* where it will go */
+ itemidptr->offsetindex = nused; /* where it will go */
itemidptr->itemoff = offset;
itemidptr->olditemid = *lp;
itemidptr->alignedlen = MAXALIGN(size);
@@ -649,8 +647,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(itemIdSortData),
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 3a0a1f1262b..f8d15ee3ff8 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.117 2005/07/04 04:51:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.118 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,9 +114,9 @@ mdinit(void)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Create pending-operations hashtable if we need it. Currently, we
- * need it if we are standalone (not under a postmaster) OR if we are
- * a bootstrap-mode subprocess of a postmaster (that is, a startup or
+ * Create pending-operations hashtable if we need it. Currently, we need
+ * it if we are standalone (not under a postmaster) OR if we are a
+ * bootstrap-mode subprocess of a postmaster (that is, a startup or
* bgwriter process).
*/
if (!IsUnderPostmaster || IsBootstrapProcessingMode())
@@ -131,7 +131,7 @@ mdinit(void)
pendingOpsTable = hash_create("Pending Ops Table",
100L,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
}
return true;
@@ -162,11 +162,10 @@ mdcreate(SMgrRelation reln, bool isRedo)
int save_errno = errno;
/*
- * During bootstrap, there are cases where a system relation will
- * be accessed (by internal backend processes) before the
- * bootstrap script nominally creates it. Therefore, allow the
- * file to exist already, even if isRedo is not set. (See also
- * mdopen)
+ * During bootstrap, there are cases where a system relation will be
+ * accessed (by internal backend processes) before the bootstrap
+ * script nominally creates it. Therefore, allow the file to exist
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -283,13 +282,13 @@ mdextend(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
#endif
/*
- * Note: because caller obtained blocknum by calling _mdnblocks, which
- * did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
- * partial page at the end of the file. In that case we want to try
- * to overwrite the partial page with a full page. It's also not
- * redundant if bufmgr.c had to dump another buffer of the same file
- * to make room for the new page's buffer.
+ * Note: because caller obtained blocknum by calling _mdnblocks, which did
+ * a seek(SEEK_END), this seek is often redundant and will be optimized
+ * away by fd.c. It's not redundant, however, if there is a partial page
+ * at the end of the file. In that case we want to try to overwrite the
+ * partial page with a full page. It's also not redundant if bufmgr.c had
+ * to dump another buffer of the same file to make room for the new page's
+ * buffer.
*/
if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
return false;
@@ -345,11 +344,10 @@ mdopen(SMgrRelation reln, bool allowNotFound)
if (fd < 0)
{
/*
- * During bootstrap, there are cases where a system relation will
- * be accessed (by internal backend processes) before the
- * bootstrap script nominally creates it. Therefore, accept
- * mdopen() as a substitute for mdcreate() in bootstrap mode only.
- * (See mdcreate)
+ * During bootstrap, there are cases where a system relation will be
+ * accessed (by internal backend processes) before the bootstrap
+ * script nominally creates it. Therefore, accept mdopen() as a
+ * substitute for mdcreate() in bootstrap mode only. (See mdcreate)
*/
if (IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
@@ -445,8 +443,8 @@ mdread(SMgrRelation reln, BlockNumber blocknum, char *buffer)
if ((nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
{
/*
- * If we are at or past EOF, return zeroes without complaining.
- * Also substitute zeroes if we found a partial block at EOF.
+ * If we are at or past EOF, return zeroes without complaining. Also
+ * substitute zeroes if we found a partial block at EOF.
*
* XXX this is really ugly, bad design. However the current
* implementation of hash indexes requires it, because hash index
@@ -515,13 +513,12 @@ mdnblocks(SMgrRelation reln)
BlockNumber segno = 0;
/*
- * Skip through any segments that aren't the last one, to avoid
- * redundant seeks on them. We have previously verified that these
- * segments are exactly RELSEG_SIZE long, and it's useless to recheck
- * that each time. (NOTE: this assumption could only be wrong if
- * another backend has truncated the relation. We rely on higher code
- * levels to handle that scenario by closing and re-opening the md
- * fd.)
+ * Skip through any segments that aren't the last one, to avoid redundant
+ * seeks on them. We have previously verified that these segments are
+ * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
+ * (NOTE: this assumption could only be wrong if another backend has
+ * truncated the relation. We rely on higher code levels to handle that
+ * scenario by closing and re-opening the md fd.)
*/
while (v->mdfd_chain != NULL)
{
@@ -545,11 +542,10 @@ mdnblocks(SMgrRelation reln)
if (v->mdfd_chain == NULL)
{
/*
- * Because we pass O_CREAT, we will create the next segment
- * (with zero length) immediately, if the last segment is of
- * length REL_SEGSIZE. This is unnecessary but harmless, and
- * testing for the case would take more cycles than it seems
- * worth.
+ * Because we pass O_CREAT, we will create the next segment (with
+ * zero length) immediately, if the last segment is of length
+ * REL_SEGSIZE. This is unnecessary but harmless, and testing for
+ * the case would take more cycles than it seems worth.
*/
v->mdfd_chain = _mdfd_openseg(reln, segno, O_CREAT);
if (v->mdfd_chain == NULL)
@@ -601,11 +597,11 @@ mdtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
if (priorblocks > nblocks)
{
/*
- * This segment is no longer wanted at all (and has already
- * been unlinked from the mdfd_chain). We truncate the file
- * before deleting it because if other backends are holding
- * the file open, the unlink will fail on some platforms.
- * Better a zero-size file gets left around than a big file...
+ * This segment is no longer wanted at all (and has already been
+ * unlinked from the mdfd_chain). We truncate the file before
+ * deleting it because if other backends are holding the file
+ * open, the unlink will fail on some platforms. Better a
+ * zero-size file gets left around than a big file...
*/
FileTruncate(v->mdfd_vfd, 0);
FileUnlink(v->mdfd_vfd);
@@ -616,12 +612,12 @@ mdtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
{
/*
- * This is the last segment we want to keep. Truncate the file
- * to the right length, and clear chain link that points to
- * any remaining segments (which we shall zap). NOTE: if
- * nblocks is exactly a multiple K of RELSEG_SIZE, we will
- * truncate the K+1st segment to 0 length but keep it. This is
- * mainly so that the right thing happens if nblocks==0.
+ * This is the last segment we want to keep. Truncate the file to
+ * the right length, and clear chain link that points to any
+ * remaining segments (which we shall zap). NOTE: if nblocks is
+ * exactly a multiple K of RELSEG_SIZE, we will truncate the K+1st
+ * segment to 0 length but keep it. This is mainly so that the
+ * right thing happens if nblocks==0.
*/
BlockNumber lastsegblocks = nblocks - priorblocks;
@@ -638,8 +634,8 @@ mdtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
else
{
/*
- * We still need this segment and 0 or more blocks beyond it,
- * so nothing to do here.
+ * We still need this segment and 0 or more blocks beyond it, so
+ * nothing to do here.
*/
v = v->mdfd_chain;
}
@@ -712,9 +708,9 @@ mdsync(void)
/*
* If we are in the bgwriter, the sync had better include all fsync
- * requests that were queued by backends before the checkpoint REDO
- * point was determined. We go that a little better by accepting all
- * requests queued up to the point where we start fsync'ing.
+ * requests that were queued by backends before the checkpoint REDO point
+	 * was determined.  We do that a little better by accepting all requests
+ * queued up to the point where we start fsync'ing.
*/
AbsorbFsyncRequests();
@@ -722,9 +718,9 @@ mdsync(void)
while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * If fsync is off then we don't have to bother opening the file
- * at all. (We delay checking until this point so that changing
- * fsync on the fly behaves sensibly.)
+ * If fsync is off then we don't have to bother opening the file at
+ * all. (We delay checking until this point so that changing fsync on
+ * the fly behaves sensibly.)
*/
if (enableFsync)
{
@@ -732,28 +728,28 @@ mdsync(void)
MdfdVec *seg;
/*
- * Find or create an smgr hash entry for this relation. This
- * may seem a bit unclean -- md calling smgr? But it's really
- * the best solution. It ensures that the open file reference
- * isn't permanently leaked if we get an error here. (You may
- * say "but an unreferenced SMgrRelation is still a leak!" Not
- * really, because the only case in which a checkpoint is done
- * by a process that isn't about to shut down is in the
- * bgwriter, and it will periodically do smgrcloseall(). This
- * fact justifies our not closing the reln in the success path
- * either, which is a good thing since in non-bgwriter cases
- * we couldn't safely do that.) Furthermore, in many cases
- * the relation will have been dirtied through this same smgr
- * relation, and so we can save a file open/close cycle.
+ * Find or create an smgr hash entry for this relation. This may
+ * seem a bit unclean -- md calling smgr? But it's really the
+ * best solution. It ensures that the open file reference isn't
+ * permanently leaked if we get an error here. (You may say "but
+ * an unreferenced SMgrRelation is still a leak!" Not really,
+ * because the only case in which a checkpoint is done by a
+ * process that isn't about to shut down is in the bgwriter, and
+ * it will periodically do smgrcloseall(). This fact justifies
+ * our not closing the reln in the success path either, which is a
+ * good thing since in non-bgwriter cases we couldn't safely do
+ * that.) Furthermore, in many cases the relation will have been
+ * dirtied through this same smgr relation, and so we can save a
+ * file open/close cycle.
*/
reln = smgropen(entry->rnode);
/*
- * It is possible that the relation has been dropped or
- * truncated since the fsync request was entered. Therefore,
- * we have to allow file-not-found errors. This applies both
- * during _mdfd_getseg() and during FileSync, since fd.c might
- * have closed the file behind our back.
+ * It is possible that the relation has been dropped or truncated
+ * since the fsync request was entered. Therefore, we have to
+ * allow file-not-found errors. This applies both during
+ * _mdfd_getseg() and during FileSync, since fd.c might have
+ * closed the file behind our back.
*/
seg = _mdfd_getseg(reln,
entry->segno * ((BlockNumber) RELSEG_SIZE),
@@ -925,26 +921,25 @@ _mdfd_getseg(SMgrRelation reln, BlockNumber blkno, bool allowNotFound)
{
/*
* We will create the next segment only if the target block is
- * within it. This prevents Sorcerer's Apprentice syndrome if
- * a bug at higher levels causes us to be handed a
- * ridiculously large blkno --- otherwise we could create many
- * thousands of empty segment files before reaching the
- * "target" block. We should never need to create more than
- * one new segment per call, so this restriction seems
- * reasonable.
+ * within it. This prevents Sorcerer's Apprentice syndrome if a
+ * bug at higher levels causes us to be handed a ridiculously
+ * large blkno --- otherwise we could create many thousands of
+ * empty segment files before reaching the "target" block. We
+ * should never need to create more than one new segment per call,
+ * so this restriction seems reasonable.
*
* BUT: when doing WAL recovery, disable this logic and create
- * segments unconditionally. In this case it seems better
- * to assume the given blkno is good (it presumably came from
- * a CRC-checked WAL record); furthermore this lets us cope
- * in the case where we are replaying WAL data that has a write
- * into a high-numbered segment of a relation that was later
- * deleted. We want to go ahead and create the segments so
- * we can finish out the replay.
+ * segments unconditionally. In this case it seems better to
+ * assume the given blkno is good (it presumably came from a
+ * CRC-checked WAL record); furthermore this lets us cope in the
+ * case where we are replaying WAL data that has a write into a
+ * high-numbered segment of a relation that was later deleted. We
+ * want to go ahead and create the segments so we can finish out
+ * the replay.
*/
v->mdfd_chain = _mdfd_openseg(reln,
nextsegno,
- (segstogo == 1 || InRecovery) ? O_CREAT : 0);
+ (segstogo == 1 || InRecovery) ? O_CREAT : 0);
if (v->mdfd_chain == NULL)
{
if (allowNotFound && errno == ENOENT)
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index ac1767588d3..17d14e38700 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.92 2005/08/08 03:12:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.93 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,7 +155,7 @@ smgrinit(void)
if (!(*(smgrsw[i].smgr_init)) ())
elog(FATAL, "smgr initialization failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
@@ -178,7 +178,7 @@ smgrshutdown(int code, Datum arg)
if (!(*(smgrsw[i].smgr_shutdown)) ())
elog(FATAL, "smgr shutdown failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
@@ -234,8 +234,8 @@ void
smgrsetowner(SMgrRelation *owner, SMgrRelation reln)
{
/*
- * First, unhook any old owner. (Normally there shouldn't be any, but
- * it seems possible that this can happen during swap_relation_files()
+ * First, unhook any old owner. (Normally there shouldn't be any, but it
+ * seems possible that this can happen during swap_relation_files()
* depending on the order of processing. It's ok to close the old
* relcache entry early in that case.)
*/
@@ -271,9 +271,8 @@ smgrclose(SMgrRelation reln)
elog(ERROR, "SMgrRelation hashtable corrupted");
/*
- * Unhook the owner pointer, if any. We do this last since in the
- * remote possibility of failure above, the SMgrRelation object will still
- * exist.
+ * Unhook the owner pointer, if any. We do this last since in the remote
+ * possibility of failure above, the SMgrRelation object will still exist.
*/
if (owner)
*owner = NULL;
@@ -345,11 +344,10 @@ smgrcreate(SMgrRelation reln, bool isTemp, bool isRedo)
* We may be using the target table space for the first time in this
* database, so create a per-database subdirectory if needed.
*
- * XXX this is a fairly ugly violation of module layering, but this seems
- * to be the best place to put the check. Maybe
- * TablespaceCreateDbspace should be here and not in
- * commands/tablespace.c? But that would imply importing a lot of
- * stuff that smgr.c oughtn't know, either.
+ * XXX this is a fairly ugly violation of module layering, but this seems to
+ * be the best place to put the check. Maybe TablespaceCreateDbspace
+ * should be here and not in commands/tablespace.c? But that would imply
+ * importing a lot of stuff that smgr.c oughtn't know, either.
*/
TablespaceCreateDbspace(reln->smgr_rnode.spcNode,
reln->smgr_rnode.dbNode,
@@ -368,9 +366,8 @@ smgrcreate(SMgrRelation reln, bool isTemp, bool isRedo)
/*
* Make a non-transactional XLOG entry showing the file creation. It's
- * non-transactional because we should replay it whether the
- * transaction commits or not; if not, the file will be dropped at
- * abort time.
+ * non-transactional because we should replay it whether the transaction
+ * commits or not; if not, the file will be dropped at abort time.
*/
xlrec.rnode = reln->smgr_rnode;
@@ -418,13 +415,13 @@ smgrscheduleunlink(SMgrRelation reln, bool isTemp)
pendingDeletes = pending;
/*
- * NOTE: if the relation was created in this transaction, it will now
- * be present in the pending-delete list twice, once with atCommit
- * true and once with atCommit false. Hence, it will be physically
- * deleted at end of xact in either case (and the other entry will be
- * ignored by smgrDoPendingDeletes, so no error will occur). We could
- * instead remove the existing list entry and delete the physical file
- * immediately, but for now I'll keep the logic simple.
+ * NOTE: if the relation was created in this transaction, it will now be
+ * present in the pending-delete list twice, once with atCommit true and
+ * once with atCommit false. Hence, it will be physically deleted at end
+ * of xact in either case (and the other entry will be ignored by
+ * smgrDoPendingDeletes, so no error will occur). We could instead remove
+ * the existing list entry and delete the physical file immediately, but
+ * for now I'll keep the logic simple.
*/
/* Now close the file and throw away the hashtable entry */
@@ -467,17 +464,16 @@ smgr_internal_unlink(RelFileNode rnode, int which, bool isTemp, bool isRedo)
DropRelFileNodeBuffers(rnode, isTemp, 0);
/*
- * Tell the free space map to forget this relation. It won't be
- * accessed any more anyway, but we may as well recycle the map space
- * quickly.
+ * Tell the free space map to forget this relation. It won't be accessed
+ * any more anyway, but we may as well recycle the map space quickly.
*/
FreeSpaceMapForgetRel(&rnode);
/*
* And delete the physical files.
*
- * Note: we treat deletion failure as a WARNING, not an error, because
- * we've already decided to commit or abort the current xact.
+ * Note: we treat deletion failure as a WARNING, not an error, because we've
+ * already decided to commit or abort the current xact.
*/
if (!(*(smgrsw[which].smgr_unlink)) (rnode, isRedo))
ereport(WARNING,
@@ -524,11 +520,11 @@ smgrread(SMgrRelation reln, BlockNumber blocknum, char *buffer)
if (!(*(smgrsw[reln->smgr_which].smgr_read)) (reln, blocknum, buffer))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not read block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
@@ -549,11 +545,11 @@ smgrwrite(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
isTemp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not write block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
@@ -600,15 +596,15 @@ smgrtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
BlockNumber newblks;
/*
- * Get rid of any buffers for the about-to-be-deleted blocks.
- * bufmgr will just drop them without bothering to write the contents.
+ * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
+ * just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln->smgr_rnode, isTemp, nblocks);
/*
- * Tell the free space map to forget anything it may have stored for
- * the about-to-be-deleted blocks. We want to be sure it won't return
- * bogus block numbers later on.
+ * Tell the free space map to forget anything it may have stored for the
+ * about-to-be-deleted blocks. We want to be sure it won't return bogus
+ * block numbers later on.
*/
FreeSpaceMapTruncateRel(&reln->smgr_rnode, nblocks);
@@ -618,19 +614,19 @@ smgrtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
if (newblks == InvalidBlockNumber)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode,
- nblocks)));
+ errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode,
+ nblocks)));
if (!isTemp)
{
/*
- * Make a non-transactional XLOG entry showing the file
- * truncation. It's non-transactional because we should replay it
- * whether the transaction commits or not; the underlying file
- * change is certainly not reversible.
+ * Make a non-transactional XLOG entry showing the file truncation.
+ * It's non-transactional because we should replay it whether the
+ * transaction commits or not; the underlying file change is certainly
+ * not reversible.
*/
XLogRecPtr lsn;
XLogRecData rdata;
@@ -841,7 +837,7 @@ smgrcommit(void)
if (!(*(smgrsw[i].smgr_commit)) ())
elog(ERROR, "transaction commit failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
@@ -861,7 +857,7 @@ smgrabort(void)
if (!(*(smgrsw[i].smgr_abort)) ())
elog(ERROR, "transaction abort failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
@@ -881,7 +877,7 @@ smgrsync(void)
if (!(*(smgrsw[i].smgr_sync)) ())
elog(ERROR, "storage sync failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
@@ -912,30 +908,30 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* First, force bufmgr to drop any buffers it has for the to-be-
- * truncated blocks. We must do this, else subsequent
- * XLogReadBuffer operations will not re-extend the file properly.
+ * truncated blocks. We must do this, else subsequent XLogReadBuffer
+ * operations will not re-extend the file properly.
*/
DropRelFileNodeBuffers(xlrec->rnode, false, xlrec->blkno);
/*
- * Tell the free space map to forget anything it may have stored
- * for the about-to-be-deleted blocks. We want to be sure it
- * won't return bogus block numbers later on.
+ * Tell the free space map to forget anything it may have stored for
+ * the about-to-be-deleted blocks. We want to be sure it won't return
+ * bogus block numbers later on.
*/
FreeSpaceMapTruncateRel(&reln->smgr_rnode, xlrec->blkno);
/* Do the truncation */
newblks = (*(smgrsw[reln->smgr_which].smgr_truncate)) (reln,
- xlrec->blkno,
+ xlrec->blkno,
false);
if (newblks == InvalidBlockNumber)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode,
- xlrec->blkno)));
+ errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode,
+ xlrec->blkno)));
}
else
elog(PANIC, "smgr_redo: unknown op code %u", info);
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index 2b60c2c46e3..da164ecd0b7 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.65 2005/03/16 21:38:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.66 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -173,9 +173,8 @@ NullCommand(CommandDest dest)
case RemoteExecute:
/*
- * tell the fe that we saw an empty query string. In
- * protocols before 3.0 this has a useless empty-string
- * message body.
+ * tell the fe that we saw an empty query string. In protocols
+ * before 3.0 this has a useless empty-string message body.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
pq_putemptymessage('I');
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index fa105c560e0..7c7de52e57a 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.82 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.83 2005/10/15 02:49:26 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -103,8 +103,8 @@ GetOldFunctionMessage(StringInfo buf)
/* FATAL here since no hope of regaining message sync */
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
}
/* and arg contents */
if (argsize > 0)
@@ -204,11 +204,10 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
/*
* Since the validity of this structure is determined by whether the
* funcid is OK, we clear the funcid here. It must not be set to the
- * correct value until we are about to return with a good struct
- * fp_info, since we can be interrupted (i.e., with an ereport(ERROR,
- * ...)) at any time. [No longer really an issue since we don't save
- * the struct fp_info across transactions anymore, but keep it
- * anyway.]
+ * correct value until we are about to return with a good struct fp_info,
+ * since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
+ * time. [No longer really an issue since we don't save the struct
+ * fp_info across transactions anymore, but keep it anyway.]
*/
MemSet(fip, 0, sizeof(struct fp_info));
fip->funcid = InvalidOid;
@@ -294,14 +293,14 @@ HandleFunctionRequest(StringInfo msgBuf)
/*
* Now that we've eaten the input message, check to see if we actually
- * want to do the function call or not. It's now safe to ereport();
- * we won't lose sync with the frontend.
+ * want to do the function call or not. It's now safe to ereport(); we
+ * won't lose sync with the frontend.
*/
if (IsAbortedTransactionBlockState())
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
/*
* Begin parsing the buffer contents.
@@ -440,8 +439,8 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
@@ -500,8 +499,8 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
if (abuf.cursor != abuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in function argument %d",
- i + 1)));
+ errmsg("incorrect binary data format in function argument %d",
+ i + 1)));
}
else
ereport(ERROR,
@@ -543,9 +542,9 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
* Copy supplied arguments into arg vector. In protocol 2.0 these are
* always assumed to be supplied in binary format.
*
- * Note: although the original protocol 2.0 code did not have any way for
- * the frontend to specify a NULL argument, we now choose to interpret
- * length == -1 as meaning a NULL.
+ * Note: although the original protocol 2.0 code did not have any way for the
+ * frontend to specify a NULL argument, we now choose to interpret length
+ * == -1 as meaning a NULL.
*/
for (i = 0; i < nargs; ++i)
{
@@ -563,8 +562,8 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
@@ -587,8 +586,8 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
if (abuf.cursor != abuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in function argument %d",
- i + 1)));
+ errmsg("incorrect binary data format in function argument %d",
+ i + 1)));
}
/* Desired result format is always binary in protocol 2.0 */
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 8b722c2e015..93bcc936602 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.465 2005/10/13 22:57:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.466 2005/10/15 02:49:27 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -71,8 +71,7 @@ extern char *optarg;
* global variables
* ----------------
*/
-const char *debug_query_string; /* for pgmonitor and
- * log_min_error_statement */
+const char *debug_query_string; /* for pgmonitor and log_min_error_statement */
/* Note: whereToSendOutput is initialized for the bootstrap/standalone case */
CommandDest whereToSendOutput = Debug;
@@ -96,7 +95,7 @@ static int max_stack_depth_bytes = 2048 * 1024;
/* stack base pointer (initialized by PostgresMain) */
/* Do not make static so PL/Java can modifiy it */
-char *stack_base_ptr = NULL;
+char *stack_base_ptr = NULL;
/*
@@ -142,9 +141,7 @@ static bool EchoQuery = false; /* default don't echo */
* tcop/tcopdebug.h
*/
#ifndef TCOP_DONTUSENEWLINE
-static int UseNewLine = 1; /* Use newlines query delimiters (the
- * default) */
-
+static int UseNewLine = 1; /* Use newlines query delimiters (the default) */
#else
static int UseNewLine = 0; /* Use EOF as query delimiters */
#endif /* TCOP_DONTUSENEWLINE */
@@ -204,8 +201,8 @@ InteractiveBackend(StringInfo inBuf)
if (UseNewLine)
{
/*
- * if we are using \n as a delimiter, then read characters
- * until the \n.
+ * if we are using \n as a delimiter, then read characters until
+ * the \n.
*/
while ((c = getc(stdin)) != EOF)
{
@@ -297,12 +294,12 @@ SocketBackend(StringInfo inBuf)
}
/*
- * Validate message type code before trying to read body; if we have
- * lost sync, better to say "command unknown" than to run out of
- * memory because we used garbage as a length word.
+ * Validate message type code before trying to read body; if we have lost
+ * sync, better to say "command unknown" than to run out of memory because
+ * we used garbage as a length word.
*
- * This also gives us a place to set the doing_extended_query_message
- * flag as soon as possible.
+ * This also gives us a place to set the doing_extended_query_message flag as
+ * soon as possible.
*/
switch (qtype)
{
@@ -315,7 +312,7 @@ SocketBackend(StringInfo inBuf)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
return EOF;
}
}
@@ -342,7 +339,7 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'S': /* sync */
@@ -354,7 +351,7 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'd': /* copy data */
@@ -365,15 +362,15 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
default:
/*
- * Otherwise we got garbage from the frontend. We treat this
- * as fatal because we have probably lost message boundary
- * sync, and there's no good way to recover.
+ * Otherwise we got garbage from the frontend. We treat this as
+ * fatal because we have probably lost message boundary sync, and
+ * there's no good way to recover.
*/
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
@@ -382,9 +379,9 @@ SocketBackend(StringInfo inBuf)
}
/*
- * In protocol version 3, all frontend messages have a length word
- * next after the type code; we can read the message contents
- * independently of the type.
+ * In protocol version 3, all frontend messages have a length word next
+ * after the type code; we can read the message contents independently of
+ * the type.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
{
@@ -532,14 +529,14 @@ pg_parse_query(const char *query_string)
static bool
log_after_parse(List *raw_parsetree_list, const char *query_string,
- char **prepare_string)
+ char **prepare_string)
{
ListCell *parsetree_item;
bool log_this_statement = (log_statement == LOGSTMT_ALL);
*prepare_string = NULL;
- /* Check if we need to log the statement, and get prepare_string. */
+ /* Check if we need to log the statement, and get prepare_string. */
foreach(parsetree_item, raw_parsetree_list)
{
Node *parsetree = (Node *) lfirst(parsetree_item);
@@ -554,7 +551,7 @@ log_after_parse(List *raw_parsetree_list, const char *query_string,
if (IsA(parsetree, SelectStmt) &&
((SelectStmt *) parsetree)->into == NULL)
- continue; /* optimization for frequent command */
+ continue; /* optimization for frequent command */
if (log_statement == LOGSTMT_MOD &&
(IsA(parsetree, InsertStmt) ||
@@ -562,23 +559,23 @@ log_after_parse(List *raw_parsetree_list, const char *query_string,
IsA(parsetree, DeleteStmt) ||
IsA(parsetree, TruncateStmt) ||
(IsA(parsetree, CopyStmt) &&
- ((CopyStmt *) parsetree)->is_from))) /* COPY FROM */
+ ((CopyStmt *) parsetree)->is_from))) /* COPY FROM */
log_this_statement = true;
commandTag = CreateCommandTag(parsetree);
if ((log_statement == LOGSTMT_MOD ||
log_statement == LOGSTMT_DDL) &&
(strncmp(commandTag, "CREATE ", strlen("CREATE ")) == 0 ||
- IsA(parsetree, SelectStmt) || /* SELECT INTO, CREATE AS */
+ IsA(parsetree, SelectStmt) || /* SELECT INTO, CREATE AS */
strncmp(commandTag, "ALTER ", strlen("ALTER ")) == 0 ||
strncmp(commandTag, "DROP ", strlen("DROP ")) == 0 ||
- IsA(parsetree, GrantStmt) || /* GRANT or REVOKE */
+ IsA(parsetree, GrantStmt) || /* GRANT or REVOKE */
IsA(parsetree, CommentStmt)))
log_this_statement = true;
/*
- * For the first EXECUTE we find, record the client statement
- * used by the PREPARE.
+ * For the first EXECUTE we find, record the client statement used by
+ * the PREPARE.
*/
if (IsA(parsetree, ExecuteStmt))
{
@@ -589,13 +586,13 @@ log_after_parse(List *raw_parsetree_list, const char *query_string,
entry->query_string)
{
*prepare_string = palloc(strlen(entry->query_string) +
- strlen(" [client PREPARE: %s]") - 1);
+ strlen(" [client PREPARE: %s]") - 1);
sprintf(*prepare_string, " [client PREPARE: %s]",
- entry->query_string);
+ entry->query_string);
}
}
}
-
+
if (log_this_statement)
{
ereport(LOG,
@@ -657,8 +654,8 @@ pg_rewrite_queries(List *querytree_list)
ResetUsage();
/*
- * rewritten queries are collected in new_list. Note there may be
- * more or fewer than in the original list.
+ * rewritten queries are collected in new_list. Note there may be more or
+ * fewer than in the original list.
*/
foreach(list_item, querytree_list)
{
@@ -690,8 +687,7 @@ pg_rewrite_queries(List *querytree_list)
#ifdef COPY_PARSE_PLAN_TREES
/*
- * Optional debugging check: pass querytree output through
- * copyObject()
+ * Optional debugging check: pass querytree output through copyObject()
*/
new_list = (List *) copyObject(querytree_list);
/* This checks both copyObject() and the equal() routines... */
@@ -734,8 +730,8 @@ pg_plan_query(Query *querytree, ParamListInfo boundParams)
Plan *new_plan = (Plan *) copyObject(plan);
/*
- * equal() currently does not have routines to compare Plan nodes,
- * so don't try to test equality here. Perhaps fix someday?
+ * equal() currently does not have routines to compare Plan nodes, so
+ * don't try to test equality here. Perhaps fix someday?
*/
#ifdef NOT_USED
/* This checks both copyObject() and the equal() routines... */
@@ -813,13 +809,14 @@ exec_simple_query(const char *query_string)
MemoryContext oldcontext;
List *parsetree_list;
ListCell *parsetree_item;
- struct timeval start_t, stop_t;
+ struct timeval start_t,
+ stop_t;
bool save_log_duration = log_duration;
int save_log_min_duration_statement = log_min_duration_statement;
bool save_log_statement_stats = log_statement_stats;
- char *prepare_string = NULL;
+ char *prepare_string = NULL;
bool was_logged = false;
-
+
/*
* Report query to various monitoring facilities.
*/
@@ -829,9 +826,9 @@ exec_simple_query(const char *query_string)
/*
* We use save_log_* so "SET log_duration = true" and "SET
- * log_min_duration_statement = true" don't report incorrect time
- * because gettimeofday() wasn't called. Similarly,
- * log_statement_stats has to be captured once.
+ * log_min_duration_statement = true" don't report incorrect time because
+ * gettimeofday() wasn't called. Similarly, log_statement_stats has to be
+ * captured once.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
gettimeofday(&start_t, NULL);
@@ -842,17 +839,17 @@ exec_simple_query(const char *query_string)
/*
* Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
- * BEGIN/COMMIT/ABORT statement; we have to force a new xact command
- * after one of those, else bad things will happen in xact.c. (Note
- * that this will normally change current memory context.)
+ * BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
+ * one of those, else bad things will happen in xact.c. (Note that this
+ * will normally change current memory context.)
*/
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly
- * necessary, it seems best to define simple-Query mode as if it used
- * the unnamed statement and portal; this ensures we recover any
- * storage used by prior unnamed operations.)
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * it seems best to define simple-Query mode as if it used the unnamed
+ * statement and portal; this ensures we recover any storage used by prior
+ * unnamed operations.)
*/
unnamed_stmt_pstmt = NULL;
if (unnamed_stmt_context)
@@ -870,14 +867,14 @@ exec_simple_query(const char *query_string)
QueryContext = CurrentMemoryContext;
/*
- * Do basic parsing of the query or queries (this should be safe even
- * if we are in aborted transaction state!)
+ * Do basic parsing of the query or queries (this should be safe even if
+ * we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
if (log_statement != LOGSTMT_NONE || save_log_min_duration_statement != -1)
was_logged = log_after_parse(parsetree_list, query_string,
- &prepare_string);
+ &prepare_string);
/*
* Switch back to transaction context to enter the loop.
@@ -899,10 +896,10 @@ exec_simple_query(const char *query_string)
int16 format;
/*
- * Get the command name for use in status display (it also becomes
- * the default completion tag, down inside PortalRun). Set
- * ps_status and do any special start-of-SQL-command processing
- * needed by the destination.
+ * Get the command name for use in status display (it also becomes the
+ * default completion tag, down inside PortalRun). Set ps_status and
+ * do any special start-of-SQL-command processing needed by the
+ * destination.
*/
commandTag = CreateCommandTag(parsetree);
@@ -912,11 +909,11 @@ exec_simple_query(const char *query_string)
/*
* If we are in an aborted transaction, reject all commands except
- * COMMIT/ABORT. It is important that this test occur before we
- * try to do parse analysis, rewrite, or planning, since all those
- * phases try to do database accesses, which may fail in abort
- * state. (It might be safe to allow some additional utility
- * commands in this state, but not many...)
+ * COMMIT/ABORT. It is important that this test occur before we try
+ * to do parse analysis, rewrite, or planning, since all those phases
+ * try to do database accesses, which may fail in abort state. (It
+ * might be safe to allow some additional utility commands in this
+ * state, but not many...)
*/
if (IsAbortedTransactionBlockState())
{
@@ -937,7 +934,7 @@ exec_simple_query(const char *query_string)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Make sure we are in a transaction command */
@@ -980,10 +977,10 @@ exec_simple_query(const char *query_string)
PortalStart(portal, NULL, InvalidSnapshot);
/*
- * Select the appropriate output format: text unless we are doing
- * a FETCH from a binary cursor. (Pretty grotty to have to do
- * this here --- but it avoids grottiness in other places. Ah,
- * the joys of backward compatibility...)
+ * Select the appropriate output format: text unless we are doing a
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * --- but it avoids grottiness in other places. Ah, the joys of
+ * backward compatibility...)
*/
format = 0; /* TEXT is default */
if (IsA(parsetree, FetchStmt))
@@ -1012,8 +1009,7 @@ exec_simple_query(const char *query_string)
MemoryContextSwitchTo(oldcontext);
/*
- * Run the portal to completion, and then drop it (and the
- * receiver).
+ * Run the portal to completion, and then drop it (and the receiver).
*/
(void) PortalRun(portal,
FETCH_ALL,
@@ -1028,24 +1024,22 @@ exec_simple_query(const char *query_string)
if (IsA(parsetree, TransactionStmt))
{
/*
- * If this was a transaction control statement, commit it. We
- * will start a new xact command for the next command (if
- * any).
+ * If this was a transaction control statement, commit it. We will
+ * start a new xact command for the next command (if any).
*/
finish_xact_command();
}
else if (lnext(parsetree_item) == NULL)
{
/*
- * If this is the last parsetree of the query string, close
- * down transaction statement before reporting
- * command-complete. This is so that any end-of-transaction
- * errors are reported before the command-complete message is
- * issued, to avoid confusing clients who will expect either a
- * command-complete message or an error, not one and then the
- * other. But for compatibility with historical Postgres
- * behavior, we do not force a transaction boundary between
- * queries appearing in a single query string.
+ * If this is the last parsetree of the query string, close down
+ * transaction statement before reporting command-complete. This
+ * is so that any end-of-transaction errors are reported before
+ * the command-complete message is issued, to avoid confusing
+ * clients who will expect either a command-complete message or an
+ * error, not one and then the other. But for compatibility with
+ * historical Postgres behavior, we do not force a transaction
+ * boundary between queries appearing in a single query string.
*/
finish_xact_command();
}
@@ -1059,11 +1053,10 @@ exec_simple_query(const char *query_string)
}
/*
- * Tell client that we're done with this query. Note we emit
- * exactly one EndCommand report for each raw parsetree, thus one
- * for each SQL command the client sent, regardless of rewriting.
- * (But a command aborted by error will not send an EndCommand
- * report at all.)
+ * Tell client that we're done with this query. Note we emit exactly
+ * one EndCommand report for each raw parsetree, thus one for each SQL
+ * command the client sent, regardless of rewriting. (But a command
+ * aborted by error will not send an EndCommand report at all.)
*/
EndCommand(completionTag, dest);
} /* end loop over parsetrees */
@@ -1082,8 +1075,8 @@ exec_simple_query(const char *query_string)
QueryContext = NULL;
/*
- * Combine processing here as we need to calculate the query duration
- * in both instances.
+ * Combine processing here as we need to calculate the query duration in
+ * both instances.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
{
@@ -1096,28 +1089,28 @@ exec_simple_query(const char *query_string)
stop_t.tv_usec += 1000000;
}
usecs = (long) (stop_t.tv_sec - start_t.tv_sec) * 1000000 +
- (long) (stop_t.tv_usec - start_t.tv_usec);
+ (long) (stop_t.tv_usec - start_t.tv_usec);
/* Only print duration if we previously printed the statement. */
if (was_logged && save_log_duration)
ereport(LOG,
(errmsg("duration: %ld.%03ld ms",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
/*
- * Output a duration_statement to the log if the query has
- * exceeded the min duration, or if we are to print all durations.
+ * Output a duration_statement to the log if the query has exceeded
+ * the min duration, or if we are to print all durations.
*/
if (save_log_min_duration_statement == 0 ||
(save_log_min_duration_statement > 0 &&
usecs >= save_log_min_duration_statement * 1000))
ereport(LOG,
(errmsg("duration: %ld.%03ld ms statement: %s%s",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
query_string,
prepare_string ? prepare_string : "")));
}
@@ -1170,9 +1163,9 @@ exec_parse_message(const char *query_string, /* string to execute */
query_string)));
/*
- * Start up a transaction command so we can run parse analysis etc.
- * (Note that this will normally change current memory context.)
- * Nothing happens if we are already in one.
+ * Start up a transaction command so we can run parse analysis etc. (Note
+ * that this will normally change current memory context.) Nothing happens
+ * if we are already in one.
*/
start_xact_command();
@@ -1182,13 +1175,12 @@ exec_parse_message(const char *query_string, /* string to execute */
* We have two strategies depending on whether the prepared statement is
* named or not. For a named prepared statement, we do parsing in
* MessageContext and copy the finished trees into the prepared
- * statement's private context; then the reset of MessageContext
- * releases temporary space used by parsing and planning. For an
- * unnamed prepared statement, we assume the statement isn't going to
- * hang around long, so getting rid of temp space quickly is probably
- * not worth the costs of copying parse/plan trees. So in this case,
- * we set up a special context for the unnamed statement, and do all
- * the parsing/planning therein.
+ * statement's private context; then the reset of MessageContext releases
+ * temporary space used by parsing and planning. For an unnamed prepared
+ * statement, we assume the statement isn't going to hang around long, so
+ * getting rid of temp space quickly is probably not worth the costs of
+ * copying parse/plan trees. So in this case, we set up a special context
+ * for the unnamed statement, and do all the parsing/planning therein.
*/
is_named = (stmt_name[0] != '\0');
if (is_named)
@@ -1219,20 +1211,20 @@ exec_parse_message(const char *query_string, /* string to execute */
QueryContext = CurrentMemoryContext;
/*
- * Do basic parsing of the query or queries (this should be safe even
- * if we are in aborted transaction state!)
+ * Do basic parsing of the query or queries (this should be safe even if
+ * we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
/*
- * We only allow a single user statement in a prepared statement. This
- * is mainly to keep the protocol simple --- otherwise we'd need to
- * worry about multiple result tupdescs and things like that.
+ * We only allow a single user statement in a prepared statement. This is
+ * mainly to keep the protocol simple --- otherwise we'd need to worry
+ * about multiple result tupdescs and things like that.
*/
if (list_length(parsetree_list) > 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot insert multiple commands into a prepared statement")));
+ errmsg("cannot insert multiple commands into a prepared statement")));
if (parsetree_list != NIL)
{
@@ -1246,11 +1238,11 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* If we are in an aborted transaction, reject all commands except
- * COMMIT/ROLLBACK. It is important that this test occur before
- * we try to do parse analysis, rewrite, or planning, since all
- * those phases try to do database accesses, which may fail in
- * abort state. (It might be safe to allow some additional utility
- * commands in this state, but not many...)
+ * COMMIT/ROLLBACK. It is important that this test occur before we
+ * try to do parse analysis, rewrite, or planning, since all those
+ * phases try to do database accesses, which may fail in abort state.
+ * (It might be safe to allow some additional utility commands in this
+ * state, but not many...)
*/
if (IsAbortedTransactionBlockState())
{
@@ -1271,13 +1263,13 @@ exec_parse_message(const char *query_string, /* string to execute */
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/*
* OK to analyze, rewrite, and plan this query. Note that the
- * originally specified parameter set is not required to be
- * complete, so we have to use parse_analyze_varparams().
+ * originally specified parameter set is not required to be complete,
+ * so we have to use parse_analyze_varparams().
*/
if (log_parser_stats)
ResetUsage();
@@ -1298,8 +1290,8 @@ exec_parse_message(const char *query_string, /* string to execute */
if (ptype == InvalidOid || ptype == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
- errmsg("could not determine data type of parameter $%d",
- i + 1)));
+ errmsg("could not determine data type of parameter $%d",
+ i + 1)));
param_list = lappend_oid(param_list, ptype);
}
@@ -1309,8 +1301,8 @@ exec_parse_message(const char *query_string, /* string to execute */
querytree_list = pg_rewrite_queries(querytree_list);
/*
- * If this is the unnamed statement and it has parameters, defer
- * query planning until Bind. Otherwise do it now.
+ * If this is the unnamed statement and it has parameters, defer query
+ * planning until Bind. Otherwise do it now.
*/
if (!is_named && numParams > 0)
plantree_list = NIL;
@@ -1363,10 +1355,9 @@ exec_parse_message(const char *query_string, /* string to execute */
QueryContext = NULL;
/*
- * We do NOT close the open transaction command here; that only
- * happens when the client sends Sync. Instead, do
- * CommandCounterIncrement just in case something happened during
- * parse/plan.
+ * We do NOT close the open transaction command here; that only happens
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * in case something happened during parse/plan.
*/
CommandCounterIncrement();
@@ -1408,9 +1399,9 @@ exec_bind_message(StringInfo input_message)
set_ps_display("BIND");
/*
- * Start up a transaction command so we can call functions etc. (Note
- * that this will normally change current memory context.) Nothing
- * happens if we are already in one.
+ * Start up a transaction command so we can call functions etc. (Note that
+ * this will normally change current memory context.) Nothing happens if
+ * we are already in one.
*/
start_xact_command();
@@ -1436,8 +1427,8 @@ exec_bind_message(StringInfo input_message)
if (numPFormats > 1 && numPFormats != numParams)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("bind message has %d parameter formats but %d parameters",
- numPFormats, numParams)));
+ errmsg("bind message has %d parameter formats but %d parameters",
+ numPFormats, numParams)));
/* Find prepared statement */
if (stmt_name[0] != '\0')
@@ -1449,18 +1440,18 @@ exec_bind_message(StringInfo input_message)
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (numParams != list_length(pstmt->argtype_list))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
- numParams, stmt_name, list_length(pstmt->argtype_list))));
+ numParams, stmt_name, list_length(pstmt->argtype_list))));
/*
- * Create the portal. Allow silent replacement of an existing portal
- * only if the unnamed portal is specified.
+ * Create the portal. Allow silent replacement of an existing portal only
+ * if the unnamed portal is specified.
*/
if (portal_name[0] == '\0')
portal = CreatePortal(portal_name, true, true);
@@ -1475,9 +1466,9 @@ exec_bind_message(StringInfo input_message)
/*
* Fetch parameters, if any, and store in the portal's memory context.
*
- * In an aborted transaction, we can't risk calling user-defined
- * functions, but we can't fail to Bind either, so bind all parameters
- * to null values.
+ * In an aborted transaction, we can't risk calling user-defined functions,
+ * but we can't fail to Bind either, so bind all parameters to null
+ * values.
*/
if (numParams > 0)
{
@@ -1522,13 +1513,13 @@ exec_bind_message(StringInfo input_message)
pformat = 0; /* default = text */
/*
- * Rather than copying data around, we just set up a
- * phony StringInfo pointing to the correct portion of
- * the message buffer. We assume we can scribble on
- * the message buffer so as to maintain the convention
- * that StringInfos have a trailing null. This is
- * grotty but is a big win when dealing with very
- * large parameter strings.
+ * Rather than copying data around, we just set up a phony
+ * StringInfo pointing to the correct portion of the
+ * message buffer. We assume we can scribble on the
+ * message buffer so as to maintain the convention that
+ * StringInfos have a trailing null. This is grotty but
+ * is a big win when dealing with very large parameter
+ * strings.
*/
pbuf.data = (char *) pvalue;
pbuf.maxlen = plength + 1;
@@ -1547,8 +1538,8 @@ exec_bind_message(StringInfo input_message)
getTypeInputInfo(ptype, &typinput, &typioparam);
/*
- * We have to do encoding conversion before
- * calling the typinput routine.
+ * We have to do encoding conversion before calling
+ * the typinput routine.
*/
pstring = pg_client_to_server(pbuf.data, plength);
params[i].value =
@@ -1566,8 +1557,7 @@ exec_bind_message(StringInfo input_message)
Oid typioparam;
/*
- * Call the parameter type's binary input
- * converter
+ * Call the parameter type's binary input converter
*/
getTypeBinaryInputInfo(ptype, &typreceive, &typioparam);
@@ -1580,9 +1570,9 @@ exec_bind_message(StringInfo input_message)
/* Trouble if it didn't eat the whole buffer */
if (pbuf.cursor != pbuf.len)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in bind parameter %d",
- i + 1)));
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("incorrect binary data format in bind parameter %d",
+ i + 1)));
}
else
{
@@ -1624,8 +1614,8 @@ exec_bind_message(StringInfo input_message)
pq_getmsgend(input_message);
/*
- * If we didn't plan the query before, do it now. This allows the
- * planner to make use of the concrete parameter values we now have.
+ * If we didn't plan the query before, do it now. This allows the planner
+ * to make use of the concrete parameter values we now have.
*
* This happens only for unnamed statements, and so switching into the
* statement context for planning is correct (see notes in
@@ -1679,7 +1669,8 @@ exec_execute_message(const char *portal_name, long max_rows)
bool is_trans_exit = false;
bool completed;
char completionTag[COMPLETION_TAG_BUFSIZE];
- struct timeval start_t, stop_t;
+ struct timeval start_t,
+ stop_t;
bool save_log_duration = log_duration;
int save_log_min_duration_statement = log_min_duration_statement;
bool save_log_statement_stats = log_statement_stats;
@@ -1697,10 +1688,10 @@ exec_execute_message(const char *portal_name, long max_rows)
errmsg("portal \"%s\" does not exist", portal_name)));
/*
- * If we re-issue an Execute protocol request against an existing
- * portal, then we are only fetching more rows rather than
- * completely re-executing the query from the start. atStart is never
- * reset for a v3 portal, so we are safe to use this check.
+ * If we re-issue an Execute protocol request against an existing portal,
+ * then we are only fetching more rows rather than completely re-executing
+ * the query from the start. atStart is never reset for a v3 portal, so we
+ * are safe to use this check.
*/
if (!portal->atStart)
execute_is_fetch = true;
@@ -1737,9 +1728,9 @@ exec_execute_message(const char *portal_name, long max_rows)
/*
* We use save_log_* so "SET log_duration = true" and "SET
- * log_min_duration_statement = true" don't report incorrect time
- * because gettimeofday() wasn't called. Similarly,
- * log_statement_stats has to be captured once.
+ * log_min_duration_statement = true" don't report incorrect time because
+ * gettimeofday() wasn't called. Similarly, log_statement_stats has to be
+ * captured once.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
gettimeofday(&start_t, NULL);
@@ -1778,9 +1769,8 @@ exec_execute_message(const char *portal_name, long max_rows)
}
/*
- * Create dest receiver in MessageContext (we don't want it in
- * transaction context, because that may get deleted if portal
- * contains VACUUM).
+ * Create dest receiver in MessageContext (we don't want it in transaction
+ * context, because that may get deleted if portal contains VACUUM).
*/
receiver = CreateDestReceiver(dest, portal);
@@ -1800,7 +1790,7 @@ exec_execute_message(const char *portal_name, long max_rows)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Check for cancel signal before we start execution */
@@ -1826,8 +1816,7 @@ exec_execute_message(const char *portal_name, long max_rows)
{
/*
* If this was a transaction control statement, commit it. We
- * will start a new xact command for the next command (if
- * any).
+ * will start a new xact command for the next command (if any).
*/
finish_xact_command();
}
@@ -1851,8 +1840,8 @@ exec_execute_message(const char *portal_name, long max_rows)
}
/*
- * Combine processing here as we need to calculate the query duration
- * in both instances.
+ * Combine processing here as we need to calculate the query duration in
+ * both instances.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
{
@@ -1865,30 +1854,30 @@ exec_execute_message(const char *portal_name, long max_rows)
stop_t.tv_usec += 1000000;
}
usecs = (long) (stop_t.tv_sec - start_t.tv_sec) * 1000000 +
- (long) (stop_t.tv_usec - start_t.tv_usec);
+ (long) (stop_t.tv_usec - start_t.tv_usec);
/* Only print duration if we previously printed the statement. */
if (log_statement == LOGSTMT_ALL && save_log_duration)
ereport(LOG,
(errmsg("duration: %ld.%03ld ms",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
/*
- * Output a duration_statement to the log if the query has
- * exceeded the min duration, or if we are to print all durations.
+ * Output a duration_statement to the log if the query has exceeded
+ * the min duration, or if we are to print all durations.
*/
if (save_log_min_duration_statement == 0 ||
(save_log_min_duration_statement > 0 &&
usecs >= save_log_min_duration_statement * 1000))
ereport(LOG,
(errmsg("duration: %ld.%03ld ms statement: %sEXECUTE %s [PREPARE: %s]",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
(execute_is_fetch) ? "FETCH from " : "",
- (*portal_name != '\0') ? portal_name : "<unnamed>",
+ (*portal_name != '\0') ? portal_name : "<unnamed>",
portal->sourceText ? portal->sourceText : "")));
}
@@ -1921,7 +1910,7 @@ exec_describe_statement_message(const char *stmt_name)
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (whereToSendOutput != Remote)
@@ -1999,7 +1988,7 @@ start_xact_command(void)
enable_sig_alarm(StatementTimeout, true);
else
cancel_from_timeout = false;
-
+
xact_started = true;
}
}
@@ -2052,28 +2041,28 @@ quickdie(SIGNAL_ARGS)
PG_SETMASK(&BlockSig);
/*
- * Ideally this should be ereport(FATAL), but then we'd not get
- * control back...
+ * Ideally this should be ereport(FATAL), but then we'd not get control
+ * back...
*/
ereport(WARNING,
(errcode(ERRCODE_CRASH_SHUTDOWN),
errmsg("terminating connection because of crash of another server process"),
- errdetail("The postmaster has commanded this server process to roll back"
- " the current transaction and exit, because another"
- " server process exited abnormally and possibly corrupted"
- " shared memory."),
+ errdetail("The postmaster has commanded this server process to roll back"
+ " the current transaction and exit, because another"
+ " server process exited abnormally and possibly corrupted"
+ " shared memory."),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
/*
* DO NOT proc_exit() -- we're here because shared memory may be
- * corrupted, so we don't want to try to clean up our transaction.
- * Just nail the windows shut and get out of town.
+ * corrupted, so we don't want to try to clean up our transaction. Just
+ * nail the windows shut and get out of town.
*
- * Note we do exit(1) not exit(0). This is to force the postmaster into
- * a system reset cycle if some idiot DBA sends a manual SIGQUIT to a
- * random backend. This is necessary precisely because we don't clean
- * up our shared memory state.
+ * Note we do exit(1) not exit(0). This is to force the postmaster into a
+ * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state.
*/
exit(1);
}
@@ -2094,8 +2083,8 @@ die(SIGNAL_ARGS)
ProcDiePending = true;
/*
- * If it's safe to interrupt, and we're waiting for input or a
- * lock, service the interrupt immediately
+ * If it's safe to interrupt, and we're waiting for input or a lock,
+ * service the interrupt immediately
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
@@ -2147,9 +2136,9 @@ StatementCancelHandler(SIGNAL_ARGS)
QueryCancelPending = true;
/*
- * If it's safe to interrupt, and we're waiting for a lock,
- * service the interrupt immediately. No point in interrupting if
- * we're waiting for input, however.
+ * If it's safe to interrupt, and we're waiting for a lock, service
+ * the interrupt immediately. No point in interrupting if we're
+ * waiting for input, however.
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
@@ -2179,9 +2168,9 @@ FloatExceptionHandler(SIGNAL_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
errmsg("floating-point exception"),
- errdetail("An invalid floating-point operation was signaled. "
- "This probably means an out-of-range result or an "
- "invalid operation, such as division by zero.")));
+ errdetail("An invalid floating-point operation was signaled. "
+ "This probably means an out-of-range result or an "
+ "invalid operation, such as division by zero.")));
}
/* SIGHUP: set flag to re-read config file at next convenient time */
@@ -2215,7 +2204,7 @@ ProcessInterrupts(void)
DisableCatchupInterrupt();
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to administrator command")));
+ errmsg("terminating connection due to administrator command")));
}
if (QueryCancelPending)
{
@@ -2255,15 +2244,14 @@ check_stack_depth(void)
/*
* Compute distance from PostgresMain's local variables to my own
*
- * Note: in theory stack_depth should be ptrdiff_t or some such, but
- * since the whole point of this code is to bound the value to
- * something much less than integer-sized, int should work fine.
+ * Note: in theory stack_depth should be ptrdiff_t or some such, but since
+ * the whole point of this code is to bound the value to something much
+ * less than integer-sized, int should work fine.
*/
stack_depth = (int) (stack_base_ptr - &stack_top_loc);
/*
- * Take abs value, since stacks grow up on some machines, down on
- * others
+ * Take abs value, since stacks grow up on some machines, down on others
*/
if (stack_depth < 0)
stack_depth = -stack_depth;
@@ -2271,10 +2259,9 @@ check_stack_depth(void)
/*
* Trouble?
*
- * The test on stack_base_ptr prevents us from erroring out if called
- * during process setup or in a non-backend process. Logically it
- * should be done first, but putting it here avoids wasting cycles
- * during normal cases.
+ * The test on stack_base_ptr prevents us from erroring out if called during
+ * process setup or in a non-backend process. Logically it should be done
+ * first, but putting it here avoids wasting cycles during normal cases.
*/
if (stack_depth > max_stack_depth_bytes &&
stack_base_ptr != NULL)
@@ -2385,8 +2372,8 @@ PostgresMain(int argc, char *argv[], const char *username)
char *userDoption = NULL;
bool secure;
int errs = 0;
- int debug_flag = -1; /* -1 means not given */
- List *guc_names = NIL; /* for SUSET options */
+ int debug_flag = -1; /* -1 means not given */
+ List *guc_names = NIL; /* for SUSET options */
List *guc_values = NIL;
GucContext ctx;
GucSource gucsource;
@@ -2595,9 +2582,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* ignore system indexes
*
- * As of PG 7.4 this is safe to allow from the client, since
- * it only disables reading the system indexes, not
- * writing them. Worst case consequence is slowness.
+ * As of PG 7.4 this is safe to allow from the client, since it
+ * only disables reading the system indexes, not writing them.
+ * Worst case consequence is slowness.
*/
IgnoreSystemIndexes(true);
break;
@@ -2621,8 +2608,7 @@ PostgresMain(int argc, char *argv[], const char *username)
{
dbname = strdup(optarg);
- secure = false; /* subsequent switches are NOT
- * secure */
+ secure = false; /* subsequent switches are NOT secure */
ctx = PGC_BACKEND;
gucsource = PGC_S_CLIENT;
}
@@ -2641,8 +2627,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* s - report usage statistics (timings) after each query
*
- * Since log options are SUSET, we need to postpone unless
- * still in secure context
+ * Since log options are SUSET, we need to postpone unless still
+ * in secure context
*/
if (ctx == PGC_BACKEND)
PendingConfigOption("log_statement_stats", "true");
@@ -2724,8 +2710,8 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * If a SUSET option, must postpone evaluation, unless
- * we are still reading secure switches.
+ * If a SUSET option, must postpone evaluation, unless we
+ * are still reading secure switches.
*/
if (ctx == PGC_BACKEND && IsSuperuserConfigOption(name))
PendingConfigOption(name, value);
@@ -2744,8 +2730,8 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * Process any additional GUC variable settings passed in startup
- * packet. These are handled exactly like command-line variables.
+ * Process any additional GUC variable settings passed in startup packet.
+ * These are handled exactly like command-line variables.
*/
if (MyProcPort != NULL)
{
@@ -2779,16 +2765,16 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* Set up signal handlers and masks.
*
- * Note that postmaster blocked all signals before forking child process,
- * so there is no race condition whereby we might receive a signal
- * before we have set up the handler.
+ * Note that postmaster blocked all signals before forking child process, so
+ * there is no race condition whereby we might receive a signal before we
+ * have set up the handler.
*
- * Also note: it's best not to use any signals that are SIG_IGNored in
- * the postmaster. If such a signal arrives before we are able to
- * change the handler to non-SIG_IGN, it'll get dropped. Instead,
- * make a dummy handler in the postmaster to reserve the signal. (Of
- * course, this isn't an issue for signals that are locally generated,
- * such as SIGALRM and SIGPIPE.)
+ * Also note: it's best not to use any signals that are SIG_IGNored in the
+ * postmaster. If such a signal arrives before we are able to change the
+ * handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
+ * handler in the postmaster to reserve the signal. (Of course, this isn't
+ * an issue for signals that are locally generated, such as SIGALRM and
+ * SIGPIPE.)
*/
pqsignal(SIGHUP, SigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
@@ -2799,8 +2785,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* Ignore failure to write to frontend. Note: if frontend closes
* connection, we will notice it and exit cleanly when control next
- * returns to outer loop. This seems safer than forcing exit in the
- * midst of output during who-knows-what operation...
+ * returns to outer loop. This seems safer than forcing exit in the midst
+ * of output during who-knows-what operation...
*/
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, CatchupInterruptHandler);
@@ -2808,11 +2794,9 @@ PostgresMain(int argc, char *argv[], const char *username)
pqsignal(SIGFPE, FloatExceptionHandler);
/*
- * Reset some signals that are accepted by postmaster but not by
- * backend
+ * Reset some signals that are accepted by postmaster but not by backend
*/
- pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some
- * platforms */
+ pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some platforms */
pqinitmask();
@@ -2833,8 +2817,8 @@ PostgresMain(int argc, char *argv[], const char *username)
{
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid command-line arguments for server process"),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errmsg("invalid command-line arguments for server process"),
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
BaseInit();
@@ -2848,7 +2832,7 @@ PostgresMain(int argc, char *argv[], const char *username)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("%s: invalid command-line arguments",
argv[0]),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
else if (argc - optind == 1)
dbname = argv[optind];
@@ -2861,8 +2845,8 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * Validate we have been given a reasonable-looking DataDir (if
- * under postmaster, assume postmaster did this already).
+ * Validate we have been given a reasonable-looking DataDir (if under
+ * postmaster, assume postmaster did this already).
*/
Assert(DataDir);
ValidatePgVersion(DataDir);
@@ -2885,15 +2869,15 @@ PostgresMain(int argc, char *argv[], const char *username)
on_shmem_exit(ShutdownXLOG, 0);
/*
- * Read any existing FSM cache file, and register to write one out
- * at exit.
+ * Read any existing FSM cache file, and register to write one out at
+ * exit.
*/
LoadFreeSpaceMap();
on_shmem_exit(DumpFreeSpaceMap, 0);
/*
- * We have to build the flat file for pg_database, but not for
- * the user and group tables, since we won't try to do authentication.
+ * We have to build the flat file for pg_database, but not for the
+ * user and group tables, since we won't try to do authentication.
*/
BuildFlatFiles(true);
}
@@ -2901,9 +2885,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* General initialization.
*
- * NOTE: if you are tempted to add code in this vicinity, consider
- * putting it inside InitPostgres() instead. In particular, anything
- * that involves database access should be there, not here.
+ * NOTE: if you are tempted to add code in this vicinity, consider putting it
+ * inside InitPostgres() instead. In particular, anything that involves
+ * database access should be there, not here.
*/
ereport(DEBUG3,
(errmsg_internal("InitPostgres")));
@@ -2943,8 +2927,8 @@ PostgresMain(int argc, char *argv[], const char *username)
BeginReportingGUCOptions();
/*
- * Also set up handler to log session end; we have to wait till now
- * to be sure Log_disconnections has its final value.
+ * Also set up handler to log session end; we have to wait till now to be
+ * sure Log_disconnections has its final value.
*/
if (IsUnderPostmaster && Log_disconnections)
on_proc_exit(log_disconnections, 0);
@@ -2989,17 +2973,16 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* POSTGRES main processing loop begins here
*
- * If an exception is encountered, processing resumes here so we abort
- * the current transaction and start a new one.
+ * If an exception is encountered, processing resumes here so we abort the
+ * current transaction and start a new one.
*
- * You might wonder why this isn't coded as an infinite loop around a
- * PG_TRY construct. The reason is that this is the bottom of the
- * exception stack, and so with PG_TRY there would be no exception
- * handler in force at all during the CATCH part. By leaving the
- * outermost setjmp always active, we have at least some chance of
- * recovering from an error during error recovery. (If we get into an
- * infinite loop thereby, it will soon be stopped by overflow of
- * elog.c's internal state stack.)
+ * You might wonder why this isn't coded as an infinite loop around a PG_TRY
+ * construct. The reason is that this is the bottom of the exception
+ * stack, and so with PG_TRY there would be no exception handler in force
+ * at all during the CATCH part. By leaving the outermost setjmp always
+ * active, we have at least some chance of recovering from an error during
+ * error recovery. (If we get into an infinite loop thereby, it will soon
+ * be stopped by overflow of elog.c's internal state stack.)
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
@@ -3008,9 +2991,8 @@ PostgresMain(int argc, char *argv[], const char *username)
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
* AbortTransaction() instead. The only stuff done directly here
- * should be stuff that is guaranteed to apply *only* for
- * outer-level error recovery, such as adjusting the FE/BE
- * protocol status.
+ * should be stuff that is guaranteed to apply *only* for outer-level
+ * error recovery, such as adjusting the FE/BE protocol status.
*/
/* Since not using PG_TRY, must reset error stack by hand */
@@ -3020,18 +3002,17 @@ PostgresMain(int argc, char *argv[], const char *username)
HOLD_INTERRUPTS();
/*
- * Forget any pending QueryCancel request, since we're returning
- * to the idle loop anyway, and cancel the statement timer if
- * running.
+ * Forget any pending QueryCancel request, since we're returning to
+ * the idle loop anyway, and cancel the statement timer if running.
*/
QueryCancelPending = false;
disable_sig_alarm(true);
QueryCancelPending = false; /* again in case timeout occurred */
/*
- * Turn off these interrupts too. This is only needed here and
- * not in other exception-catching places since these interrupts
- * are only enabled while we wait for client input.
+ * Turn off these interrupts too. This is only needed here and not in
+ * other exception-catching places since these interrupts are only
+ * enabled while we wait for client input.
*/
DoingCommandRead = false;
DisableNotifyInterrupt();
@@ -3044,8 +3025,8 @@ PostgresMain(int argc, char *argv[], const char *username)
EmitErrorReport();
/*
- * Make sure debug_query_string gets reset before we possibly
- * clobber the storage it points at.
+ * Make sure debug_query_string gets reset before we possibly clobber
+ * the storage it points at.
*/
debug_query_string = NULL;
@@ -3055,16 +3036,16 @@ PostgresMain(int argc, char *argv[], const char *username)
AbortCurrentTransaction();
/*
- * Now return to normal top-level context and clear ErrorContext
- * for next time.
+ * Now return to normal top-level context and clear ErrorContext for
+ * next time.
*/
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
QueryContext = NULL;
/*
- * If we were handling an extended-query-protocol message,
- * initiate skip till next Sync. This also causes us not to issue
+ * If we were handling an extended-query-protocol message, initiate
+ * skip till next Sync. This also causes us not to issue
* ReadyForQuery (until we get Sync).
*/
if (doing_extended_query_message)
@@ -3098,8 +3079,8 @@ PostgresMain(int argc, char *argv[], const char *username)
doing_extended_query_message = false;
/*
- * Release storage left over from prior query cycle, and create a
- * new query input buffer in the cleared MessageContext.
+ * Release storage left over from prior query cycle, and create a new
+ * query input buffer in the cleared MessageContext.
*/
MemoryContextSwitchTo(MessageContext);
MemoryContextResetAndDeleteChildren(MessageContext);
@@ -3107,16 +3088,16 @@ PostgresMain(int argc, char *argv[], const char *username)
initStringInfo(&input_message);
/*
- * (1) If we've reached idle state, tell the frontend we're ready
- * for a new query.
+ * (1) If we've reached idle state, tell the frontend we're ready for
+ * a new query.
*
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to send collected statistics to the
* collector, and to update the PS stats display. We avoid doing
- * those every time through the message loop because it'd slow
- * down processing of batched messages, and because we don't want
- * to report uncommitted updates (that confuses autovacuum).
+ * those every time through the message loop because it'd slow down
+ * processing of batched messages, and because we don't want to report
+ * uncommitted updates (that confuses autovacuum).
*/
if (send_rfq)
{
@@ -3138,10 +3119,10 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * (2) Allow asynchronous signals to be executed immediately
- * if they come in while we are waiting for client input.
- * (This must be conditional since we don't want, say, reads on
- * behalf of COPY FROM STDIN doing the same thing.)
+ * (2) Allow asynchronous signals to be executed immediately if they
+ * come in while we are waiting for client input. (This must be
+ * conditional since we don't want, say, reads on behalf of COPY FROM
+ * STDIN doing the same thing.)
*/
QueryCancelPending = false; /* forget any earlier CANCEL signal */
DoingCommandRead = true;
@@ -3157,8 +3138,8 @@ PostgresMain(int argc, char *argv[], const char *username)
DoingCommandRead = false;
/*
- * (5) check for any other interesting events that happened while
- * we slept.
+ * (5) check for any other interesting events that happened while we
+ * slept.
*/
if (got_SIGHUP)
{
@@ -3216,8 +3197,8 @@ PostgresMain(int argc, char *argv[], const char *username)
case 'B': /* bind */
/*
- * this message is complex enough that it seems best to
- * put the field extraction out-of-line
+ * this message is complex enough that it seems best to put
+ * the field extraction out-of-line
*/
exec_bind_message(&input_message);
break;
@@ -3306,8 +3287,8 @@ PostgresMain(int argc, char *argv[], const char *username)
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid CLOSE message subtype %d",
- close_type)));
+ errmsg("invalid CLOSE message subtype %d",
+ close_type)));
break;
}
@@ -3336,8 +3317,8 @@ PostgresMain(int argc, char *argv[], const char *username)
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid DESCRIBE message subtype %d",
- describe_type)));
+ errmsg("invalid DESCRIBE message subtype %d",
+ describe_type)));
break;
}
}
@@ -3356,16 +3337,16 @@ PostgresMain(int argc, char *argv[], const char *username)
break;
/*
- * 'X' means that the frontend is closing down the socket.
- * EOF means unexpected loss of frontend connection.
- * Either way, perform normal shutdown.
+ * 'X' means that the frontend is closing down the socket. EOF
+ * means unexpected loss of frontend connection. Either way,
+ * perform normal shutdown.
*/
case 'X':
case EOF:
/*
- * Reset whereToSendOutput to prevent ereport from
- * attempting to send any more messages to client.
+ * Reset whereToSendOutput to prevent ereport from attempting
+ * to send any more messages to client.
*/
if (whereToSendOutput == Remote)
whereToSendOutput = None;
@@ -3373,9 +3354,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* NOTE: if you are tempted to add more code here, DON'T!
* Whatever you had in mind to do should be set up as an
- * on_proc_exit or on_shmem_exit callback, instead.
- * Otherwise it will fail to be called during other
- * backend-shutdown scenarios.
+ * on_proc_exit or on_shmem_exit callback, instead. Otherwise
+ * it will fail to be called during other backend-shutdown
+ * scenarios.
*/
proc_exit(0);
@@ -3385,8 +3366,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* Accept but ignore these messages, per protocol spec; we
- * probably got here because a COPY failed, and the
- * frontend is still sending data.
+ * probably got here because a COPY failed, and the frontend
+ * is still sending data.
*/
break;
@@ -3454,23 +3435,22 @@ ShowUsage(const char *title)
/*
* the only stats we don't show here are for memory usage -- i can't
- * figure out how to interpret the relevant fields in the rusage
- * struct, and they change names across o/s platforms, anyway. if you
- * can figure out what the entries mean, you can somehow extract
- * resident set size, shared text size, and unshared data and stack
- * sizes.
+ * figure out how to interpret the relevant fields in the rusage struct,
+ * and they change names across o/s platforms, anyway. if you can figure
+ * out what the entries mean, you can somehow extract resident set size,
+ * shared text size, and unshared data and stack sizes.
*/
initStringInfo(&str);
appendStringInfo(&str, "! system usage stats:\n");
appendStringInfo(&str,
- "!\t%ld.%06ld elapsed %ld.%06ld user %ld.%06ld system sec\n",
+ "!\t%ld.%06ld elapsed %ld.%06ld user %ld.%06ld system sec\n",
(long) (elapse_t.tv_sec - Save_t.tv_sec),
(long) (elapse_t.tv_usec - Save_t.tv_usec),
(long) (r.ru_utime.tv_sec - Save_r.ru_utime.tv_sec),
- (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
+ (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
(long) (r.ru_stime.tv_sec - Save_r.ru_stime.tv_sec),
- (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
+ (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
appendStringInfo(&str,
"!\t[%ld.%06ld user %ld.%06ld sys total]\n",
(long) user.tv_sec,
@@ -3486,21 +3466,21 @@ ShowUsage(const char *title)
r.ru_oublock - Save_r.ru_oublock,
r.ru_inblock, r.ru_oublock);
appendStringInfo(&str,
- "!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
+ "!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
r.ru_majflt - Save_r.ru_majflt,
r.ru_minflt - Save_r.ru_minflt,
r.ru_majflt, r.ru_minflt,
r.ru_nswap - Save_r.ru_nswap,
r.ru_nswap);
appendStringInfo(&str,
- "!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
+ "!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
r.ru_nsignals - Save_r.ru_nsignals,
r.ru_nsignals,
r.ru_msgrcv - Save_r.ru_msgrcv,
r.ru_msgsnd - Save_r.ru_msgsnd,
r.ru_msgrcv, r.ru_msgsnd);
appendStringInfo(&str,
- "!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
+ "!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
r.ru_nvcsw - Save_r.ru_nvcsw,
r.ru_nivcsw - Save_r.ru_nivcsw,
r.ru_nvcsw, r.ru_nivcsw);
@@ -3527,11 +3507,11 @@ ShowUsage(const char *title)
static void
log_disconnections(int code, Datum arg)
{
- Port *port = MyProcPort;
- struct timeval end;
- int hours,
- minutes,
- seconds;
+ Port *port = MyProcPort;
+ struct timeval end;
+ int hours,
+ minutes,
+ seconds;
gettimeofday(&end, NULL);
if (end.tv_usec < port->session_start.tv_usec)
@@ -3553,5 +3533,5 @@ log_disconnections(int code, Datum arg)
"user=%s database=%s host=%s%s%s",
hours, minutes, seconds, (int) (end.tv_usec / 10000),
port->user_name, port->database_name, port->remote_host,
- port->remote_port[0] ? " port=" : "", port->remote_port)));
+ port->remote_port[0] ? " port=" : "", port->remote_port)));
}
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 75eb75f6de7..edf2ba44aeb 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.94 2005/06/22 17:45:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.95 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ CreateQueryDesc(Query *parsetree,
qd->parsetree = parsetree; /* parse tree */
qd->plantree = plantree; /* plan */
qd->snapshot = snapshot; /* snapshot */
- qd->crosscheck_snapshot = crosscheck_snapshot; /* RI check snapshot */
+ qd->crosscheck_snapshot = crosscheck_snapshot; /* RI check snapshot */
qd->dest = dest; /* output dest */
qd->params = params; /* parameter values passed into query */
qd->doInstrument = doInstrument; /* instrumentation wanted? */
@@ -138,16 +138,16 @@ ProcessQuery(Query *parsetree,
* SELECT INTO table (a/k/a CREATE AS ... SELECT).
*
* Override the normal communication destination; execMain.c
- * special-cases this case. (Perhaps would be cleaner to have
- * an additional destination type?)
+ * special-cases this case. (Perhaps would be cleaner to have an
+ * additional destination type?)
*/
dest = None_Receiver;
}
}
/*
- * Must always set snapshot for plannable queries. Note we assume
- * that caller will take care of restoring ActiveSnapshot on exit/error.
+ * Must always set snapshot for plannable queries. Note we assume that
+ * caller will take care of restoring ActiveSnapshot on exit/error.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
@@ -191,7 +191,7 @@ ProcessQuery(Query *parsetree,
else
lastOid = InvalidOid;
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+ "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
break;
case CMD_UPDATE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
@@ -270,31 +270,31 @@ FetchPortalTargetList(Portal portal)
return ((Query *) linitial(portal->parseTrees))->targetList;
if (portal->strategy == PORTAL_UTIL_SELECT)
{
- Node *utilityStmt;
+ Node *utilityStmt;
utilityStmt = ((Query *) linitial(portal->parseTrees))->utilityStmt;
switch (nodeTag(utilityStmt))
{
case T_FetchStmt:
- {
- FetchStmt *substmt = (FetchStmt *) utilityStmt;
- Portal subportal;
+ {
+ FetchStmt *substmt = (FetchStmt *) utilityStmt;
+ Portal subportal;
- Assert(!substmt->ismove);
- subportal = GetPortalByName(substmt->portalname);
- Assert(PortalIsValid(subportal));
- return FetchPortalTargetList(subportal);
- }
+ Assert(!substmt->ismove);
+ subportal = GetPortalByName(substmt->portalname);
+ Assert(PortalIsValid(subportal));
+ return FetchPortalTargetList(subportal);
+ }
case T_ExecuteStmt:
- {
- ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
- PreparedStatement *entry;
+ {
+ ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
+ PreparedStatement *entry;
- Assert(!substmt->into);
- entry = FetchPreparedStatement(substmt->name, true);
- return FetchPreparedStatementTargetList(entry);
- }
+ Assert(!substmt->into);
+ entry = FetchPreparedStatement(substmt->name, true);
+ return FetchPreparedStatementTargetList(entry);
+ }
default:
break;
@@ -335,8 +335,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
AssertState(portal->status == PORTAL_NEW); /* else extra PortalStart */
/*
- * Set up global portal context pointers. (Should we set
- * QueryContext?)
+ * Set up global portal context pointers. (Should we set QueryContext?)
*/
saveActivePortal = ActivePortal;
saveActiveSnapshot = ActiveSnapshot;
@@ -345,7 +344,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
@@ -367,7 +366,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
case PORTAL_ONE_SELECT:
/*
- * Must set snapshot before starting executor. Be sure to
+ * Must set snapshot before starting executor. Be sure to
* copy it into the portal's context.
*/
if (snapshot)
@@ -376,11 +375,11 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Create QueryDesc in portal's context; for the moment,
- * set the destination to None.
+ * Create QueryDesc in portal's context; for the moment, set
+ * the destination to None.
*/
queryDesc = CreateQueryDesc((Query *) linitial(portal->parseTrees),
- (Plan *) linitial(portal->planTrees),
+ (Plan *) linitial(portal->planTrees),
ActiveSnapshot,
InvalidSnapshot,
None_Receiver,
@@ -388,10 +387,10 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
false);
/*
- * We do *not* call AfterTriggerBeginQuery() here. We
- * assume that a SELECT cannot queue any triggers. It
- * would be messy to support triggers since the execution
- * of the portal may be interleaved with other queries.
+ * We do *not* call AfterTriggerBeginQuery() here. We assume
+ * that a SELECT cannot queue any triggers. It would be messy
+ * to support triggers since the execution of the portal may
+ * be interleaved with other queries.
*/
/*
@@ -421,8 +420,8 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
case PORTAL_UTIL_SELECT:
/*
- * We don't set snapshot here, because
- * PortalRunUtility will take care of it if needed.
+ * We don't set snapshot here, because PortalRunUtility will
+ * take care of it if needed.
*/
portal->tupDesc =
UtilityTupleDescriptor(((Query *) linitial(portal->parseTrees))->utilityStmt);
@@ -579,14 +578,14 @@ PortalRun(Portal portal, long count,
*
* We have to play a special game here to support utility commands like
* VACUUM and CLUSTER, which internally start and commit transactions.
- * When we are called to execute such a command, CurrentResourceOwner
- * will be pointing to the TopTransactionResourceOwner --- which will
- * be destroyed and replaced in the course of the internal commit and
- * restart. So we need to be prepared to restore it as pointing to
- * the exit-time TopTransactionResourceOwner. (Ain't that ugly? This
- * idea of internally starting whole new transactions is not good.)
- * CurrentMemoryContext has a similar problem, but the other pointers
- * we save here will be NULL or pointing to longer-lived objects.
+ * When we are called to execute such a command, CurrentResourceOwner will
+ * be pointing to the TopTransactionResourceOwner --- which will be
+ * destroyed and replaced in the course of the internal commit and
+ * restart. So we need to be prepared to restore it as pointing to the
+ * exit-time TopTransactionResourceOwner. (Ain't that ugly? This idea of
+ * internally starting whole new transactions is not good.)
+ * CurrentMemoryContext has a similar problem, but the other pointers we
+ * save here will be NULL or pointing to longer-lived objects.
*/
saveTopTransactionResourceOwner = TopTransactionResourceOwner;
saveTopTransactionContext = TopTransactionContext;
@@ -599,7 +598,7 @@ PortalRun(Portal portal, long count,
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
QueryContext = portal->queryContext;
@@ -618,8 +617,7 @@ PortalRun(Portal portal, long count,
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now
- * true.
+ * Since it's a forward fetch, say DONE iff atEnd is now true.
*/
result = portal->atEnd;
break;
@@ -658,8 +656,7 @@ PortalRun(Portal portal, long count,
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now
- * true.
+ * Since it's a forward fetch, say DONE iff atEnd is now true.
*/
result = portal->atEnd;
break;
@@ -750,8 +747,8 @@ PortalRunSelect(Portal portal,
uint32 nprocessed;
/*
- * NB: queryDesc will be NULL if we are fetching from a held cursor or
- * a completed utility query; can't use it in that path.
+ * NB: queryDesc will be NULL if we are fetching from a held cursor or a
+ * completed utility query; can't use it in that path.
*/
queryDesc = PortalGetQueryDesc(portal);
@@ -768,15 +765,15 @@ PortalRunSelect(Portal portal,
queryDesc->dest = dest;
/*
- * Determine which direction to go in, and check to see if we're
- * already at the end of the available tuples in that direction. If
- * so, set the direction to NoMovement to avoid trying to fetch any
- * tuples. (This check exists because not all plan node types are
- * robust about being called again if they've already returned NULL
- * once.) Then call the executor (we must not skip this, because the
- * destination needs to see a setup and shutdown even if no tuples are
- * available). Finally, update the portal position state depending on
- * the number of tuples that were retrieved.
+ * Determine which direction to go in, and check to see if we're already
+ * at the end of the available tuples in that direction. If so, set the
+ * direction to NoMovement to avoid trying to fetch any tuples. (This
+ * check exists because not all plan node types are robust about being
+ * called again if they've already returned NULL once.) Then call the
+ * executor (we must not skip this, because the destination needs to see a
+ * setup and shutdown even if no tuples are available). Finally, update
+ * the portal position state depending on the number of tuples that were
+ * retrieved.
*/
if (forward)
{
@@ -924,9 +921,9 @@ RunFromStore(Portal portal, ScanDirection direction, long count,
ExecClearTuple(slot);
/*
- * check our tuple count.. if we've processed the proper
- * number then quit, else loop again and process more tuples.
- * Zero count means no limit.
+ * check our tuple count.. if we've processed the proper number
+ * then quit, else loop again and process more tuples. Zero count
+ * means no limit.
*/
current_tuple_count++;
if (count && count == current_tuple_count)
@@ -955,19 +952,18 @@ PortalRunUtility(Portal portal, Query *query,
(errmsg_internal("ProcessUtility")));
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do
- * this seems to be to enumerate those that do not need one; this is a
- * short list. Transaction control, LOCK, and SET must *not* set a
- * snapshot since they need to be executable at the start of a
- * serializable transaction without freezing a snapshot. By extension
- * we allow SHOW not to set a snapshot. The other stmts listed are
- * just efficiency hacks. Beware of listing anything that can modify
- * the database --- if, say, it has to update an index with
- * expressions that invoke user-defined functions, then it had better
- * have a snapshot.
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * seems to be to enumerate those that do not need one; this is a short
+ * list. Transaction control, LOCK, and SET must *not* set a snapshot
+ * since they need to be executable at the start of a serializable
+ * transaction without freezing a snapshot. By extension we allow SHOW
+ * not to set a snapshot. The other stmts listed are just efficiency
+ * hacks. Beware of listing anything that can modify the database --- if,
+ * say, it has to update an index with expressions that invoke
+ * user-defined functions, then it had better have a snapshot.
*
- * Note we assume that caller will take care of restoring ActiveSnapshot
- * on exit/error.
+ * Note we assume that caller will take care of restoring ActiveSnapshot on
+ * exit/error.
*/
if (!(IsA(utilityStmt, TransactionStmt) ||
IsA(utilityStmt, LockStmt) ||
@@ -1020,13 +1016,13 @@ PortalRunMulti(Portal portal,
/*
* If the destination is RemoteExecute, change to None. The reason is
- * that the client won't be expecting any tuples, and indeed has no
- * way to know what they are, since there is no provision for Describe
- * to send a RowDescription message when this portal execution
- * strategy is in effect. This presently will only affect SELECT
- * commands added to non-SELECT queries by rewrite rules: such
- * commands will be executed, but the results will be discarded unless
- * you use "simple Query" protocol.
+ * that the client won't be expecting any tuples, and indeed has no way to
+ * know what they are, since there is no provision for Describe to send a
+ * RowDescription message when this portal execution strategy is in
+ * effect. This presently will only affect SELECT commands added to
+ * non-SELECT queries by rewrite rules: such commands will be executed,
+ * but the results will be discarded unless you use "simple Query"
+ * protocol.
*/
if (dest->mydest == RemoteExecute)
dest = None_Receiver;
@@ -1034,8 +1030,8 @@ PortalRunMulti(Portal portal,
altdest = None_Receiver;
/*
- * Loop to handle the individual queries generated from a single
- * parsetree by analysis and rewrite.
+ * Loop to handle the individual queries generated from a single parsetree
+ * by analysis and rewrite.
*/
forboth(querylist_item, portal->parseTrees,
planlist_item, portal->planTrees)
@@ -1087,8 +1083,8 @@ PortalRunMulti(Portal portal,
}
/*
- * Increment command counter between queries, but not after the
- * last one.
+ * Increment command counter between queries, but not after the last
+ * one.
*/
if (lnext(planlist_item) != NULL)
CommandCounterIncrement();
@@ -1102,12 +1098,12 @@ PortalRunMulti(Portal portal,
}
/*
- * If a command completion tag was supplied, use it. Otherwise use
- * the portal's commandTag as the default completion tag.
+ * If a command completion tag was supplied, use it. Otherwise use the
+ * portal's commandTag as the default completion tag.
*
- * Exception: clients will expect INSERT/UPDATE/DELETE tags to have
- * counts, so fake something up if necessary. (This could happen if
- * the original query was replaced by a DO INSTEAD rule.)
+ * Exception: clients will expect INSERT/UPDATE/DELETE tags to have counts,
+ * so fake something up if necessary. (This could happen if the original
+ * query was replaced by a DO INSTEAD rule.)
*/
if (completionTag && completionTag[0] == '\0')
{
@@ -1164,7 +1160,7 @@ PortalRunFetch(Portal portal,
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
QueryContext = portal->queryContext;
@@ -1276,11 +1272,11 @@ DoPortalRunFetch(Portal portal,
if (count > 0)
{
/*
- * Definition: Rewind to start, advance count-1 rows,
- * return next row (if any). In practice, if the goal is
- * less than halfway back to the start, it's better to
- * scan from where we are. In any case, we arrange to
- * fetch the target row going forwards.
+ * Definition: Rewind to start, advance count-1 rows, return
+ * next row (if any). In practice, if the goal is less than
+ * halfway back to the start, it's better to scan from where
+ * we are. In any case, we arrange to fetch the target row
+ * going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
count - 1 <= portal->portalPos / 2)
@@ -1309,11 +1305,10 @@ DoPortalRunFetch(Portal portal,
{
/*
* Definition: Advance to end, back up abs(count)-1 rows,
- * return prior row (if any). We could optimize this if
- * we knew in advance where the end was, but typically we
- * won't. (Is it worth considering case where count > half
- * of size of query? We could rewind once we know the
- * size ...)
+ * return prior row (if any). We could optimize this if we
+ * knew in advance where the end was, but typically we won't.
+ * (Is it worth considering case where count > half of size of
+ * query? We could rewind once we know the size ...)
*/
PortalRunSelect(portal, true, FETCH_ALL, None_Receiver);
if (count < -1)
@@ -1332,8 +1327,7 @@ DoPortalRunFetch(Portal portal,
if (count > 0)
{
/*
- * Definition: advance count-1 rows, return next row (if
- * any).
+ * Definition: advance count-1 rows, return next row (if any).
*/
if (count > 1)
PortalRunSelect(portal, true, count - 1, None_Receiver);
@@ -1342,8 +1336,8 @@ DoPortalRunFetch(Portal portal,
else if (count < 0)
{
/*
- * Definition: back up abs(count)-1 rows, return prior row
- * (if any).
+ * Definition: back up abs(count)-1 rows, return prior row (if
+ * any).
*/
if (count < -1)
PortalRunSelect(portal, false, -count - 1, None_Receiver);
@@ -1362,8 +1356,8 @@ DoPortalRunFetch(Portal portal,
}
/*
- * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and
- * count >= 0.
+ * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and count
+ * >= 0.
*/
forward = (fdirection == FETCH_FORWARD);
@@ -1385,11 +1379,11 @@ DoPortalRunFetch(Portal portal,
else
{
/*
- * If we are sitting on a row, back up one so we can re-fetch
- * it. If we are not sitting on a row, we still have to start
- * up and shut down the executor so that the destination is
- * initialized and shut down correctly; so keep going. To
- * PortalRunSelect, count == 0 means we will retrieve no row.
+ * If we are sitting on a row, back up one so we can re-fetch it.
+ * If we are not sitting on a row, we still have to start up and
+ * shut down the executor so that the destination is initialized
+ * and shut down correctly; so keep going. To PortalRunSelect,
+ * count == 0 means we will retrieve no row.
*/
if (on_row)
{
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index dd89832da89..8e3c900053c 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.244 2005/10/06 21:30:36 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.245 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -214,8 +214,8 @@ CheckRelationOwnership(RangeVar *rel, bool noCatalogs)
IsSystemClass((Form_pg_class) GETSTRUCT(tuple)))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system catalog",
- rel->relname)));
+ errmsg("permission denied: \"%s\" is a system catalog",
+ rel->relname)));
}
ReleaseSysCache(tuple);
@@ -236,9 +236,9 @@ QueryIsReadOnly(Query *parsetree)
{
case CMD_SELECT:
if (parsetree->into != NULL)
- return false; /* SELECT INTO */
+ return false; /* SELECT INTO */
else if (parsetree->rowMarks != NIL)
- return false; /* SELECT FOR UPDATE/SHARE */
+ return false; /* SELECT FOR UPDATE/SHARE */
else
return true;
case CMD_UPDATE:
@@ -269,8 +269,8 @@ check_xact_readonly(Node *parsetree)
return;
/*
- * Note: Commands that need to do more complicated checking are
- * handled elsewhere.
+ * Note: Commands that need to do more complicated checking are handled
+ * elsewhere.
*/
switch (nodeTag(parsetree))
@@ -367,8 +367,8 @@ ProcessUtility(Node *parsetree,
switch (stmt->kind)
{
/*
- * START TRANSACTION, as defined by SQL99:
- * Identical to BEGIN. Same code for both.
+ * START TRANSACTION, as defined by SQL99: Identical
+ * to BEGIN. Same code for both.
*/
case TRANS_STMT_BEGIN:
case TRANS_STMT_START:
@@ -498,8 +498,8 @@ ProcessUtility(Node *parsetree,
RELKIND_RELATION);
/*
- * Let AlterTableCreateToastTable decide if this one needs
- * a secondary relation too.
+ * Let AlterTableCreateToastTable decide if this one needs a
+ * secondary relation too.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relOid, true);
@@ -558,8 +558,7 @@ ProcessUtility(Node *parsetree,
case OBJECT_DOMAIN:
/*
- * RemoveDomain does its own permissions
- * checks
+ * RemoveDomain does its own permissions checks
*/
RemoveDomain(names, stmt->behavior);
break;
@@ -571,8 +570,7 @@ ProcessUtility(Node *parsetree,
case OBJECT_SCHEMA:
/*
- * RemoveSchema does its own permissions
- * checks
+ * RemoveSchema does its own permissions checks
*/
RemoveSchema(names, stmt->behavior);
break;
@@ -584,8 +582,8 @@ ProcessUtility(Node *parsetree,
}
/*
- * We used to need to do CommandCounterIncrement()
- * here, but now it's done inside performDeletion().
+ * We used to need to do CommandCounterIncrement() here,
+ * but now it's done inside performDeletion().
*/
}
}
@@ -651,8 +649,8 @@ ProcessUtility(Node *parsetree,
case 'T': /* ALTER DOMAIN DEFAULT */
/*
- * Recursively alter column default for table and,
- * if requested, for descendants
+ * Recursively alter column default for table and, if
+ * requested, for descendants
*/
AlterDomainDefault(stmt->typename,
stmt->def);
@@ -691,8 +689,7 @@ ProcessUtility(Node *parsetree,
break;
/*
- * ******************************** object creation /
- * destruction ********************************
+ * ******************************** object creation / destruction ********************************
*
*/
case T_DefineStmt:
@@ -738,7 +735,7 @@ ProcessUtility(Node *parsetree,
CreateFunction((CreateFunctionStmt *) parsetree);
break;
- case T_AlterFunctionStmt: /* ALTER FUNCTION */
+ case T_AlterFunctionStmt: /* ALTER FUNCTION */
AlterFunction((AlterFunctionStmt *) parsetree);
break;
@@ -750,7 +747,7 @@ ProcessUtility(Node *parsetree,
DefineIndex(stmt->relation, /* relation */
stmt->idxname, /* index name */
- InvalidOid, /* no predefined OID */
+ InvalidOid, /* no predefined OID */
stmt->accessMethod, /* am name */
stmt->tableSpace,
stmt->indexParams, /* parameters */
@@ -865,8 +862,8 @@ ProcessUtility(Node *parsetree,
VariableSetStmt *n = (VariableSetStmt *) parsetree;
/*
- * Special cases for special SQL syntax that effectively
- * sets more than one variable per statement.
+ * Special cases for special SQL syntax that effectively sets
+ * more than one variable per statement.
*/
if (strcmp(n->name, "TRANSACTION") == 0)
{
@@ -878,10 +875,10 @@ ProcessUtility(Node *parsetree,
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else if (strcmp(n->name, "SESSION CHARACTERISTICS") == 0)
@@ -894,10 +891,10 @@ ProcessUtility(Node *parsetree,
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("default_transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("default_transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else
@@ -1380,30 +1377,30 @@ CreateCommandTag(Node *parsetree)
break;
case T_AlterObjectSchemaStmt:
- switch (((AlterObjectSchemaStmt *) parsetree)->objectType)
- {
+ switch (((AlterObjectSchemaStmt *) parsetree)->objectType)
+ {
case OBJECT_AGGREGATE:
- tag = "ALTER AGGREGATE";
- break;
+ tag = "ALTER AGGREGATE";
+ break;
case OBJECT_DOMAIN:
- tag = "ALTER DOMAIN";
- break;
+ tag = "ALTER DOMAIN";
+ break;
case OBJECT_FUNCTION:
- tag = "ALTER FUNCTION";
- break;
+ tag = "ALTER FUNCTION";
+ break;
case OBJECT_SEQUENCE:
- tag = "ALTER SEQUENCE";
- break;
+ tag = "ALTER SEQUENCE";
+ break;
case OBJECT_TABLE:
- tag = "ALTER TABLE";
- break;
+ tag = "ALTER TABLE";
+ break;
case OBJECT_TYPE:
- tag = "ALTER TYPE";
- break;
+ tag = "ALTER TYPE";
+ break;
default:
tag = "???";
break;
- }
+ }
break;
case T_AlterOwnerStmt:
@@ -1480,7 +1477,7 @@ CreateCommandTag(Node *parsetree)
case T_GrantRoleStmt:
{
- GrantRoleStmt *stmt = (GrantRoleStmt *) parsetree;
+ GrantRoleStmt *stmt = (GrantRoleStmt *) parsetree;
tag = (stmt->is_grant) ? "GRANT ROLE" : "REVOKE ROLE";
}
@@ -1717,9 +1714,10 @@ CreateQueryTag(Query *parsetree)
switch (parsetree->commandType)
{
case CMD_SELECT:
+
/*
- * We take a little extra care here so that the result will
- * be useful for complaints about read-only statements
+ * We take a little extra care here so that the result will be
+ * useful for complaints about read-only statements
*/
if (parsetree->into != NULL)
tag = "SELECT INTO";
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 9909640ad4a..5fcb9b25fc4 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.125 2005/10/10 18:49:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.126 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,9 +59,9 @@
* The cache is valid if cached_member_role is not InvalidOid.
*/
static Oid cached_privs_role = InvalidOid;
-static List *cached_privs_roles = NIL;
+static List *cached_privs_roles = NIL;
static Oid cached_member_role = InvalidOid;
-static List *cached_membership_roles = NIL;
+static List *cached_membership_roles = NIL;
static const char *getid(const char *s, char *n);
@@ -73,7 +73,7 @@ static void check_circularity(const Acl *old_acl, const AclItem *mod_aip,
Oid ownerId);
static Acl *recursive_revoke(Acl *acl, Oid grantee, AclMode revoke_privs,
Oid ownerId, DropBehavior behavior);
-static int oidComparator(const void *arg1, const void *arg2);
+static int oidComparator(const void *arg1, const void *arg2);
static AclMode convert_priv_string(text *priv_type_text);
@@ -143,8 +143,8 @@ getid(const char *s, char *n)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
- errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ errdetail("Identifier must be less than %d characters.",
+ NAMEDATALEN)));
n[len++] = *s;
}
@@ -230,7 +230,7 @@ aclparse(const char *s, AclItem *aip)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("unrecognized key word: \"%s\"", name),
- errhint("ACL key word must be \"group\" or \"user\".")));
+ errhint("ACL key word must be \"group\" or \"user\".")));
s = getid(s, name); /* move s to the name beyond the keyword */
if (name[0] == '\0')
ereport(ERROR,
@@ -289,8 +289,8 @@ aclparse(const char *s, AclItem *aip)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid mode character: must be one of \"%s\"",
- ACL_ALL_RIGHTS_STR)));
+ errmsg("invalid mode character: must be one of \"%s\"",
+ ACL_ALL_RIGHTS_STR)));
}
privs |= read;
@@ -302,8 +302,8 @@ aclparse(const char *s, AclItem *aip)
aip->ai_grantee = get_roleid_checked(name);
/*
- * XXX Allow a degree of backward compatibility by defaulting the
- * grantor to the superuser.
+ * XXX Allow a degree of backward compatibility by defaulting the grantor
+ * to the superuser.
*/
if (*s == '/')
{
@@ -380,7 +380,7 @@ aclitemin(PG_FUNCTION_ARGS)
if (*s)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("extra garbage at the end of the ACL specification")));
+ errmsg("extra garbage at the end of the ACL specification")));
PG_RETURN_ACLITEM_P(aip);
}
@@ -565,14 +565,14 @@ acldefault(GrantObjectType objtype, Oid ownerId)
}
/*
- * Note that the owner's entry shows all ordinary privileges but no
- * grant options. This is because his grant options come "from the
- * system" and not from his own efforts. (The SQL spec says that the
- * owner's rights come from a "_SYSTEM" authid.) However, we do
- * consider that the owner's ordinary privileges are self-granted;
- * this lets him revoke them. We implement the owner's grant options
- * without any explicit "_SYSTEM"-like ACL entry, by internally
- * special-casing the owner whereever we are testing grant options.
+ * Note that the owner's entry shows all ordinary privileges but no grant
+ * options. This is because his grant options come "from the system" and
+ * not from his own efforts. (The SQL spec says that the owner's rights
+ * come from a "_SYSTEM" authid.) However, we do consider that the
+ * owner's ordinary privileges are self-granted; this lets him revoke
+ * them. We implement the owner's grant options without any explicit
+ * "_SYSTEM"-like ACL entry, by internally special-casing the owner
+ * whereever we are testing grant options.
*/
aip->ai_grantee = ownerId;
aip->ai_grantor = ownerId;
@@ -631,10 +631,10 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
old_aip = ACL_DAT(old_acl);
/*
- * Search the ACL for an existing entry for this grantee and grantor.
- * If one exists, just modify the entry in-place (well, in the same
- * position, since we actually return a copy); otherwise, insert the
- * new entry at the end.
+ * Search the ACL for an existing entry for this grantee and grantor. If
+ * one exists, just modify the entry in-place (well, in the same position,
+ * since we actually return a copy); otherwise, insert the new entry at
+ * the end.
*/
for (dst = 0; dst < num; ++dst)
@@ -676,7 +676,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
break;
case ACL_MODECHG_DEL:
ACLITEM_SET_RIGHTS(new_aip[dst],
- old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
+ old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
break;
case ACL_MODECHG_EQL:
ACLITEM_SET_RIGHTS(new_aip[dst],
@@ -700,8 +700,8 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can
- * only handle this when the grantee is not PUBLIC.
+ * Remove abandoned privileges (cascading revoke). Currently we can only
+ * handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
{
@@ -742,8 +742,8 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* Make a copy of the given ACL, substituting new owner ID for old
- * wherever it appears as either grantor or grantee. Also note if the
- * new owner ID is already present.
+ * wherever it appears as either grantor or grantee. Also note if the new
+ * owner ID is already present.
*/
num = ACL_NUM(old_acl);
old_aip = ACL_DAT(old_acl);
@@ -763,21 +763,20 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
}
/*
- * If the old ACL contained any references to the new owner, then we
- * may now have generated an ACL containing duplicate entries. Find
- * them and merge them so that there are not duplicates. (This is
- * relatively expensive since we use a stupid O(N^2) algorithm, but
- * it's unlikely to be the normal case.)
+ * If the old ACL contained any references to the new owner, then we may
+ * now have generated an ACL containing duplicate entries. Find them and
+ * merge them so that there are not duplicates. (This is relatively
+ * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
+ * be the normal case.)
*
- * To simplify deletion of duplicate entries, we temporarily leave them
- * in the array but set their privilege masks to zero; when we reach
- * such an entry it's just skipped. (Thus, a side effect of this code
- * will be to remove privilege-free entries, should there be any in
- * the input.) dst is the next output slot, targ is the currently
- * considered input slot (always >= dst), and src scans entries to the
- * right of targ looking for duplicates. Once an entry has been
- * emitted to dst it is known duplicate-free and need not be
- * considered anymore.
+ * To simplify deletion of duplicate entries, we temporarily leave them in
+ * the array but set their privilege masks to zero; when we reach such an
+ * entry it's just skipped. (Thus, a side effect of this code will be to
+ * remove privilege-free entries, should there be any in the input.) dst
+ * is the next output slot, targ is the currently considered input slot
+ * (always >= dst), and src scans entries to the right of targ looking for
+ * duplicates. Once an entry has been emitted to dst it is known
+ * duplicate-free and need not be considered anymore.
*/
if (newpresent)
{
@@ -877,14 +876,14 @@ cc_restart:
own_privs = aclmask(acl,
mod_aip->ai_grantor,
ownerId,
- ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
+ ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
ACLMASK_ALL);
own_privs = ACL_OPTION_TO_PRIVS(own_privs);
if ((ACLITEM_GET_GOPTIONS(*mod_aip) & ~own_privs) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("grant options cannot be granted back to your own grantor")));
+ errmsg("grant options cannot be granted back to your own grantor")));
pfree(acl);
}
@@ -1041,11 +1040,11 @@ aclmask(const Acl *acl, Oid roleid, Oid ownerId,
}
/*
- * Check privileges granted indirectly via role memberships.
- * We do this in a separate pass to minimize expensive indirect
- * membership tests. In particular, it's worth testing whether
- * a given ACL entry grants any privileges still of interest before
- * we perform the has_privs_of_role test.
+ * Check privileges granted indirectly via role memberships. We do this in
+ * a separate pass to minimize expensive indirect membership tests. In
+ * particular, it's worth testing whether a given ACL entry grants any
+ * privileges still of interest before we perform the has_privs_of_role
+ * test.
*/
remaining = mask & ~result;
for (i = 0; i < num; i++)
@@ -1140,11 +1139,11 @@ aclmask_direct(const Acl *acl, Oid roleid, Oid ownerId,
int
aclmembers(const Acl *acl, Oid **roleids)
{
- Oid *list;
+ Oid *list;
const AclItem *acldat;
- int i,
- j,
- k;
+ int i,
+ j,
+ k;
if (acl == NULL || ACL_NUM(acl) == 0)
{
@@ -1183,8 +1182,8 @@ aclmembers(const Acl *acl, Oid **roleids)
}
/*
- * We could repalloc the array down to minimum size, but it's hardly
- * worth it since it's only transient memory.
+ * We could repalloc the array down to minimum size, but it's hardly worth
+ * it since it's only transient memory.
*/
*roleids = list;
@@ -1198,8 +1197,8 @@ aclmembers(const Acl *acl, Oid **roleids)
static int
oidComparator(const void *arg1, const void *arg2)
{
- Oid oid1 = * (const Oid *) arg1;
- Oid oid2 = * (const Oid *) arg2;
+ Oid oid1 = *(const Oid *) arg1;
+ Oid oid2 = *(const Oid *) arg2;
if (oid1 > oid2)
return 1;
@@ -1257,7 +1256,7 @@ Datum
makeaclitem(PG_FUNCTION_ARGS)
{
Oid grantee = PG_GETARG_OID(0);
- Oid grantor = PG_GETARG_OID(1);
+ Oid grantor = PG_GETARG_OID(1);
text *privtext = PG_GETARG_TEXT_P(2);
bool goption = PG_GETARG_BOOL(3);
AclItem *result;
@@ -1282,7 +1281,7 @@ convert_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
if (pg_strcasecmp(priv_type, "SELECT") == 0)
return ACL_SELECT;
@@ -1410,7 +1409,7 @@ has_table_privilege_id(PG_FUNCTION_ARGS)
{
Oid tableoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -1493,7 +1492,7 @@ convert_table_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -1704,7 +1703,7 @@ convert_database_name(text *databasename)
Oid oid;
dbname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(databasename)));
+ PointerGetDatum(databasename)));
oid = get_database_oid(dbname);
if (!OidIsValid(oid))
@@ -1725,7 +1724,7 @@ convert_database_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -1916,10 +1915,10 @@ convert_function_name(text *functionname)
Oid oid;
funcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(functionname)));
+ PointerGetDatum(functionname)));
oid = DatumGetObjectId(DirectFunctionCall1(regprocedurein,
- CStringGetDatum(funcname)));
+ CStringGetDatum(funcname)));
if (!OidIsValid(oid))
ereport(ERROR,
@@ -1939,7 +1938,7 @@ convert_function_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2120,7 +2119,7 @@ convert_language_name(text *languagename)
Oid oid;
langname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(languagename)));
+ PointerGetDatum(languagename)));
oid = GetSysCacheOid(LANGNAME,
CStringGetDatum(langname),
@@ -2143,7 +2142,7 @@ convert_language_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2324,7 +2323,7 @@ convert_schema_name(text *schemaname)
Oid oid;
nspname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(schemaname)));
+ PointerGetDatum(schemaname)));
oid = GetSysCacheOid(NAMESPACENAME,
CStringGetDatum(nspname),
@@ -2347,7 +2346,7 @@ convert_schema_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2462,7 +2461,7 @@ has_tablespace_privilege_id(PG_FUNCTION_ARGS)
{
Oid tablespaceoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -2532,7 +2531,7 @@ convert_tablespace_name(text *tablespacename)
Oid oid;
spcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(tablespacename)));
+ PointerGetDatum(tablespacename)));
oid = get_tablespace_oid(spcname);
if (!OidIsValid(oid))
@@ -2553,7 +2552,7 @@ convert_tablespace_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2663,7 +2662,7 @@ pg_has_role_id(PG_FUNCTION_ARGS)
{
Oid roleoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -2739,7 +2738,7 @@ convert_role_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2795,8 +2794,8 @@ initialize_acl(void)
if (!IsBootstrapProcessingMode())
{
/*
- * In normal mode, set a callback on any syscache
- * invalidation of pg_auth_members rows
+ * In normal mode, set a callback on any syscache invalidation of
+ * pg_auth_members rows
*/
CacheRegisterSyscacheCallback(AUTHMEMROLEMEM,
RoleMembershipCacheCallback,
@@ -2806,7 +2805,7 @@ initialize_acl(void)
/*
* RoleMembershipCacheCallback
- * Syscache inval callback function
+ * Syscache inval callback function
*/
static void
RoleMembershipCacheCallback(Datum arg, Oid relid)
@@ -2853,19 +2852,19 @@ has_rolinherit(Oid roleid)
static List *
roles_has_privs_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_privs_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_privs_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_privs_role) && cached_privs_role == roleid)
return cached_privs_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
@@ -2877,9 +2876,9 @@ roles_has_privs_of(Oid roleid)
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Ignore non-inheriting roles */
if (!has_rolinherit(memberid))
@@ -2892,12 +2891,12 @@ roles_has_privs_of(Oid roleid)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
@@ -2915,7 +2914,7 @@ roles_has_privs_of(Oid roleid)
/*
* Now safe to assign to state variable
*/
- cached_privs_role = InvalidOid; /* just paranoia */
+ cached_privs_role = InvalidOid; /* just paranoia */
list_free(cached_privs_roles);
cached_privs_roles = new_cached_privs_roles;
cached_privs_role = roleid;
@@ -2937,19 +2936,19 @@ roles_has_privs_of(Oid roleid)
static List *
roles_is_member_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_membership_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_membership_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_member_role) && cached_member_role == roleid)
return cached_membership_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
@@ -2961,9 +2960,9 @@ roles_is_member_of(Oid roleid)
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
@@ -2972,12 +2971,12 @@ roles_is_member_of(Oid roleid)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
@@ -3023,7 +3022,7 @@ has_privs_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member has the privileges of, including
* multi-level recursion, then see if target role is any one of them.
*/
@@ -3047,7 +3046,7 @@ is_member_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member is a member of, including multi-level
* recursion, then see if target role is any one of them.
*/
@@ -3080,8 +3079,8 @@ bool
is_admin_of_role(Oid member, Oid role)
{
bool result = false;
- List *roles_list;
- ListCell *l;
+ List *roles_list;
+ ListCell *l;
/* Fast path for simple case */
if (member == role)
@@ -3091,18 +3090,18 @@ is_admin_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
- * Find all the roles that member is a member of,
- * including multi-level recursion. We build a list in the same way
- * that is_member_of_role does to track visited and unvisited roles.
+ /*
+ * Find all the roles that member is a member of, including multi-level
+ * recursion. We build a list in the same way that is_member_of_role does
+ * to track visited and unvisited roles.
*/
roles_list = list_make1_oid(member);
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
@@ -3111,7 +3110,7 @@ is_admin_of_role(Oid member, Oid role)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
if (otherid == role &&
((Form_pg_auth_members) GETSTRUCT(tup))->admin_option)
@@ -3138,7 +3137,7 @@ is_admin_of_role(Oid member, Oid role)
static int
count_one_bits(AclMode mask)
{
- int nbits = 0;
+ int nbits = 0;
/* this code relies on AclMode being an unsigned type */
while (mask)
@@ -3157,14 +3156,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
@@ -3181,15 +3180,15 @@ select_best_grantor(Oid roleId, AclMode privileges,
Oid *grantorId, AclMode *grantOptions)
{
AclMode needed_goptions = ACL_GRANT_OPTION_FOR(privileges);
- List *roles_list;
+ List *roles_list;
int nrights;
ListCell *l;
/*
- * The object owner is always treated as having all grant options,
- * so if roleId is the owner it's easy. Also, if roleId is a superuser
- * it's easy: superusers are implicitly members of every role, so they
- * act as the object owner.
+ * The object owner is always treated as having all grant options, so if
+ * roleId is the owner it's easy. Also, if roleId is a superuser it's
+ * easy: superusers are implicitly members of every role, so they act as
+ * the object owner.
*/
if (roleId == ownerId || superuser_arg(roleId))
{
@@ -3200,8 +3199,8 @@ select_best_grantor(Oid roleId, AclMode privileges,
/*
* Otherwise we have to do a careful search to see if roleId has the
- * privileges of any suitable role. Note: we can hang onto the result
- * of roles_has_privs_of() throughout this loop, because aclmask_direct()
+ * privileges of any suitable role. Note: we can hang onto the result of
+ * roles_has_privs_of() throughout this loop, because aclmask_direct()
* doesn't query any role memberships.
*/
roles_list = roles_has_privs_of(roleId);
@@ -3213,8 +3212,8 @@ select_best_grantor(Oid roleId, AclMode privileges,
foreach(l, roles_list)
{
- Oid otherrole = lfirst_oid(l);
- AclMode otherprivs;
+ Oid otherrole = lfirst_oid(l);
+ AclMode otherprivs;
otherprivs = aclmask_direct(acl, otherrole, ownerId,
needed_goptions, ACLMASK_ALL);
@@ -3225,13 +3224,14 @@ select_best_grantor(Oid roleId, AclMode privileges,
*grantOptions = otherprivs;
return;
}
+
/*
* If it has just some of the needed privileges, remember best
* candidate.
*/
if (otherprivs != ACL_NO_RIGHTS)
{
- int nnewrights = count_one_bits(otherprivs);
+ int nnewrights = count_one_bits(otherprivs);
if (nnewrights > nrights)
{
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index fd83025d6e2..08a7072634c 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.15 2005/01/01 20:44:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.16 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,17 +96,17 @@ array_push(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("argument must be empty or one-dimensional array")));
+ errmsg("argument must be empty or one-dimensional array")));
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -194,8 +194,8 @@ array_cat(PG_FUNCTION_ARGS)
ndims2 = ARR_NDIM(v2);
/*
- * short circuit - if one input array is empty, and the other is not,
- * we return the non-empty one as the result
+ * short circuit - if one input array is empty, and the other is not, we
+ * return the non-empty one as the result
*
* if both are empty, return the first one
*/
@@ -245,8 +245,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing element dimensions are "
- "not compatible for concatenation.")));
+ errdetail("Arrays with differing element dimensions are "
+ "not compatible for concatenation.")));
dims[i] = dims1[i];
lbs[i] = lbs1[i];
@@ -255,9 +255,8 @@ array_cat(PG_FUNCTION_ARGS)
else if (ndims1 == ndims2 - 1)
{
/*
- * resulting array has the second argument as the outer array,
- * with the first argument appended to the front of the outer
- * dimension
+ * resulting array has the second argument as the outer array, with
+ * the first argument appended to the front of the outer dimension
*/
ndims = ndims2;
dims = (int *) palloc(ndims * sizeof(int));
@@ -278,8 +277,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
else
@@ -287,8 +286,8 @@ array_cat(PG_FUNCTION_ARGS)
/*
* (ndims1 == ndims2 + 1)
*
- * resulting array has the first argument as the outer array, with
- * the second argument appended to the end of the outer dimension
+ * resulting array has the first argument as the outer array, with the
+ * second argument appended to the end of the outer dimension
*/
ndims = ndims1;
dims = (int *) palloc(ndims * sizeof(int));
@@ -306,8 +305,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
@@ -351,7 +350,7 @@ create_singleton_array(FunctionCallInfo fcinfo,
if (element_type == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid array element type OID: %u", element_type)));
+ errmsg("invalid array element type OID: %u", element_type)));
if (ndims < 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -371,14 +370,14 @@ create_singleton_array(FunctionCallInfo fcinfo,
}
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index efb4ea9dc14..5304d47fa8a 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.122 2005/08/15 19:40:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.123 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,8 +130,7 @@ array_in(PG_FUNCTION_ARGS)
char *string = PG_GETARG_CSTRING(0); /* external form */
Oid element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
int typlen;
bool typbyval;
char typalign;
@@ -151,14 +150,14 @@ array_in(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its input
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
@@ -166,8 +165,7 @@ array_in(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its input conversion
- * proc
+ * Get info about element type, including its input conversion proc
*/
get_type_io_data(element_type, IOFunc_input,
&my_extra->typlen, &my_extra->typbyval,
@@ -191,8 +189,8 @@ array_in(PG_FUNCTION_ARGS)
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items. The
- * outer loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The outer
+ * loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
@@ -250,7 +248,7 @@ array_in(PG_FUNCTION_ARGS)
if (ub < lBound[ndim])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
dim[ndim] = ub - lBound[ndim] + 1;
ndim++;
@@ -282,8 +280,8 @@ array_in(PG_FUNCTION_ARGS)
p++;
/*
- * intuit dimensions from brace structure -- it better match what
- * we were given
+ * intuit dimensions from brace structure -- it better match what we
+ * were given
*/
if (*p != '{')
ereport(ERROR,
@@ -293,13 +291,13 @@ array_in(PG_FUNCTION_ARGS)
if (ndim_braces != ndim)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
for (i = 0; i < ndim; ++i)
{
if (dim[i] != dim_braces[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
}
}
@@ -406,22 +404,22 @@ ArrayCount(char *str, int *dim, char typdelim)
/* Signal a premature end of the string */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\\':
/*
- * An escape must be after a level start, after an
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * An escape must be after a level start, after an element
+ * start, or after an element delimiter. In any case we
+ * now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state != ARRAY_QUOTED_ELEM_STARTED)
parse_state = ARRAY_ELEM_STARTED;
/* skip the escaped character */
@@ -429,22 +427,22 @@ ArrayCount(char *str, int *dim, char typdelim)
ptr++;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\"':
/*
* A quote must be after a level start, after a quoted
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * element start, or after an element delimiter. In any
+ * case we now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
in_quotes = !in_quotes;
if (in_quotes)
parse_state = ARRAY_QUOTED_ELEM_STARTED;
@@ -455,22 +453,22 @@ ArrayCount(char *str, int *dim, char typdelim)
if (!in_quotes)
{
/*
- * A left brace can occur if no nesting has
- * occurred yet, after a level start, or after a
- * level delimiter.
+ * A left brace can occur if no nesting has occurred
+ * yet, after a level start, or after a level
+ * delimiter.
*/
if (parse_state != ARRAY_NO_LEVEL &&
parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_LEVEL_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_STARTED;
if (nest_level >= MAXDIM)
ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
- nest_level, MAXDIM)));
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
+ nest_level, MAXDIM)));
temp[nest_level] = 0;
nest_level++;
if (ndim < nest_level)
@@ -481,9 +479,9 @@ ArrayCount(char *str, int *dim, char typdelim)
if (!in_quotes)
{
/*
- * A right brace can occur after an element start,
- * an element completion, a quoted element
- * completion, or a level completion.
+ * A right brace can occur after an element start, an
+ * element completion, a quoted element completion, or
+ * a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
@@ -491,22 +489,22 @@ ArrayCount(char *str, int *dim, char typdelim)
parse_state != ARRAY_LEVEL_COMPLETED &&
!(nest_level == 1 && parse_state == ARRAY_LEVEL_STARTED))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_COMPLETED;
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
nest_level--;
if ((nelems_last[nest_level] != 1) &&
- (nelems[nest_level] != nelems_last[nest_level]))
+ (nelems[nest_level] != nelems_last[nest_level]))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("multidimensional arrays must have "
- "array expressions with matching "
- "dimensions")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("multidimensional arrays must have "
+ "array expressions with matching "
+ "dimensions")));
nelems_last[nest_level] = nelems[nest_level];
nelems[nest_level] = 1;
if (nest_level == 0)
@@ -527,17 +525,17 @@ ArrayCount(char *str, int *dim, char typdelim)
if (*ptr == typdelim)
{
/*
- * Delimiters can occur after an element
- * start, an element completion, a quoted
- * element completion, or a level completion.
+ * Delimiters can occur after an element start, an
+ * element completion, a quoted element
+ * completion, or a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
- parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
+ parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state == ARRAY_LEVEL_COMPLETED)
parse_state = ARRAY_LEVEL_DELIMITED;
else
@@ -549,16 +547,16 @@ ArrayCount(char *str, int *dim, char typdelim)
{
/*
* Other non-space characters must be after a
- * level start, after an element start, or
- * after an element delimiter. In any case we
- * now must be past an element start.
+ * level start, after an element start, or after
+ * an element delimiter. In any case we now must
+ * be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_ELEM_STARTED;
}
}
@@ -637,18 +635,18 @@ ReadArrayStr(char *arrayStr,
MemSet(indx, 0, sizeof(indx));
/*
- * We have to remove " and \ characters to create a clean item value
- * to pass to the datatype input routine. We overwrite each item
- * value in-place within arrayStr to do this. srcptr is the current
- * scan point, and dstptr is where we are copying to.
+ * We have to remove " and \ characters to create a clean item value to
+ * pass to the datatype input routine. We overwrite each item value
+ * in-place within arrayStr to do this. srcptr is the current scan point,
+ * and dstptr is where we are copying to.
*
- * We also want to suppress leading and trailing unquoted whitespace.
- * We use the leadingspace flag to suppress leading space. Trailing
- * space is tracked by using dstendptr to point to the last significant
- * output character.
+ * We also want to suppress leading and trailing unquoted whitespace. We use
+ * the leadingspace flag to suppress leading space. Trailing space is
+ * tracked by using dstendptr to point to the last significant output
+ * character.
*
- * The error checking in this routine is mostly pro-forma, since we
- * expect that ArrayCount() already validated the string.
+ * The error checking in this routine is mostly pro-forma, since we expect
+ * that ArrayCount() already validated the string.
*/
srcptr = arrayStr;
while (!eoArray)
@@ -706,9 +704,9 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level >= ndim)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
nest_level++;
indx[nest_level - 1] = 0;
srcptr++;
@@ -721,9 +719,9 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
if (i == -1)
i = ArrayGetOffset0(ndim, indx, prod);
indx[nest_level - 1] = 0;
@@ -751,8 +749,8 @@ ReadArrayStr(char *arrayStr,
else if (isspace((unsigned char) *srcptr))
{
/*
- * If leading space, drop it immediately. Else,
- * copy but don't advance dstendptr.
+ * If leading space, drop it immediately. Else, copy
+ * but don't advance dstendptr.
*/
if (leadingspace)
srcptr++;
@@ -913,14 +911,14 @@ array_out(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -928,8 +926,7 @@ array_out(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
@@ -956,8 +953,8 @@ array_out(PG_FUNCTION_ARGS)
}
/*
- * we will need to add explicit dimensions if any dimension has a
- * lower bound other than one
+ * we will need to add explicit dimensions if any dimension has a lower
+ * bound other than one
*/
for (i = 0; i < ndim; i++)
{
@@ -969,9 +966,9 @@ array_out(PG_FUNCTION_ARGS)
}
/*
- * Convert all values to string form, count total space needed
- * (including any overhead such as escaping backslashes), and detect
- * whether each item needs double quotes.
+ * Convert all values to string form, count total space needed (including
+ * any overhead such as escaping backslashes), and detect whether each
+ * item needs double quotes.
*/
values = (char **) palloc(nitems * sizeof(char *));
needquotes = (bool *) palloc(nitems * sizeof(bool));
@@ -991,7 +988,7 @@ array_out(PG_FUNCTION_ARGS)
/* count data plus backslashes; detect chars needing quotes */
if (values[i][0] == '\0')
- needquote = true; /* force quotes for empty string */
+ needquote = true; /* force quotes for empty string */
else
needquote = false;
@@ -1121,8 +1118,7 @@ array_recv(PG_FUNCTION_ARGS)
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid spec_element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
Oid element_type;
int typlen;
bool typbyval;
@@ -1174,15 +1170,15 @@ array_recv(PG_FUNCTION_ARGS)
nitems = ArrayGetNItems(ndim, dim);
/*
- * We arrange to look up info about element type, including its
- * receive conversion proc, only once per series of calls, assuming
- * the element type doesn't change underneath us.
+ * We arrange to look up info about element type, including its receive
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
@@ -1197,8 +1193,8 @@ array_recv(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1278,10 +1274,10 @@ ReadArrayBinary(StringInfo buf,
errmsg("insufficient data left in message")));
/*
- * Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input buffer.
- * We assume we can scribble on the input buffer so as to maintain
- * the convention that StringInfos have a trailing null.
+ * Rather than copying data around, we just set up a phony StringInfo
+ * pointing to the correct portion of the input buffer. We assume we
+ * can scribble on the input buffer so as to maintain the convention
+ * that StringInfos have a trailing null.
*/
elem_buf.data = &buf->data[buf->cursor];
elem_buf.maxlen = itemlen + 1;
@@ -1359,14 +1355,14 @@ array_send(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its send
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -1381,8 +1377,8 @@ array_send(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1646,14 +1642,14 @@ array_get_slice(ArrayType *array,
if (arraylen > 0)
{
/*
- * fixed-length arrays -- currently, cannot slice these because
- * parser labels output as being of the fixed-length array type!
- * Code below shows how we could support it if the parser were
- * changed to label output as a suitable varlena array type.
+ * fixed-length arrays -- currently, cannot slice these because parser
+ * labels output as being of the fixed-length array type! Code below
+ * shows how we could support it if the parser were changed to label
+ * output as a suitable varlena array type.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("slices of fixed-length arrays not implemented")));
+ errmsg("slices of fixed-length arrays not implemented")));
/*
* fixed-length arrays -- these are assumed to be 1-d, 0-based XXX
@@ -1678,10 +1674,9 @@ array_get_slice(ArrayType *array,
}
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits is silently truncated to the array limits. If we end up
- * with an empty slice, return NULL (should it be an empty array
- * instead?)
+ * Check provided subscripts. A slice exceeding the current array limits
+ * is silently truncated to the array limits. If we end up with an empty
+ * slice, return NULL (should it be an empty array instead?)
*/
if (ndim < nSubscripts || ndim <= 0 || ndim > MAXDIM)
RETURN_NULL(ArrayType *);
@@ -1719,8 +1714,8 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3)
- * we copied the given lowerIndx values ... but that seems confusing.
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
for (i = 0; i < ndim; i++)
@@ -1815,9 +1810,9 @@ array_set(ArrayType *array,
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the lower bounds to the
- * supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the lower bounds to the supplied
+ * subscripts
*/
if (ndim == 0)
{
@@ -1987,7 +1982,7 @@ array_set_slice(ArrayType *array,
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("updates on slices of fixed-length arrays not implemented")));
+ errmsg("updates on slices of fixed-length arrays not implemented")));
}
/* detoast arrays if necessary */
@@ -1999,9 +1994,9 @@ array_set_slice(ArrayType *array,
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the upper and lower bounds to
- * the supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the upper and lower bounds to the
+ * supplied subscripts
*/
if (ndim == 0)
{
@@ -2038,10 +2033,9 @@ array_set_slice(ArrayType *array,
memcpy(lb, ARR_LBOUND(array), ndim * sizeof(int));
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits throws an error, *except* in the 1-D case where we will
- * extend the array as long as no hole is created. An empty slice is
- * an error, too.
+ * Check provided subscripts. A slice exceeding the current array limits
+ * throws an error, *except* in the 1-D case where we will extend the
+ * array as long as no hole is created. An empty slice is an error, too.
*/
for (i = 0; i < nSubscripts; i++)
{
@@ -2083,8 +2077,8 @@ array_set_slice(ArrayType *array,
}
/*
- * Make sure source array has enough entries. Note we ignore the
- * shape of the source array and just read entries serially.
+ * Make sure source array has enough entries. Note we ignore the shape of
+ * the source array and just read entries serially.
*/
mda_get_range(ndim, span, lowerIndx, upperIndx);
nsrcitems = ArrayGetNItems(ndim, span);
@@ -2104,8 +2098,8 @@ array_set_slice(ArrayType *array,
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
olditemsize = array_slice_size(ndim, dim, lb, ARR_DATA_PTR(array),
lowerIndx, upperIndx,
@@ -2115,8 +2109,7 @@ array_set_slice(ArrayType *array,
else
{
/*
- * here we must allow for possibility of slice larger than orig
- * array
+ * here we must allow for possibility of slice larger than orig array
*/
int oldlb = ARR_LBOUND(array)[0];
int oldub = oldlb + ARR_DIMS(array)[0] - 1;
@@ -2148,8 +2141,8 @@ array_set_slice(ArrayType *array,
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
array_insert_slice(ndim, dim, lb, ARR_DATA_PTR(array), olddatasize,
ARR_DATA_PTR(newarray),
@@ -2192,7 +2185,7 @@ array_set_slice(ArrayType *array,
* or binary-compatible with, the first argument type of fn().
* * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -2250,9 +2243,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
}
/*
- * We arrange to look up info about input and return element types
- * only once per series of calls, assuming the element type doesn't
- * change underneath us.
+ * We arrange to look up info about input and return element types only
+ * once per series of calls, assuming the element type doesn't change
+ * underneath us.
*/
inp_extra = &amstate->inp_extra;
ret_extra = &amstate->ret_extra;
@@ -2297,9 +2290,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
/*
* Apply the given function to source elt and extra args.
*
- * We assume the extra args are non-NULL, so need not check whether
- * fn() is strict. Would need to do more work here to support
- * arrays containing nulls, too.
+ * We assume the extra args are non-NULL, so need not check whether fn()
+ * is strict. Would need to do more work here to support arrays
+ * containing nulls, too.
*/
fcinfo->arg[0] = elt;
fcinfo->argnull[0] = false;
@@ -2329,8 +2322,7 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
memcpy(ARR_DIMS(result), ARR_DIMS(v), 2 * ndim * sizeof(int));
/*
- * Note: do not risk trying to pfree the results of the called
- * function
+ * Note: do not risk trying to pfree the results of the called function
*/
CopyArrayEls(ARR_DATA_PTR(result), values, nitems,
typlen, typbyval, typalign, false);
@@ -2543,7 +2535,7 @@ array_eq(PG_FUNCTION_ARGS)
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/* fast path if the arrays do not have the same number of elements */
if (nitems1 != nitems2)
@@ -2551,10 +2543,10 @@ array_eq(PG_FUNCTION_ARGS)
else
{
/*
- * We arrange to look up the equality function only once per
- * series of calls, assuming the element type doesn't change
- * underneath us. The typcache is used so that we have no memory
- * leakage when being used as an index support function.
+ * We arrange to look up the equality function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used
+ * as an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
@@ -2565,8 +2557,8 @@ array_eq(PG_FUNCTION_ARGS)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -2697,13 +2689,13 @@ array_cmp(FunctionCallInfo fcinfo)
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/*
- * We arrange to look up the comparison function only once per series
- * of calls, assuming the element type doesn't change underneath us.
- * The typcache is used so that we have no memory leakage when being
- * used as an index support function.
+ * We arrange to look up the comparison function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used as
+ * an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
@@ -2714,8 +2706,8 @@ array_cmp(FunctionCallInfo fcinfo)
if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify a comparison function for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify a comparison function for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -3121,11 +3113,11 @@ array_type_length_coerce_internal(ArrayType *src,
errmsg("target type is not an array")));
/*
- * We don't deal with domain constraints yet, so bail out. This
- * isn't currently a problem, because we also don't support arrays
- * of domain type elements either. But in the future we might. At
- * that point consideration should be given to removing the check
- * below and adding a domain constraints check to the coercion.
+ * We don't deal with domain constraints yet, so bail out. This isn't
+ * currently a problem, because we also don't support arrays of domain
+ * type elements either. But in the future we might. At that point
+ * consideration should be given to removing the check below and
+ * adding a domain constraints check to the coercion.
*/
if (getBaseType(tgt_elem_type) != tgt_elem_type)
ereport(ERROR,
@@ -3150,8 +3142,8 @@ array_type_length_coerce_internal(ArrayType *src,
}
/*
- * If it's binary-compatible, modify the element type in the array
- * header, but otherwise leave the array as we received it.
+ * If it's binary-compatible, modify the element type in the array header,
+ * but otherwise leave the array as we received it.
*/
if (my_extra->coerce_finfo.fn_oid == InvalidOid)
{
@@ -3166,8 +3158,8 @@ array_type_length_coerce_internal(ArrayType *src,
/*
* Use array_map to apply the function to each array element.
*
- * We pass on the desttypmod and isExplicit flags whether or not the
- * function wants them.
+ * We pass on the desttypmod and isExplicit flags whether or not the function
+ * wants them.
*/
InitFunctionCallInfoData(locfcinfo, &my_extra->coerce_finfo, 3,
NULL, NULL);
@@ -3207,8 +3199,8 @@ array_length_coerce(PG_FUNCTION_ARGS)
PG_RETURN_ARRAYTYPE_P(v);
/*
- * We arrange to look up the element type's coercion function only
- * once per series of calls, assuming the element type doesn't change
+ * We arrange to look up the element type's coercion function only once
+ * per series of calls, assuming the element type doesn't change
* underneath us.
*/
my_extra = (alc_extra *) fmgr_info->fn_extra;
@@ -3303,7 +3295,7 @@ accumArrayResult(ArrayBuildState *astate,
if ((astate->nelems % ARRAY_ELEMS_CHUNKSIZE) == 0)
astate->dvalues = (Datum *)
repalloc(astate->dvalues,
- (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
+ (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
}
if (disnull)
@@ -3381,9 +3373,9 @@ makeMdArrayResult(ArrayBuildState *astate,
Datum
array_larger(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
@@ -3396,9 +3388,9 @@ array_larger(PG_FUNCTION_ARGS)
Datum
array_smaller(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
diff --git a/src/backend/utils/adt/ascii.c b/src/backend/utils/adt/ascii.c
index 361dec59f57..599b37b1f39 100644
--- a/src/backend/utils/adt/ascii.c
+++ b/src/backend/utils/adt/ascii.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.25 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.26 2005/10/15 02:49:28 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -73,8 +73,8 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *dest, int
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("encoding conversion from %s to ASCII not supported",
- pg_encoding_to_char(enc))));
+ errmsg("encoding conversion from %s to ASCII not supported",
+ pg_encoding_to_char(enc))));
return; /* keep compiler quiet */
}
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 8788af9f87e..f9e2f10325a 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -9,7 +9,7 @@
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.65 2005/07/21 04:41:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.66 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
@@ -85,14 +85,14 @@ cash_in(PG_FUNCTION_ARGS)
struct lconv *lconvert = PGLC_localeconv();
/*
- * frac_digits will be CHAR_MAX in some locales, notably C. However,
- * just testing for == CHAR_MAX is risky, because of compilers like
- * gcc that "helpfully" let you alter the platform-standard definition
- * of whether char is signed or not. If we are so unfortunate as to
- * get compiled with a nonstandard -fsigned-char or -funsigned-char
- * switch, then our idea of CHAR_MAX will not agree with libc's. The
- * safest course is not to test for CHAR_MAX at all, but to impose a
- * range check for plausible frac_digits values.
+ * frac_digits will be CHAR_MAX in some locales, notably C. However, just
+ * testing for == CHAR_MAX is risky, because of compilers like gcc that
+ * "helpfully" let you alter the platform-standard definition of whether
+ * char is signed or not. If we are so unfortunate as to get compiled
+ * with a nonstandard -fsigned-char or -funsigned-char switch, then our
+ * idea of CHAR_MAX will not agree with libc's. The safest course is not
+ * to test for CHAR_MAX at all, but to impose a range check for plausible
+ * frac_digits values.
*/
fpoint = lconvert->frac_digits;
if (fpoint < 0 || fpoint > 10)
@@ -195,7 +195,7 @@ cash_in(PG_FUNCTION_ARGS)
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type money: \"%s\"", str)));
+ errmsg("invalid input syntax for type money: \"%s\"", str)));
result = value * sgn;
@@ -238,8 +238,8 @@ cash_out(PG_FUNCTION_ARGS)
points = 2; /* best guess in this case, I think */
/*
- * As with frac_digits, must apply a range check to mon_grouping to
- * avoid being fooled by variant CHAR_MAX values.
+ * As with frac_digits, must apply a range check to mon_grouping to avoid
+ * being fooled by variant CHAR_MAX values.
*/
mon_group = *lconvert->mon_grouping;
if (mon_group <= 0 || mon_group > 6)
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index bc208164c1f..663fac909e6 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.42 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.43 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -187,9 +187,9 @@ text_char(PG_FUNCTION_ARGS)
char result;
/*
- * An empty input string is converted to \0 (for consistency with
- * charin). If the input is longer than one character, the excess data
- * is silently discarded.
+ * An empty input string is converted to \0 (for consistency with charin).
+ * If the input is longer than one character, the excess data is silently
+ * discarded.
*/
if (VARSIZE(arg1) > VARHDRSZ)
result = *(VARDATA(arg1));
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index ec1d808544b..619a099b654 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.121 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.122 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,7 +18,7 @@
#include <ctype.h>
#include <limits.h>
#include <float.h>
-#include <time.h>
+#include <time.h>
#include "access/hash.h"
#include "libpq/pqformat.h"
@@ -38,10 +38,10 @@
#endif
-static int time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec);
-static int timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp);
-static int tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result);
-static int tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result);
+static int time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec);
+static int timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp);
+static int tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result);
+static int tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result);
static void AdjustTimeForTypmod(TimeADT *time, int32 typmod);
/*****************************************************************************
@@ -56,7 +56,7 @@ Datum
date_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- DateADT date;
+ DateADT date;
fsec_t fsec;
struct pg_tm tt,
*tm = &tt;
@@ -83,7 +83,7 @@ date_in(PG_FUNCTION_ARGS)
case DTK_CURRENT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ errmsg("date/time value \"current\" is no longer supported")));
GetCurrentDateTime(tm);
break;
@@ -108,13 +108,13 @@ date_in(PG_FUNCTION_ARGS)
Datum
date_out(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
char *result;
struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
- j2date(date +POSTGRES_EPOCH_JDATE,
+ j2date(date + POSTGRES_EPOCH_JDATE,
&(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
EncodeDateOnly(tm, DateStyle, buf);
@@ -140,7 +140,7 @@ date_recv(PG_FUNCTION_ARGS)
Datum
date_send(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -306,7 +306,7 @@ date2timestamptz(DateADT dateVal)
#ifdef HAVE_INT64_TIMESTAMP
result = dateVal * USECS_PER_DAY + tz * USECS_PER_SEC;
#else
- result = dateVal * (double)SECS_PER_DAY + tz;
+ result = dateVal * (double) SECS_PER_DAY + tz;
#endif
return result;
@@ -715,7 +715,7 @@ date_timestamp(PG_FUNCTION_ARGS)
Datum
timestamp_date(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
struct pg_tm tt,
*tm = &tt;
@@ -797,11 +797,11 @@ abstime_date(PG_FUNCTION_ARGS)
case NOEND_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reserved abstime value to date")));
+ errmsg("cannot convert reserved abstime value to date")));
/*
- * pretend to drop through to make compiler think that result
- * will be set
+ * pretend to drop through to make compiler think that result will
+ * be set
*/
default:
@@ -821,7 +821,7 @@ Datum
date_text(PG_FUNCTION_ARGS)
{
/* Input is a Date, but may as well leave it in Datum form */
- Datum date = PG_GETARG_DATUM(0);
+ Datum date = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -914,11 +914,11 @@ time_in(PG_FUNCTION_ARGS)
* Convert a tm structure to a time data type.
*/
static int
-tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result)
+tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
*result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec)
- * USECS_PER_SEC) + fsec;
+ * USECS_PER_SEC) + fsec;
#else
*result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -931,7 +931,7 @@ tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result)
* local time zone. If out of this range, leave as GMT. - tgl 97/05/27
*/
static int
-time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec)
+time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
tm->tm_hour = time / USECS_PER_HOUR;
@@ -946,8 +946,8 @@ time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec)
recalc:
trem = time;
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
@@ -989,6 +989,7 @@ Datum
time_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -1072,7 +1073,6 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
/* note MAX_TIME_PRECISION differs in this case */
static const double TimeScales[MAX_TIME_PRECISION + 1] = {
@@ -1093,21 +1093,21 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
if (typmod >= 0 && typmod <= MAX_TIME_PRECISION)
{
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
*time = ((*time + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod];
+ TimeScales[typmod];
else
*time = -((((-*time) + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod]);
+ TimeScales[typmod]);
#else
- *time = rint((double) * time * TimeScales[typmod]) / TimeScales[typmod];
+ *time = rint((double) *time * TimeScales[typmod]) / TimeScales[typmod];
#endif
}
}
@@ -1208,8 +1208,8 @@ Datum
overlaps_time(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeADT, but we leave them as generic Datums to
- * avoid dereferencing nulls (TimeADT is pass-by-reference!)
+ * The arguments are TimeADT, but we leave them as generic Datums to avoid
+ * dereferencing nulls (TimeADT is pass-by-reference!)
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -1226,9 +1226,9 @@ overlaps_time(PG_FUNCTION_ARGS)
(DatumGetTimeADT(t1) < DatumGetTimeADT(t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -1276,8 +1276,8 @@ overlaps_time(PG_FUNCTION_ARGS)
if (TIMEADT_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -1287,8 +1287,8 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -1303,8 +1303,8 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -1312,8 +1312,7 @@ overlaps_time(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -1330,7 +1329,7 @@ overlaps_time(PG_FUNCTION_ARGS)
Datum
timestamp_time(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
TimeADT result;
struct pg_tm tt,
*tm = &tt;
@@ -1351,7 +1350,7 @@ timestamp_time(PG_FUNCTION_ARGS)
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -1388,7 +1387,7 @@ timestamptz_time(PG_FUNCTION_ARGS)
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -1402,12 +1401,12 @@ timestamptz_time(PG_FUNCTION_ARGS)
Datum
datetime_timestamp(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeADT time = PG_GETARG_TIMEADT(1);
Timestamp result;
result = DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
- DateADTGetDatum(date)));
+ DateADTGetDatum(date)));
result += time;
PG_RETURN_TIMESTAMP(result);
@@ -1461,8 +1460,8 @@ interval_time(PG_FUNCTION_ARGS)
}
#else
result = span->time;
- if (result >= (double)SECS_PER_DAY || result < 0)
- result -= floor(result / (double)SECS_PER_DAY) * (double)SECS_PER_DAY;
+ if (result >= (double) SECS_PER_DAY || result < 0)
+ result -= floor(result / (double) SECS_PER_DAY) * (double) SECS_PER_DAY;
#endif
PG_RETURN_TIMEADT(result);
@@ -1506,7 +1505,7 @@ time_pl_interval(PG_FUNCTION_ARGS)
TimeADT time1;
result = time + span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
@@ -1533,7 +1532,7 @@ time_mi_interval(PG_FUNCTION_ARGS)
TimeADT time1;
result = time - span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
@@ -1678,8 +1677,8 @@ time_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1698,7 +1697,7 @@ time_part(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1714,7 +1713,7 @@ time_part(PG_FUNCTION_ARGS)
* Convert a tm structure to a time data type.
*/
static int
-tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result)
+tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
result->time = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
@@ -1787,6 +1786,7 @@ Datum
timetz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -1831,7 +1831,7 @@ timetz_send(PG_FUNCTION_ARGS)
* Convert TIME WITH TIME ZONE data type to POSIX time structure.
*/
static int
-timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 trem = time->time;
@@ -1846,8 +1846,8 @@ timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp)
double trem = time->time;
recalc:
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
@@ -1995,8 +1995,8 @@ timetz_hash(PG_FUNCTION_ARGS)
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(TimeTzADT), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(TimeTzADT), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
return hash_any((unsigned char *) key, sizeof(key->time) + sizeof(key->zone));
}
@@ -2052,7 +2052,7 @@ timetz_pl_interval(PG_FUNCTION_ARGS)
result->time += USECS_PER_DAY;
#else
result->time = time->time + span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
@@ -2085,7 +2085,7 @@ timetz_mi_interval(PG_FUNCTION_ARGS)
result->time += USECS_PER_DAY;
#else
result->time = time->time - span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
@@ -2105,8 +2105,8 @@ Datum
overlaps_timetz(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeTzADT *, but we leave them as generic Datums
- * for convenience of notation --- and to avoid dereferencing nulls.
+ * The arguments are TimeTzADT *, but we leave them as generic Datums for
+ * convenience of notation --- and to avoid dereferencing nulls.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -2123,9 +2123,9 @@ overlaps_timetz(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timetz_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -2173,8 +2173,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
if (TIMETZ_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -2184,8 +2184,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -2200,8 +2200,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -2209,8 +2209,7 @@ overlaps_timetz(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -2297,14 +2296,14 @@ timestamptz_timetz(PG_FUNCTION_ARGS)
Datum
datetimetz_timestamptz(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeTzADT *time = PG_GETARG_TIMETZADT_P(1);
TimestampTz result;
#ifdef HAVE_INT64_TIMESTAMP
result = date * USECS_PER_DAY + time->time + time->zone * USECS_PER_SEC;
#else
- result = date * (double)SECS_PER_DAY + time->time + time->zone;
+ result = date * (double) SECS_PER_DAY + time->time + time->zone;
#endif
PG_RETURN_TIMESTAMP(result);
@@ -2355,8 +2354,8 @@ text_timetz(PG_FUNCTION_ARGS)
if (VARSIZE(str) - VARHDRSZ > MAXDATELEN)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for type time with time zone: \"%s\"",
- VARDATA(str))));
+ errmsg("invalid input syntax for type time with time zone: \"%s\"",
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
@@ -2410,12 +2409,12 @@ timetz_part(PG_FUNCTION_ARGS)
case DTK_TZ_MINUTE:
result = -tz;
result /= SECS_PER_MINUTE;
- FMODULO(result, dummy, (double)SECS_PER_MINUTE);
+ FMODULO(result, dummy, (double) SECS_PER_MINUTE);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
@@ -2460,9 +2459,9 @@ timetz_part(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2479,9 +2478,9 @@ timetz_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2500,15 +2499,15 @@ timetz_zone(PG_FUNCTION_ARGS)
TimeTzADT *t = PG_GETARG_TIMETZADT_P(1);
TimeTzADT *result;
int tz;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -2516,7 +2515,7 @@ timetz_zone(PG_FUNCTION_ARGS)
if (tzp)
{
/* Get the offset-from-GMT that is valid today for the selected zone */
- pg_time_t now;
+ pg_time_t now;
struct pg_tm *tm;
now = time(NULL);
@@ -2546,7 +2545,7 @@ timetz_zone(PG_FUNCTION_ARGS)
}
result = (TimeTzADT *) palloc(sizeof(TimeTzADT));
-
+
#ifdef HAVE_INT64_TIMESTAMP
result->time = t->time + (t->zone - tz) * USECS_PER_SEC;
while (result->time < INT64CONST(0))
@@ -2582,7 +2581,7 @@ timetz_izone(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"interval\" time zone \"%s\" not valid",
DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index faacdb2eba4..5b3fc46d9c2 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.159 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,16 +28,16 @@
static int DecodeNumber(int flen, char *field, bool haveTextMonth,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeNumberField(int len, char *str,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeTime(char *str, int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static int DecodeTimezone(char *str, int *tzp);
static int DecodePosixTimezone(char *str, int *tzp);
static datetkn *datebsearch(char *key, datetkn *base, unsigned int nel);
-static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm);
+static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm);
static void TrimTrailingZeros(char *str);
@@ -308,8 +308,7 @@ static datetkn datetktbl[] = {
{"lhdt", DTZ, POS(44)}, /* Lord Howe Daylight Time, Australia */
{"lhst", TZ, POS(42)}, /* Lord Howe Standard Time, Australia */
{"ligt", TZ, POS(40)}, /* From Melbourne, Australia */
- {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14
- * hours!) */
+ {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14 hours!) */
{"lkt", TZ, POS(24)}, /* Lanka Time */
{"m", UNITS, DTK_MONTH}, /* "month" for ISO input */
{"magst", DTZ, POS(48)}, /* Magadan Summer Time */
@@ -681,7 +680,7 @@ j2day(int date)
* Get the transaction start time ("now()") broken down as a struct pg_tm.
*/
void
-GetCurrentDateTime(struct pg_tm *tm)
+GetCurrentDateTime(struct pg_tm * tm)
{
int tz;
fsec_t fsec;
@@ -698,7 +697,7 @@ GetCurrentDateTime(struct pg_tm *tm)
* including fractional seconds and timezone offset.
*/
void
-GetCurrentTimeUsec(struct pg_tm *tm, fsec_t *fsec, int *tzp)
+GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int tz;
@@ -741,8 +740,8 @@ TrimTrailingZeros(char *str)
*
* timestr - the input string
* workbuf - workspace for field string storage. This must be
- * larger than the largest legal input for this datetime type --
- * some additional space will be needed to NUL terminate fields.
+ * larger than the largest legal input for this datetime type --
+ * some additional space will be needed to NUL terminate fields.
* buflen - the size of workbuf
* field[] - pointers to field strings are returned in this array
* ftype[] - field type indicators are returned in this array
@@ -776,10 +775,10 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
const char *bufend = workbuf + buflen;
/*
- * Set the character pointed-to by "bufptr" to "newchar", and
- * increment "bufptr". "end" gives the end of the buffer -- we
- * return an error if there is no space left to append a character
- * to the buffer. Note that "bufptr" is evaluated twice.
+ * Set the character pointed-to by "bufptr" to "newchar", and increment
+ * "bufptr". "end" gives the end of the buffer -- we return an error if
+ * there is no space left to append a character to the buffer. Note that
+ * "bufptr" is evaluated twice.
*/
#define APPEND_CHAR(bufptr, end, newchar) \
do \
@@ -835,8 +834,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
APPEND_CHAR(bufp, bufend, *cp++);
/*
- * insist that the delimiters match to get a
- * three-field date.
+ * insist that the delimiters match to get a three-field
+ * date.
*/
if (*cp == delim)
{
@@ -855,8 +854,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
}
/*
- * otherwise, number only and will determine year, month, day,
- * or concatenated fields later...
+ * otherwise, number only and will determine year, month, day, or
+ * concatenated fields later...
*/
else
ftype[nf] = DTK_NUMBER;
@@ -872,8 +871,7 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
}
/*
- * text? then date string, month, day of week, special, or
- * timezone
+ * text? then date string, month, day of week, special, or timezone
*/
else if (isalpha((unsigned char) *cp))
{
@@ -883,8 +881,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
/*
- * Full date string with leading text month? Could also be a
- * POSIX time zone...
+ * Full date string with leading text month? Could also be a POSIX
+ * time zone...
*/
if (*cp == '-' || *cp == '/' || *cp == '.')
{
@@ -969,13 +967,12 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
*/
int
DecodeDateTime(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
type;
- int ptype = 0; /* "prefix type" for ISO y2001m02d04
- * format */
+ int ptype = 0; /* "prefix type" for ISO y2001m02d04 format */
int i;
int val;
int dterr;
@@ -1054,8 +1051,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with a date and
- * time already...
+ * field? Then we are in trouble with a date and time
+ * already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
@@ -1070,8 +1067,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
fmask,
@@ -1115,8 +1112,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
* DecodeTime()
*/
/* test for > 24:00:00 */
- if (tm->tm_hour > 24 ||
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
+ if (tm->tm_hour > 24 ||
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
return DTERR_FIELD_OVERFLOW;
break;
@@ -1132,9 +1129,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
@@ -1278,7 +1274,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -1316,9 +1312,9 @@ DecodeDateTime(char **field, int *ftype, int nf,
else if (cp != NULL && flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time Set
- * the type field to allow decoding other fields
- * later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set the
+ * type field to allow decoding other fields later.
+ * Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
@@ -1363,8 +1359,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
@@ -1380,7 +1376,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1400,7 +1396,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1425,8 +1421,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case MONTH:
/*
- * already have a (numeric) month? then see if we
- * can substitute...
+ * already have a (numeric) month? then see if we can
+ * substitute...
*/
if ((fmask & DTK_M(MONTH)) && !haveTextMonth &&
!(fmask & DTK_M(DAY)) && tm->tm_mon >= 1 &&
@@ -1442,8 +1438,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
@@ -1455,8 +1451,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
@@ -1497,9 +1493,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case ISOTIME:
/*
- * This is a filler field "t" indicating that the
- * next field is time. Try to verify that this is
- * sensible.
+ * This is a filler field "t" indicating that the next
+ * field is time. Try to verify that this is sensible.
*/
tmask = 0;
@@ -1546,8 +1541,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("inconsistent use of year %04d and \"BC\"",
- tm->tm_year)));
+ errmsg("inconsistent use of year %04d and \"BC\"",
+ tm->tm_year)));
}
else if (is2digits)
{
@@ -1597,9 +1592,9 @@ DecodeDateTime(char **field, int *ftype, int nf,
}
/*
- * Check for valid day of month, now that we know for sure the
- * month and year. Note we don't use MD_FIELD_OVERFLOW here,
- * since it seems unlikely that "Feb 29" is a YMD-order error.
+ * Check for valid day of month, now that we know for sure the month
+ * and year. Note we don't use MD_FIELD_OVERFLOW here, since it seems
+ * unlikely that "Feb 29" is a YMD-order error.
*/
if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
return DTERR_FIELD_OVERFLOW;
@@ -1608,8 +1603,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (tzp != NULL && !(fmask & DTK_M(TZ)))
{
/*
- * daylight savings time modifier but no standard timezone?
- * then error
+ * daylight savings time modifier but no standard timezone? then
+ * error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
@@ -1634,7 +1629,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
* of mktime(), anyway.
*/
int
-DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
+DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp)
{
int date,
sec;
@@ -1658,15 +1653,15 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
/*
* First, generate the pg_time_t value corresponding to the given
- * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide
- * the timezone is GMT. (We only need to worry about overflow on
- * machines where pg_time_t is 32 bits.)
+ * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide the
+ * timezone is GMT. (We only need to worry about overflow on machines
+ * where pg_time_t is 32 bits.)
*/
if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday))
goto overflow;
date = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - UNIX_EPOCH_JDATE;
- day = ((pg_time_t) date) *SECS_PER_DAY;
+ day = ((pg_time_t) date) * SECS_PER_DAY;
if (day / SECS_PER_DAY != date)
goto overflow;
sec = tm->tm_sec + (tm->tm_min + tm->tm_hour * MINS_PER_HOUR) * SECS_PER_MINUTE;
@@ -1676,10 +1671,10 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
goto overflow;
/*
- * Find the DST time boundary just before or following the target time.
- * We assume that all zones have GMT offsets less than 24 hours, and
- * that DST boundaries can't be closer together than 48 hours, so
- * backing up 24 hours and finding the "next" boundary will work.
+ * Find the DST time boundary just before or following the target time. We
+ * assume that all zones have GMT offsets less than 24 hours, and that DST
+ * boundaries can't be closer together than 48 hours, so backing up 24
+ * hours and finding the "next" boundary will work.
*/
prevtime = mytime - SECS_PER_DAY;
if (mytime < 0 && prevtime > 0)
@@ -1689,7 +1684,7 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
&before_gmtoff, &before_isdst,
&boundary,
&after_gmtoff, &after_isdst,
- tzp);
+ tzp);
if (res < 0)
goto overflow; /* failure? */
@@ -1697,7 +1692,7 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
{
/* Non-DST zone, life is simple */
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
/*
@@ -1722,24 +1717,25 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
if (beforetime <= boundary && aftertime < boundary)
{
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
if (beforetime > boundary && aftertime >= boundary)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
+
/*
- * It's an invalid or ambiguous time due to timezone transition.
- * Prefer the standard-time interpretation.
+ * It's an invalid or ambiguous time due to timezone transition. Prefer
+ * the standard-time interpretation.
*/
if (after_isdst == 0)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
overflow:
/* Given date is out of range, so assume UTC */
@@ -1762,7 +1758,7 @@ overflow:
*/
int
DecodeTimeOnly(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
@@ -1792,8 +1788,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTK_DATE:
/*
- * Time zone not allowed? Then should not accept dates or
- * time zones no matter what else!
+ * Time zone not allowed? Then should not accept dates or time
+ * zones no matter what else!
*/
if (tzp == NULL)
return DTERR_BAD_FORMAT;
@@ -1815,15 +1811,13 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with time
- * already...
+ * field? Then we are in trouble with time already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
/*
- * Should not get here and fail. Sanity check
- * only...
+ * Should not get here and fail. Sanity check only...
*/
if ((cp = strchr(field[i], '-')) == NULL)
return DTERR_BAD_FORMAT;
@@ -1835,8 +1829,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
(fmask | DTK_DATE_M),
@@ -1879,9 +1873,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
@@ -2025,10 +2018,10 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
tmask |= DTK_TIME_M;
#ifdef HAVE_INT64_TIMESTAMP
dt2time(time * USECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
dt2time(time * SECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#endif
}
break;
@@ -2036,7 +2029,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -2080,12 +2073,12 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
else if (flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time
- * Set the type field to allow decoding other
- * fields later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set
+ * the type field to allow decoding other fields
+ * later. Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -2133,8 +2126,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
@@ -2162,8 +2155,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
@@ -2175,8 +2168,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
@@ -2247,14 +2240,14 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
if (tm->tm_hour < 0 || tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60 || tm->tm_hour > 24 ||
- /* test for > 24:00:00 */
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
+ /* test for > 24:00:00 */
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
#ifdef HAVE_INT64_TIMESTAMP
- *fsec > INT64CONST(0))) ||
+ *fsec > INT64CONST(0))) ||
*fsec < INT64CONST(0) || *fsec >= USECS_PER_SEC)
return DTERR_FIELD_OVERFLOW;
#else
- *fsec > 0)) ||
+ *fsec > 0)) ||
*fsec < 0 || *fsec >= 1)
return DTERR_FIELD_OVERFLOW;
#endif
@@ -2269,8 +2262,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
*tmp = &tt;
/*
- * daylight savings time modifier but no standard timezone? then
- * error
+ * daylight savings time modifier but no standard timezone? then error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
@@ -2300,7 +2292,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
* Insist on a complete set of fields.
*/
static int
-DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm)
+DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm)
{
fsec_t fsec;
int nf = 0;
@@ -2458,7 +2450,7 @@ DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm)
* can be used to represent time spans.
*/
static int
-DecodeTime(char *str, int fmask, int *tmask, struct pg_tm *tm, fsec_t *fsec)
+DecodeTime(char *str, int fmask, int *tmask, struct pg_tm * tm, fsec_t *fsec)
{
char *cp;
@@ -2522,7 +2514,7 @@ DecodeTime(char *str, int fmask, int *tmask, struct pg_tm *tm, fsec_t *fsec)
*/
static int
DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
int val;
char *cp;
@@ -2539,8 +2531,8 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
double frac;
/*
- * More than two digits before decimal point? Then could be a date
- * or a run-together time: 2001.360 20011225 040506.789
+ * More than two digits before decimal point? Then could be a date or
+ * a run-together time: 2001.360 20011225 040506.789
*/
if (cp - str > 2)
{
@@ -2581,9 +2573,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
case 0:
/*
- * Nothing so far; make a decision about what we think the
- * input is. There used to be lots of heuristics here, but
- * the consensus now is to be paranoid. It *must* be either
+ * Nothing so far; make a decision about what we think the input
+ * is. There used to be lots of heuristics here, but the
+ * consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
*/
@@ -2614,12 +2606,11 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
if (haveTextMonth)
{
/*
- * We are at the first numeric field of a date that
- * included a textual month name. We want to support the
- * variants MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as
- * unambiguous inputs. We will also accept MON-DD-YY or
- * DD-MON-YY in either DMY or MDY modes, as well as
- * YY-MON-DD in YMD mode.
+ * We are at the first numeric field of a date that included a
+ * textual month name. We want to support the variants
+ * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
{
@@ -2693,8 +2684,8 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
}
/*
- * When processing a year field, mark it for adjustment if it's only
- * one or two digits.
+ * When processing a year field, mark it for adjustment if it's only one
+ * or two digits.
*/
if (*tmask == DTK_M(YEAR))
*is2digits = (flen <= 2);
@@ -2712,13 +2703,13 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
*/
static int
DecodeNumberField(int len, char *str, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
char *cp;
/*
- * Have a decimal point? Then this is a date or something with a
- * seconds field...
+ * Have a decimal point? Then this is a date or something with a seconds
+ * field...
*/
if ((cp = strchr(str, '.')) != NULL)
{
@@ -2970,7 +2961,7 @@ DecodeSpecial(int field, char *lowtoken, int *val)
* preceding an hh:mm:ss field. - thomas 1998-04-30
*/
int
-DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, fsec_t *fsec)
+DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm, fsec_t *fsec)
{
int is_before = FALSE;
char *cp;
@@ -3014,9 +3005,9 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, f
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * A single signed number ends up here, but will be
- * rejected by DecodeTime(). So, work this out to drop
- * through to DTK_NUMBER, which *can* tolerate this.
+ * A single signed number ends up here, but will be rejected
+ * by DecodeTime(). So, work this out to drop through to
+ * DTK_NUMBER, which *can* tolerate this.
*/
cp = field[i] + 1;
while (*cp != '\0' && *cp != ':' && *cp != '.')
@@ -3035,8 +3026,8 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, f
/*
* Set the next type to be a day, if units are not
- * specified. This handles the case of '1 +02:03'
- * since we are reading right to left.
+ * specified. This handles the case of '1 +02:03' since we
+ * are reading right to left.
*/
type = DTK_DAY;
tmask = DTK_M(TZ);
@@ -3366,7 +3357,7 @@ DateTimeParseError(int dterr, const char *str, const char *datatype)
(errcode(ERRCODE_DATETIME_FIELD_OVERFLOW),
errmsg("date/time field value out of range: \"%s\"",
str),
- errhint("Perhaps you need a different \"datestyle\" setting.")));
+ errhint("Perhaps you need a different \"datestyle\" setting.")));
break;
case DTERR_INTERVAL_OVERFLOW:
ereport(ERROR,
@@ -3376,9 +3367,9 @@ DateTimeParseError(int dterr, const char *str, const char *datatype)
break;
case DTERR_TZDISP_OVERFLOW:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
- errmsg("time zone displacement out of range: \"%s\"",
- str)));
+ (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
+ errmsg("time zone displacement out of range: \"%s\"",
+ str)));
break;
case DTERR_BAD_FORMAT:
default:
@@ -3424,7 +3415,7 @@ datebsearch(char *key, datetkn *base, unsigned int nel)
* Encode date as local time.
*/
int
-EncodeDateOnly(struct pg_tm *tm, int style, char *str)
+EncodeDateOnly(struct pg_tm * tm, int style, char *str)
{
if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR)
return -1;
@@ -3438,7 +3429,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
tm->tm_year, tm->tm_mon, tm->tm_mday);
else
sprintf(str, "%04d-%02d-%02d %s",
- -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
+ -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
break;
case USE_SQL_DATES:
@@ -3484,7 +3475,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
* Encode time fields only.
*/
int
-EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
+EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, int *tzp, int style, char *str)
{
if (tm->tm_hour < 0 || tm->tm_hour > HOURS_PER_DAY)
return -1;
@@ -3492,8 +3483,8 @@ EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
sprintf(str, "%02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The fractional field widths
- * here should be equal to the larger of MAX_TIME_PRECISION and
+ * Print fractional seconds if any. The fractional field widths here
+ * should be equal to the larger of MAX_TIME_PRECISION and
* MAX_TIMESTAMP_PRECISION.
*/
if (fsec != 0)
@@ -3534,15 +3525,15 @@ EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
* European - dd/mm/yyyy
*/
int
-EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
+EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
{
int day,
hour,
min;
/*
- * Why are we checking only the month field? Change this to an
- * assert... if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
+ * Why are we checking only the month field? Change this to an assert...
+ * if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
*/
Assert(tm->tm_mon >= 1 && tm->tm_mon <= MONTHS_PER_YEAR);
@@ -3556,11 +3547,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3579,10 +3570,10 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
/*
- * tzp == NULL indicates that we don't want *any* time zone
- * info in the output string. *tzn != NULL indicates that we
- * have alpha time zone info available. tm_isdst != -1
- * indicates that we have a valid time zone translation.
+ * tzp == NULL indicates that we don't want *any* time zone info
+ * in the output string. *tzn != NULL indicates that we have alpha
+ * time zone info available. tm_isdst != -1 indicates that we have
+ * a valid time zone translation.
*/
if (tzp != NULL && tm->tm_isdst >= 0)
{
@@ -3608,11 +3599,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3656,11 +3647,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3703,7 +3694,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
strncpy(str, days[tm->tm_wday], 3);
strcpy(str + 3, " ");
-
+
if (DateOrder == DATEORDER_DMY)
sprintf(str + 4, "%02d %3s", tm->tm_mday, months[tm->tm_mon - 1]);
else
@@ -3712,11 +3703,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + 10, " %02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3735,7 +3726,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
sprintf(str + strlen(str), " %04d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
if (tzp != NULL && tm->tm_isdst >= 0)
{
@@ -3745,10 +3736,9 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
{
/*
* We have a time zone, but no string version. Use the
- * numeric form, but be sure to include a leading
- * space to avoid formatting something which would be
- * rejected by the date/time parser later. - thomas
- * 2001-10-19
+ * numeric form, but be sure to include a leading space to
+ * avoid formatting something which would be rejected by
+ * the date/time parser later. - thomas 2001-10-19
*/
hour = -(*tzp / SECS_PER_HOUR);
min = (abs(*tzp) / MINS_PER_HOUR) % MINS_PER_HOUR;
@@ -3774,7 +3764,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
* - thomas 1998-04-30
*/
int
-EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str)
+EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
{
int is_before = FALSE;
int is_nonzero = FALSE;
@@ -3782,9 +3772,8 @@ EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str)
/*
* The sign of year and month are guaranteed to match, since they are
- * stored internally as "month". But we'll need to check for is_before
- * and is_nonzero when determining the signs of hour/minute/seconds
- * fields.
+ * stored internally as "month". But we'll need to check for is_before and
+ * is_nonzero when determining the signs of hour/minute/seconds fields.
*/
switch (style)
{
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 03e02278d11..0b229e20593 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.30 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.31 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -179,11 +179,10 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
if (typByVal)
{
/*
- * just compare the two datums. NOTE: just comparing "len" bytes
- * will not do the work, because we do not know how these bytes
- * are aligned inside the "Datum". We assume instead that any
- * given datatype is consistent about how it fills extraneous bits
- * in the Datum.
+ * just compare the two datums. NOTE: just comparing "len" bytes will
+ * not do the work, because we do not know how these bytes are aligned
+ * inside the "Datum". We assume instead that any given datatype is
+ * consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
}
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index c8917b145c4..4a0ac3dcfb1 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.5 2005/09/29 22:04:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.6 2005/10/15 02:49:28 momjian Exp $
*
*/
@@ -31,22 +31,22 @@ static int64
db_dir_size(const char *path)
{
int64 dirsize = 0;
- struct dirent *direntry;
- DIR *dirdesc;
- char filename[MAXPGPATH];
+ struct dirent *direntry;
+ DIR *dirdesc;
+ char filename[MAXPGPATH];
dirdesc = AllocateDir(path);
if (!dirdesc)
- return 0;
+ return 0;
while ((direntry = ReadDir(dirdesc, path)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(filename, MAXPGPATH, "%s/%s", path, direntry->d_name);
@@ -54,8 +54,8 @@ db_dir_size(const char *path)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not stat \"%s\": %m", filename)));
-
- dirsize += fst.st_size;
+
+ dirsize += fst.st_size;
}
FreeDir(dirdesc);
@@ -69,10 +69,10 @@ static int64
calculate_database_size(Oid dbOid)
{
int64 totalsize;
- DIR *dirdesc;
- struct dirent *direntry;
- char dirpath[MAXPGPATH];
- char pathname[MAXPGPATH];
+ DIR *dirdesc;
+ struct dirent *direntry;
+ char dirpath[MAXPGPATH];
+ char pathname[MAXPGPATH];
/* Shared storage in pg_global is not counted */
@@ -84,16 +84,16 @@ calculate_database_size(Oid dbOid)
snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc", DataDir);
dirdesc = AllocateDir(dirpath);
if (!dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open tablespace directory \"%s\": %m",
dirpath)));
while ((direntry = ReadDir(dirdesc, dirpath)) != NULL)
{
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/pg_tblspc/%s/%u",
DataDir, direntry->d_name, dbOid);
@@ -104,7 +104,7 @@ calculate_database_size(Oid dbOid)
/* Complain if we found no trace of the DB at all */
if (!totalsize)
- ereport(ERROR,
+ ereport(ERROR,
(ERRCODE_UNDEFINED_DATABASE,
errmsg("database with OID %u does not exist", dbOid)));
@@ -114,7 +114,7 @@ calculate_database_size(Oid dbOid)
Datum
pg_database_size_oid(PG_FUNCTION_ARGS)
{
- Oid dbOid = PG_GETARG_OID(0);
+ Oid dbOid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_database_size(dbOid));
}
@@ -122,8 +122,8 @@ pg_database_size_oid(PG_FUNCTION_ARGS)
Datum
pg_database_size_name(PG_FUNCTION_ARGS)
{
- Name dbName = PG_GETARG_NAME(0);
- Oid dbOid = get_database_oid(NameStr(*dbName));
+ Name dbName = PG_GETARG_NAME(0);
+ Oid dbOid = get_database_oid(NameStr(*dbName));
if (!OidIsValid(dbOid))
ereport(ERROR,
@@ -141,16 +141,16 @@ pg_database_size_name(PG_FUNCTION_ARGS)
static int64
calculate_tablespace_size(Oid tblspcOid)
{
- char tblspcPath[MAXPGPATH];
- char pathname[MAXPGPATH];
- int64 totalsize=0;
- DIR *dirdesc;
- struct dirent *direntry;
+ char tblspcPath[MAXPGPATH];
+ char pathname[MAXPGPATH];
+ int64 totalsize = 0;
+ DIR *dirdesc;
+ struct dirent *direntry;
if (tblspcOid == DEFAULTTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
else if (tblspcOid == GLOBALTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
else
snprintf(tblspcPath, MAXPGPATH, "%s/pg_tblspc/%u", DataDir, tblspcOid);
@@ -164,11 +164,11 @@ calculate_tablespace_size(Oid tblspcOid)
while ((direntry = ReadDir(dirdesc, tblspcPath)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/%s", tblspcPath, direntry->d_name);
@@ -178,29 +178,29 @@ calculate_tablespace_size(Oid tblspcOid)
errmsg("could not stat \"%s\": %m", pathname)));
if (fst.st_mode & S_IFDIR)
- totalsize += db_dir_size(pathname);
-
- totalsize += fst.st_size;
+ totalsize += db_dir_size(pathname);
+
+ totalsize += fst.st_size;
}
FreeDir(dirdesc);
-
+
return totalsize;
}
Datum
pg_tablespace_size_oid(PG_FUNCTION_ARGS)
{
- Oid tblspcOid = PG_GETARG_OID(0);
-
+ Oid tblspcOid = PG_GETARG_OID(0);
+
PG_RETURN_INT64(calculate_tablespace_size(tblspcOid));
}
Datum
pg_tablespace_size_name(PG_FUNCTION_ARGS)
{
- Name tblspcName = PG_GETARG_NAME(0);
- Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
+ Name tblspcName = PG_GETARG_NAME(0);
+ Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
if (!OidIsValid(tblspcOid))
ereport(ERROR,
@@ -226,22 +226,22 @@ calculate_relation_size(RelFileNode *rfn)
Assert(OidIsValid(rfn->spcNode));
if (rfn->spcNode == DEFAULTTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
+ snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
else if (rfn->spcNode == GLOBALTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
else
- snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
+ snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
DataDir, rfn->spcNode, rfn->dbNode);
- for (segcount = 0; ; segcount++)
+ for (segcount = 0;; segcount++)
{
struct stat fst;
if (segcount == 0)
- snprintf(pathname, MAXPGPATH, "%s/%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u",
dirpath, rfn->relNode);
else
- snprintf(pathname, MAXPGPATH, "%s/%u.%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u.%u",
dirpath, rfn->relNode, segcount);
if (stat(pathname, &fst) < 0)
@@ -262,7 +262,7 @@ calculate_relation_size(RelFileNode *rfn)
Datum
pg_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relOid=PG_GETARG_OID(0);
+ Oid relOid = PG_GETARG_OID(0);
Relation rel;
int64 size;
@@ -282,12 +282,12 @@ pg_relation_size_name(PG_FUNCTION_ARGS)
RangeVar *relrv;
Relation rel;
int64 size;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
-
+
size = calculate_relation_size(&(rel->rd_node));
-
+
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(size);
@@ -295,9 +295,9 @@ pg_relation_size_name(PG_FUNCTION_ARGS)
/*
- * Compute the on-disk size of files for the relation according to the
- * stat function, optionally including heap data, index data, and/or
- * toast data.
+ * Compute the on-disk size of files for the relation according to the
+ * stat function, optionally including heap data, index data, and/or
+ * toast data.
*/
static int64
calculate_total_relation_size(Oid Relid)
@@ -317,7 +317,7 @@ calculate_total_relation_size(Oid Relid)
if (heapRel->rd_rel->relhasindex)
{
/* recursively include any dependent indexes */
- List *index_oids = RelationGetIndexList(heapRel);
+ List *index_oids = RelationGetIndexList(heapRel);
foreach(cell, index_oids)
{
@@ -344,13 +344,13 @@ calculate_total_relation_size(Oid Relid)
}
/*
- * Compute on-disk size of files for 'relation' including
- * heap data, index data, and toasted data.
+ * Compute on-disk size of files for 'relation' including
+ * heap data, index data, and toasted data.
*/
Datum
pg_total_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relid = PG_GETARG_OID(0);
+ Oid relid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
@@ -361,10 +361,10 @@ pg_total_relation_size_name(PG_FUNCTION_ARGS)
text *relname = PG_GETARG_TEXT_P(0);
RangeVar *relrv;
Oid relid;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
relid = RangeVarGetRelid(relrv, false);
-
+
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
@@ -374,35 +374,35 @@ pg_total_relation_size_name(PG_FUNCTION_ARGS)
Datum
pg_size_pretty(PG_FUNCTION_ARGS)
{
- int64 size = PG_GETARG_INT64(0);
- char *result = palloc(50 + VARHDRSZ);
- int64 limit = 10 * 1024;
- int64 mult = 1;
+ int64 size = PG_GETARG_INT64(0);
+ char *result = palloc(50 + VARHDRSZ);
+ int64 limit = 10 * 1024;
+ int64 mult = 1;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
- (size + mult / 2) / mult);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
+ (size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
(size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
(size + mult / 2) / mult);
else
{
- mult *= 1024;
- snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
+ mult *= 1024;
+ snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
(size + mult / 2) / mult);
}
}
diff --git a/src/backend/utils/adt/encode.c b/src/backend/utils/adt/encode.c
index 659263230ff..1f23a8419ee 100644
--- a/src/backend/utils/adt/encode.c
+++ b/src/backend/utils/adt/encode.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.15 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.16 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -175,7 +175,7 @@ hex_decode(const char *src, unsigned len, char *dst)
if (s >= srcend)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid hexadecimal data: odd number of digits")));
+ errmsg("invalid hexadecimal data: odd number of digits")));
v2 = get_hex(*s++);
*p++ = v1 | v2;
@@ -428,8 +428,8 @@ esc_decode(const char *src, unsigned srclen, char *dst)
else
{
/*
- * One backslash, not followed by ### valid octal. Should
- * never get here, since esc_dec_len does same check.
+ * One backslash, not followed by ### valid octal. Should never
+ * get here, since esc_dec_len does same check.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index c943ee2c71d..fb37e36624e 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.114 2005/04/06 23:56:07 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.115 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -235,11 +235,11 @@ CheckFloat8Val(double val)
if (fabs(val) > FLOAT8_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: overflow")));
+ errmsg("type \"double precision\" value out of range: overflow")));
if (val != 0.0 && fabs(val) < FLOAT8_MIN)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: underflow")));
+ errmsg("type \"double precision\" value out of range: underflow")));
}
/*
@@ -258,15 +258,15 @@ float4in(PG_FUNCTION_ARGS)
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
@@ -285,10 +285,9 @@ float4in(PG_FUNCTION_ARGS)
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
@@ -320,9 +319,9 @@ float4in(PG_FUNCTION_ARGS)
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
@@ -341,8 +340,8 @@ float4in(PG_FUNCTION_ARGS)
orig_num)));
/*
- * if we get here, we have a legal double, still need to check to see
- * if it's a legal float4
+ * if we get here, we have a legal double, still need to check to see if
+ * it's a legal float4
*/
if (!isinf(val))
CheckFloat4Val(val);
@@ -426,21 +425,21 @@ float8in(PG_FUNCTION_ARGS)
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
/* skip leading whitespace */
while (*num != '\0' && isspace((unsigned char) *num))
@@ -453,10 +452,9 @@ float8in(PG_FUNCTION_ARGS)
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
@@ -476,21 +474,21 @@ float8in(PG_FUNCTION_ARGS)
else if (errno == ERANGE)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
}
#ifdef HAVE_BUGGY_SOLARIS_STRTOD
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
@@ -505,8 +503,8 @@ float8in(PG_FUNCTION_ARGS)
if (*endptr != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
if (!isinf(val))
CheckFloat8Val(val);
@@ -860,9 +858,9 @@ static int
float4_cmp_internal(float4 a, float4 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
@@ -956,9 +954,9 @@ static int
float8_cmp_internal(float8 a, float8 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
@@ -1465,8 +1463,8 @@ dpow(PG_FUNCTION_ARGS)
float8 result;
/*
- * The SQL spec requires that we emit a particular SQLSTATE error code
- * for certain error conditions.
+ * The SQL spec requires that we emit a particular SQLSTATE error code for
+ * certain error conditions.
*/
if ((arg1 == 0 && arg2 < 0) ||
(arg1 < 0 && floor(arg2) != arg2))
@@ -1475,8 +1473,8 @@ dpow(PG_FUNCTION_ARGS)
errmsg("invalid argument for power function")));
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms...
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms...
*/
errno = 0;
result = pow(arg1, arg2);
@@ -1504,9 +1502,9 @@ dexp(PG_FUNCTION_ARGS)
float8 result;
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms. Also, a
- * zero result implies unreported underflow.
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms. Also, a zero result
+ * implies unreported underflow.
*/
errno = 0;
result = exp(arg1);
@@ -1534,8 +1532,8 @@ dlog1(PG_FUNCTION_ARGS)
float8 result;
/*
- * Emit particular SQLSTATE error codes for ln(). This is required by
- * the SQL standard.
+ * Emit particular SQLSTATE error codes for ln(). This is required by the
+ * SQL standard.
*/
if (arg1 == 0.0)
ereport(ERROR,
@@ -1563,9 +1561,9 @@ dlog10(PG_FUNCTION_ARGS)
float8 result;
/*
- * Emit particular SQLSTATE error codes for log(). The SQL spec
- * doesn't define log(), but it does define ln(), so it makes sense to
- * emit the same error code for an analogous error condition.
+ * Emit particular SQLSTATE error codes for log(). The SQL spec doesn't
+ * define log(), but it does define ln(), so it makes sense to emit the
+ * same error code for an analogous error condition.
*/
if (arg1 == 0.0)
ereport(ERROR,
@@ -1914,9 +1912,8 @@ float8_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
@@ -1937,7 +1934,7 @@ float8_accum(PG_FUNCTION_ARGS)
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
@@ -1968,9 +1965,8 @@ float4_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
@@ -1991,7 +1987,7 @@ float4_accum(PG_FUNCTION_ARGS)
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 0280196af9a..adbfb588580 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.40 2005/03/29 00:17:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.41 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -138,12 +138,12 @@ format_type_internal(Oid type_oid, int32 typemod,
typeform = (Form_pg_type) GETSTRUCT(tuple);
/*
- * Check if it's an array (and not a domain --- we don't want to show
- * the substructure of a domain type). Fixed-length array types such
- * as "name" shouldn't get deconstructed either. As of Postgres 8.1,
- * rather than checking typlen we check the toast property, and don't
- * deconstruct "plain storage" array types --- this is because we don't
- * want to show oidvector as oid[].
+ * Check if it's an array (and not a domain --- we don't want to show the
+ * substructure of a domain type). Fixed-length array types such as
+ * "name" shouldn't get deconstructed either. As of Postgres 8.1, rather
+ * than checking typlen we check the toast property, and don't deconstruct
+ * "plain storage" array types --- this is because we don't want to show
+ * oidvector as oid[].
*/
array_base_type = typeform->typelem;
@@ -171,14 +171,14 @@ format_type_internal(Oid type_oid, int32 typemod,
is_array = false;
/*
- * See if we want to special-case the output for certain built-in
- * types. Note that these special cases should all correspond to
- * special productions in gram.y, to ensure that the type name will be
- * taken as a system type, not a user type of the same name.
+ * See if we want to special-case the output for certain built-in types.
+ * Note that these special cases should all correspond to special
+ * productions in gram.y, to ensure that the type name will be taken as a
+ * system type, not a user type of the same name.
*
* If we do not provide a special-case output here, the type name will be
- * handled the same way as a user type name --- in particular, it will
- * be double-quoted if it matches any lexer keyword. This behavior is
+ * handled the same way as a user type name --- in particular, it will be
+ * double-quoted if it matches any lexer keyword. This behavior is
* essential for some cases, such as types "bit" and "char".
*/
buf = NULL; /* flag for no special case */
@@ -193,8 +193,8 @@ format_type_internal(Oid type_oid, int32 typemod,
{
/*
* bit with typmod -1 is not the same as BIT, which means
- * BIT(1) per SQL spec. Report it as the quoted typename
- * so that parser will not assign a bogus typmod.
+ * BIT(1) per SQL spec. Report it as the quoted typename so
+ * that parser will not assign a bogus typmod.
*/
}
else
@@ -212,9 +212,9 @@ format_type_internal(Oid type_oid, int32 typemod,
else if (typemod_given)
{
/*
- * bpchar with typmod -1 is not the same as CHARACTER,
- * which means CHARACTER(1) per SQL spec. Report it as
- * bpchar so that parser will not assign a bogus typmod.
+ * bpchar with typmod -1 is not the same as CHARACTER, which
+ * means CHARACTER(1) per SQL spec. Report it as bpchar so
+ * that parser will not assign a bogus typmod.
*/
}
else
@@ -382,9 +382,9 @@ format_type_internal(Oid type_oid, int32 typemod,
{
/*
* Default handling: report the name as it appears in the catalog.
- * Here, we must qualify the name if it is not visible in the
- * search path, and we must double-quote it if it's not a standard
- * identifier or if it matches any keyword.
+ * Here, we must qualify the name if it is not visible in the search
+ * path, and we must double-quote it if it's not a standard identifier
+ * or if it matches any keyword.
*/
char *nspname;
char *typname;
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 1e3553816d7..90e940e7b9c 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.99 2005/08/18 13:43:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.100 2005/10/15 02:49:28 momjian Exp $
*
*
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
@@ -135,9 +135,9 @@ typedef struct
{
const char *name; /* keyword */
int len; /* keyword length */
- int (*action) (int arg, char *inout, /* action for keyword */
- int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data);
+ int (*action) (int arg, char *inout, /* action for keyword */
+ int suf, bool is_to_char, bool is_interval,
+ FormatNode *node, void *data);
int id; /* keyword id */
bool isitdigit; /* is expected output/input digit */
} KeyWord;
@@ -252,7 +252,7 @@ static char *numth[] = {"st", "nd", "rd", "th", NULL};
* Flags for DCH version
* ----------
*/
-static bool DCH_global_fx = false;
+static bool DCH_global_fx = false;
/* ----------
@@ -379,7 +379,7 @@ typedef struct
q,
j,
us,
- yysz; /* is it YY or YYYY ? */
+ yysz; /* is it YY or YYYY ? */
} TmFromChar;
#define ZERO_tmfc(_X) memset(_X, 0, sizeof(TmFromChar))
@@ -442,17 +442,17 @@ do { \
errmsg("invalid format specification for an interval value"), \
errhint("Intervals are not tied to specific calendar dates."))); \
} while(0)
-
+
/*****************************************************************************
* KeyWords definition & action
*****************************************************************************/
-static int dch_global(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_time(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_date(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
+static int dch_global(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_time(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_date(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
/* ----------
* Suffixes:
@@ -803,7 +803,7 @@ static const KeyWord NUM_keywords[] = {
* KeyWords index for DATE-TIME version
* ----------
*/
-static const int DCH_index[KeyWord_INDEX_SIZE] = {
+static const int DCH_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
@@ -827,7 +827,7 @@ static const int DCH_index[KeyWord_INDEX_SIZE] = {
* KeyWords index for NUMBER version
* ----------
*/
-static const int NUM_index[KeyWord_INDEX_SIZE] = {
+static const int NUM_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
@@ -871,8 +871,7 @@ typedef struct NUMProc
*number_p, /* pointer to current number position */
*inout, /* in / out buffer */
*inout_p, /* pointer to current inout position */
- *last_relevant, /* last relevant number after decimal
- * point */
+ *last_relevant, /* last relevant number after decimal point */
*L_negative_sign, /* Locale */
*L_positive_sign,
@@ -887,13 +886,13 @@ typedef struct NUMProc
* ----------
*/
static const KeyWord *index_seq_search(char *str, const KeyWord *kw,
- const int *index);
+ const int *index);
static KeySuffix *suff_search(char *str, KeySuffix *suf, int type);
static void NUMDesc_prepare(NUMDesc *num, FormatNode *n);
static void parse_format(FormatNode *node, char *str, const KeyWord *kw,
KeySuffix *suf, const int *index, int ver, NUMDesc *Num);
static char *DCH_processor(FormatNode *node, char *inout, bool is_to_char,
- bool is_interval, void *data);
+ bool is_interval, void *data);
#ifdef DEBUG_TO_FROM_CHAR
static void dump_index(const KeyWord *k, const int *index);
@@ -909,7 +908,7 @@ static char *str_tolower(char *buff);
/* static int is_acdc(char *str, int *len); */
static int seq_search(char *name, char **array, int type, int max, int *len);
static void do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static char *fill_str(char *str, int c, int max);
static FormatNode *NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree);
static char *int_to_roman(int number);
@@ -1047,7 +1046,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_DECIMAL;
break;
@@ -1152,7 +1151,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_MULTI;
break;
@@ -1324,11 +1323,11 @@ DCH_processor(FormatNode *node, char *inout, bool is_to_char,
if (!is_to_char && *s == '\0')
/*
- * The input string is shorter than format picture, so it's
- * good time to break this loop...
+ * The input string is shorter than format picture, so it's good
+ * time to break this loop...
*
- * Note: this isn't relevant for TO_CHAR mode, beacuse it use
- * 'inout' allocated by format picture length.
+ * Note: this isn't relevant for TO_CHAR mode, beacuse it use 'inout'
+ * allocated by format picture length.
*/
break;
@@ -1393,7 +1392,7 @@ dump_node(FormatNode *node, int max)
{
if (n->type == NODE_TYPE_ACTION)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_ACTION '%s'\t(%s,%s)",
- a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
+ a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
else if (n->type == NODE_TYPE_CHAR)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_CHAR '%c'", a, n->character);
else if (n->type == NODE_TYPE_END)
@@ -1578,8 +1577,8 @@ seq_search(char *name, char **array, int type, int max, int *len)
#ifdef DEBUG_TO_FROM_CHAR
/*
- * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p,
- * *a, name);
+ * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p, *a,
+ * name);
*/
#endif
if (*n != *p)
@@ -1637,7 +1636,7 @@ dump_index(const KeyWord *k, const int *index)
*/
static int
dch_global(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
if (arg == DCH_FX)
DCH_global_fx = true;
@@ -1704,7 +1703,7 @@ strdigits_len(char *str)
*/
static int
dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char *p_inout = inout;
struct pg_tm *tm = NULL;
@@ -1727,7 +1726,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
return strlen(p_inout);
}
else
@@ -1747,7 +1746,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
return strlen(p_inout);
}
else
@@ -1767,7 +1766,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
return strlen(p_inout);
}
else
@@ -1787,7 +1786,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
return strlen(p_inout);
}
else
@@ -1925,15 +1924,13 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
/*
- * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not
- * 0.25
+ * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not 0.25
*/
tmfc->ms *= x == 1 ? 100 :
x == 2 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms,
- * len);
+ * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms, len);
*/
return len + SKIP_THth(suf);
}
@@ -1974,8 +1971,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
x == 5 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us,
- * len);
+ * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us, len);
*/
return len + SKIP_THth(suf);
}
@@ -2049,7 +2045,7 @@ do { \
*/
static int
dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char buff[DCH_CACHE_SIZE],
workbuff[32],
@@ -2069,8 +2065,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
tmfc = (TmFromChar *) data;
/*
- * In the FROM-char is not difference between "January" or "JANUARY"
- * or "january", all is before search convert to "first-upper". This
+ * In the FROM-char is not difference between "January" or "JANUARY" or
+ * "january", all is before search convert to "first-upper". This
* convention is used for MONTH, MON, DAY, DY
*/
if (!is_to_char)
@@ -2193,7 +2189,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
return strlen(p_inout);
case DCH_MON:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
@@ -2201,14 +2197,14 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
return strlen(p_inout);
case DCH_Mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
return strlen(p_inout);
case DCH_mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
@@ -2238,38 +2234,38 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
break;
case DCH_DAY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(workbuff, days[tm->tm_wday]);
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, str_toupper(workbuff));
return strlen(p_inout);
case DCH_Day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
return strlen(p_inout);
case DCH_day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
return strlen(p_inout);
case DCH_DY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
str_toupper(inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_Dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_DDD:
if (is_to_char)
@@ -2316,7 +2312,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
break;
case DCH_D:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (is_to_char)
{
sprintf(inout, "%d", tm->tm_wday + 1);
@@ -2357,7 +2353,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
- date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
+ date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
@@ -2447,17 +2443,17 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
else
sprintf(inout, "%d",
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
@@ -2486,8 +2482,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 3));
if (S_THth(suf))
@@ -2518,8 +2514,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 2));
if (S_THth(suf))
@@ -2531,8 +2527,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
sscanf(inout, "%02d", &tmfc->year);
/*
- * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ...
- * '99' = 1970 ... 1999
+ * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ... '99'
+ * = 1970 ... 1999
*/
if (tmfc->year < 70)
tmfc->year += 2000;
@@ -2550,8 +2546,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_Y ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 1));
if (S_THth(suf))
@@ -2751,8 +2747,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
result = palloc((fmt_len * DCH_MAX_ITEM_SIZ) + 1);
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
@@ -2778,8 +2774,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
ent = DCH_cache_getnew(fmt_str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
@@ -2802,8 +2798,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
pfree(fmt_str);
/*
- * for result is allocated max memory, which current format-picture
- * needs, now it allocate result with real size
+ * for result is allocated max memory, which current format-picture needs,
+ * now it allocate result with real size
*/
if (result && *result)
{
@@ -2965,7 +2961,7 @@ to_date(PG_FUNCTION_ARGS)
*/
static void
do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec)
+ struct pg_tm * tm, fsec_t *fsec)
{
FormatNode *format;
TmFromChar tmfc;
@@ -2990,8 +2986,8 @@ do_to_timestamp(text *date_txt, text *fmt,
*(fmt_str + fmt_len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static
- * cache and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache
+ * and not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
@@ -3059,8 +3055,8 @@ do_to_timestamp(text *date_txt, text *fmt,
DEBUG_TMFC(&tmfc);
/*
- * Convert values that user define for FROM_CHAR
- * (to_date/to_timestamp) to standard 'tm'
+ * Convert values that user define for FROM_CHAR (to_date/to_timestamp) to
+ * standard 'tm'
*/
if (tmfc.ssss)
{
@@ -3125,18 +3121,19 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.year)
{
- if (tmfc.yysz==2 && tmfc.cc)
+ if (tmfc.yysz == 2 && tmfc.cc)
{
- /* CC and YY defined
- * why -[2000|1900]? See dch_date() DCH_YY code.
+ /*
+ * CC and YY defined why -[2000|1900]? See dch_date() DCH_YY code.
*/
- tm->tm_year = (tmfc.cc-1)*100 + (tmfc.year >= 2000 ? tmfc.year-2000 : tmfc.year-1900);
+ tm->tm_year = (tmfc.cc - 1) * 100 + (tmfc.year >= 2000 ? tmfc.year - 2000 : tmfc.year - 1900);
}
- else if (tmfc.yysz==1 && tmfc.cc)
+ else if (tmfc.yysz == 1 && tmfc.cc)
{
- /* CC and Y defined
+ /*
+ * CC and Y defined
*/
- tm->tm_year = (tmfc.cc-1)*100 + tmfc.year-2000;
+ tm->tm_year = (tmfc.cc - 1) * 100 + tmfc.year - 2000;
}
else
/* set year (and ignore CC if defined) */
@@ -3184,7 +3181,7 @@ do_to_timestamp(text *date_txt, text *fmt,
if (!tm->tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot calculate day of year without year information")));
+ errmsg("cannot calculate day of year without year information")));
y = ysum[isleap(tm->tm_year)];
@@ -3369,9 +3366,9 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree)
*(str + len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always). This branches sets
- * shouldFree to true, accordingly.
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always). This branches sets shouldFree to
+ * true, accordingly.
*/
if (len > NUM_CACHE_SIZE)
{
@@ -3402,8 +3399,8 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree)
ent = NUM_cache_getnew(str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, str, NUM_keywords,
NULL, NUM_index, NUM_TYPE, &ent->Num);
@@ -3591,18 +3588,18 @@ get_last_relevant_decnum(char *num)
static void
NUM_numpart_from_char(NUMProc *Np, int id, int plen)
{
- bool isread = FALSE;
-
+ bool isread = FALSE;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, " --- scan start --- id=%s",
- (id==NUM_0 || id==NUM_9) ? "NUM_0/9" : id==NUM_DEC ? "NUM_DEC" : "???");
+ (id == NUM_0 || id == NUM_9) ? "NUM_0/9" : id == NUM_DEC ? "NUM_DEC" : "???");
#endif
if (*Np->inout_p == ' ')
Np->inout_p++;
#define OVERLOAD_TEST (Np->inout_p >= Np->inout + plen)
-#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
+#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
if (*Np->inout_p == ' ')
Np->inout_p++;
@@ -3613,13 +3610,13 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
/*
* read sign before number
*/
- if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9 ) &&
- (Np->read_pre + Np->read_post)==0)
+ if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9) &&
+ (Np->read_pre + Np->read_post) == 0)
{
#ifdef DEBUG_TO_FROM_CHAR
- elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
- *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
+ elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
+ *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
#endif
/*
@@ -3627,20 +3624,21 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*/
if (IS_LSIGN(Np->Num) && Np->Num->lsign == NUM_LSIGN_PRE)
{
- int x=0;
+ int x = 0;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale pre-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '+';
@@ -3651,6 +3649,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + - < >
*/
@@ -3658,14 +3657,14 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*Np->inout_p == '<'))
{
- *Np->number = '-'; /* set - */
+ *Np->number = '-'; /* set - */
Np->inout_p++;
}
else if (*Np->inout_p == '+')
{
- *Np->number = '+'; /* set + */
+ *Np->number = '+'; /* set + */
Np->inout_p++;
}
}
@@ -3673,11 +3672,11 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Scan for numbers (%c), current number: '%s'", *Np->inout_p, Np->number);
#endif
-
+
/*
* read digit
*/
@@ -3696,13 +3695,14 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
Np->read_pre++;
isread = TRUE;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Read digit (%c)", *Np->inout_p);
#endif
- /*
- * read decimal point
- */
+
+ /*
+ * read decimal point
+ */
}
else if (IS_DECIMAL(Np->Num) && Np->read_dec == FALSE)
{
@@ -3726,7 +3726,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
elog(DEBUG_elog_output, "Try read locale point (%c)",
*Np->inout_p);
#endif
- if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x)==0)
+ if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x) == 0)
{
Np->inout_p += x - 1;
*Np->number_p = '.';
@@ -3739,69 +3739,68 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
-
+
/*
* Read sign behind "last" number
*
- * We need sign detection because determine exact position of
- * post-sign is difficult:
+ * We need sign detection because determine exact position of post-sign is
+ * difficult:
*
- * FM9999.9999999S -> 123.001-
- * 9.9S -> .5-
- * FM9.999999MI -> 5.01-
+ * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI
+ * -> 5.01-
*/
if (*Np->number == ' ' && Np->read_pre + Np->read_post > 0)
{
/*
- * locale sign (NUM_S) is always anchored behind a last number, if:
- * - locale sign expected
- * - last read char was NUM_0/9 or NUM_DEC
- * - and next char is not digit
- */
- if (IS_LSIGN(Np->Num) && isread &&
- (Np->inout_p+1) <= Np->inout + plen &&
- !isdigit((unsigned char) *(Np->inout_p+1)))
+ * locale sign (NUM_S) is always anchored behind a last number, if: -
+ * locale sign expected - last read char was NUM_0/9 or NUM_DEC - and
+ * next char is not digit
+ */
+ if (IS_LSIGN(Np->Num) && isread &&
+ (Np->inout_p + 1) <= Np->inout + plen &&
+ !isdigit((unsigned char) *(Np->inout_p + 1)))
{
- int x;
- char *tmp = Np->inout_p++;
-
+ int x;
+ char *tmp = Np->inout_p++;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale post-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '+';
}
if (*Np->number == ' ')
/* no sign read */
Np->inout_p = tmp;
}
-
+
/*
* try read non-locale sign, it's happen only if format is not exact
* and we cannot determine sign position of MI/PL/SG, an example:
*
- * FM9.999999MI -> 5.01-
+ * FM9.999999MI -> 5.01-
*
- * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats
- * like to_number('1 -', '9S') where sign is not anchored to last number.
+ * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats like
+ * to_number('1 -', '9S') where sign is not anchored to last number.
*/
- else if (isread==FALSE && IS_LSIGN(Np->Num)==FALSE &&
- (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
+ else if (isread == FALSE && IS_LSIGN(Np->Num) == FALSE &&
+ (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
{
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple post-sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + -
*/
@@ -3848,8 +3847,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
Np->num_in = FALSE;
/*
- * Write sign if real number will write to output Note:
- * IS_PREDEC_SPACE() handle "9.9" --> " .1"
+ * Write sign if real number will write to output Note: IS_PREDEC_SPACE()
+ * handle "9.9" --> " .1"
*/
if (Np->sign_wrote == FALSE &&
(Np->num_curr >= Np->num_pre || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr)) &&
@@ -4032,7 +4031,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
Np->inout = inout;
Np->last_relevant = NULL;
Np->read_post = 0;
- Np->read_pre = 0;
+ Np->read_pre = 0;
Np->read_dec = FALSE;
if (Np->Num->zero_start)
@@ -4114,8 +4113,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (IS_DECIMAL(Np->Num))
Np->last_relevant = get_last_relevant_decnum(
Np->number +
- ((Np->Num->zero_end - Np->num_pre > 0) ?
- Np->Num->zero_end - Np->num_pre : 0));
+ ((Np->Num->zero_end - Np->num_pre > 0) ?
+ Np->Num->zero_end - Np->num_pre : 0));
}
if (Np->sign_wrote == FALSE && Np->num_pre == 0)
@@ -4185,10 +4184,10 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
/*
* Create/reading digit/zero/blank/sing
*
- * 'NUM_S' note:
- * The locale sign is anchored to number and we read/write it
- * when we work with first or last number (NUM_0/NUM_9). This
- * is reason why NUM_S missing in follow switch().
+ * 'NUM_S' note: The locale sign is anchored to number and we
+ * read/write it when we work with first or last number
+ * (NUM_0/NUM_9). This is reason why NUM_S missing in follow
+ * switch().
*/
switch (n->key->id)
{
@@ -4497,7 +4496,7 @@ numeric_to_number(PG_FUNCTION_ARGS)
result = DirectFunctionCall3(numeric_in,
CStringGetDatum(numstr),
ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
+ Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
pfree(numstr);
return result;
}
@@ -4536,7 +4535,7 @@ numeric_to_char(PG_FUNCTION_ARGS)
Int32GetDatum(0)));
numstr = orgnum =
int_to_roman(DatumGetInt32(DirectFunctionCall1(numeric_int4,
- NumericGetDatum(x))));
+ NumericGetDatum(x))));
pfree(x);
}
else
@@ -4546,16 +4545,16 @@ numeric_to_char(PG_FUNCTION_ARGS)
if (IS_MULTI(&Num))
{
Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(10)));
+ Int32GetDatum(10)));
Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(Num.multi)));
+ Int32GetDatum(Num.multi)));
x = DatumGetNumeric(DirectFunctionCall2(numeric_power,
NumericGetDatum(a),
NumericGetDatum(b)));
val = DatumGetNumeric(DirectFunctionCall2(numeric_mul,
- NumericGetDatum(value),
- NumericGetDatum(x)));
+ NumericGetDatum(value),
+ NumericGetDatum(x)));
pfree(x);
pfree(a);
pfree(b);
@@ -4639,7 +4638,7 @@ int4_to_char(PG_FUNCTION_ARGS)
else
{
orgnum = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(value)));
+ Int32GetDatum(value)));
}
len = strlen(orgnum);
@@ -4711,7 +4710,7 @@ int8_to_char(PG_FUNCTION_ARGS)
{
/* Currently don't support int8 conversion to roman... */
numstr = orgnum = int_to_roman(DatumGetInt32(
- DirectFunctionCall1(int84, Int64GetDatum(value))));
+ DirectFunctionCall1(int84, Int64GetDatum(value))));
}
else
{
@@ -4720,14 +4719,14 @@ int8_to_char(PG_FUNCTION_ARGS)
double multi = pow((double) 10, (double) Num.multi);
value = DatumGetInt64(DirectFunctionCall2(int8mul,
- Int64GetDatum(value),
- DirectFunctionCall1(dtoi8,
- Float8GetDatum(multi))));
+ Int64GetDatum(value),
+ DirectFunctionCall1(dtoi8,
+ Float8GetDatum(multi))));
Num.pre += Num.multi;
}
orgnum = DatumGetCString(DirectFunctionCall1(int8out,
- Int64GetDatum(value)));
+ Int64GetDatum(value)));
len = strlen(orgnum);
if (*orgnum == '-')
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index cbbf9ca1c37..06ff9afe032 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -5,11 +5,11 @@
*
*
* Copyright (c) 2004-2005, PostgreSQL Global Development Group
- *
+ *
* Author: Andreas Pflug <pgadmin@pse-consulting.de>
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.6 2005/08/29 19:39:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.7 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,10 +30,10 @@
#include "utils/memutils.h"
-typedef struct
+typedef struct
{
- char *location;
- DIR *dirdesc;
+ char *location;
+ DIR *dirdesc;
} directory_fctx;
@@ -46,9 +46,9 @@ typedef struct
static char *
check_and_make_absolute(text *arg)
{
- int input_len = VARSIZE(arg) - VARHDRSZ;
- char *filename = palloc(input_len + 1);
-
+ int input_len = VARSIZE(arg) - VARHDRSZ;
+ char *filename = palloc(input_len + 1);
+
memcpy(filename, VARDATA(arg), input_len);
filename[input_len] = '\0';
@@ -58,7 +58,7 @@ check_and_make_absolute(text *arg)
if (path_contains_parent_reference(filename))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("reference to parent directory (\"..\") not allowed"))));
+ (errmsg("reference to parent directory (\"..\") not allowed"))));
if (is_absolute_path(filename))
{
@@ -70,14 +70,15 @@ check_and_make_absolute(text *arg)
path_is_prefix_of_path(Log_directory, filename))
return filename;
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("absolute path not allowed"))));
return NULL; /* keep compiler quiet */
}
else
{
- char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+ char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+
sprintf(absname, "%s/%s", DataDir, filename);
pfree(filename);
return absname;
@@ -94,13 +95,13 @@ pg_read_file(PG_FUNCTION_ARGS)
text *filename_t = PG_GETARG_TEXT_P(0);
int64 seek_offset = PG_GETARG_INT64(1);
int64 bytes_to_read = PG_GETARG_INT64(2);
- char *buf;
+ char *buf;
size_t nbytes;
- FILE *file;
- char *filename;
+ FILE *file;
+ char *filename;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to read files"))));
@@ -128,7 +129,7 @@ pg_read_file(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("requested length too large")));
-
+
buf = palloc((Size) bytes_to_read + VARHDRSZ);
nbytes = fread(VARDATA(buf), 1, (size_t) bytes_to_read, file);
@@ -153,7 +154,7 @@ Datum
pg_stat_file(PG_FUNCTION_ARGS)
{
text *filename_t = PG_GETARG_TEXT_P(0);
- char *filename;
+ char *filename;
struct stat fst;
Datum values[6];
bool isnull[6];
@@ -161,7 +162,7 @@ pg_stat_file(PG_FUNCTION_ARGS)
TupleDesc tupdesc;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get file information"))));
@@ -173,8 +174,8 @@ pg_stat_file(PG_FUNCTION_ARGS)
errmsg("could not stat file \"%s\": %m", filename)));
/*
- * This record type had better match the output parameters declared
- * for me in pg_proc.h (actually, in system_views.sql at the moment).
+ * This record type had better match the output parameters declared for me
+ * in pg_proc.h (actually, in system_views.sql at the moment).
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1,
@@ -220,12 +221,12 @@ pg_stat_file(PG_FUNCTION_ARGS)
Datum
pg_ls_dir(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- struct dirent *de;
- directory_fctx *fctx;
+ FuncCallContext *funcctx;
+ struct dirent *de;
+ directory_fctx *fctx;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get directory listings"))));
@@ -242,7 +243,7 @@ pg_ls_dir(PG_FUNCTION_ARGS)
fctx->dirdesc = AllocateDir(fctx->location);
if (!fctx->dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m",
fctx->location)));
@@ -252,16 +253,16 @@ pg_ls_dir(PG_FUNCTION_ARGS)
}
funcctx = SRF_PERCALL_SETUP();
- fctx = (directory_fctx*) funcctx->user_fctx;
+ fctx = (directory_fctx *) funcctx->user_fctx;
while ((de = ReadDir(fctx->dirdesc, fctx->location)) != NULL)
{
int len = strlen(de->d_name);
- text *result;
+ text *result;
if (strcmp(de->d_name, ".") == 0 ||
strcmp(de->d_name, "..") == 0)
- continue;
+ continue;
result = palloc(len + VARHDRSZ);
VARATT_SIZEP(result) = len + VARHDRSZ;
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 1786da6dd1c..2f1714a034a 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.90 2005/07/01 19:19:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.91 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -387,7 +387,7 @@ box_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type box: \"%s\"", str)));
+ errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
@@ -951,7 +951,7 @@ line_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type line: \"%s\"", str)));
+ errmsg("invalid input syntax for type line: \"%s\"", str)));
line = (LINE *) palloc(sizeof(LINE));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
@@ -1292,10 +1292,9 @@ line_interpt_internal(LINE *l1, LINE *l2)
y;
/*
- * NOTE: if the lines are identical then we will find they are
- * parallel and report "no intersection". This is a little weird, but
- * since there's no *unique* intersection, maybe it's appropriate
- * behavior.
+ * NOTE: if the lines are identical then we will find they are parallel
+ * and report "no intersection". This is a little weird, but since
+ * there's no *unique* intersection, maybe it's appropriate behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
@@ -1400,7 +1399,7 @@ path_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
@@ -1420,10 +1419,10 @@ path_in(PG_FUNCTION_ARGS)
path->npts = npts;
if ((!path_decode(TRUE, npts, s, &isopen, &s, &(path->p[0])))
- && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
+ && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
@@ -1460,7 +1459,7 @@ path_recv(PG_FUNCTION_ARGS)
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"path\" value")));
+ errmsg("invalid number of points in external \"path\" value")));
size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
@@ -1730,7 +1729,7 @@ path_distance(PG_FUNCTION_ARGS)
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
- LsegPGetDatum(&seg2)));
+ LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
@@ -1801,7 +1800,7 @@ point_in(PG_FUNCTION_ARGS)
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type point: \"%s\"", str)));
+ errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
@@ -1976,7 +1975,7 @@ point_dt(Point *pt1, Point *pt2)
{
#ifdef GEODEBUG
printf("point_dt- segment (%f,%f),(%f,%f) length is %f\n",
- pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
+ pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
#endif
return HYPOT(pt1->x - pt2->x, pt1->y - pt2->y);
}
@@ -2029,7 +2028,7 @@ lseg_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type lseg: \"%s\"", str)));
+ errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
@@ -2374,8 +2373,8 @@ lseg_interpt(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If the line intersection point isn't within l1 (or equivalently
- * l2), there is no valid segment intersection point at all.
+ * If the line intersection point isn't within l1 (or equivalently l2),
+ * there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
@@ -2393,7 +2392,7 @@ lseg_interpt(PG_FUNCTION_ARGS)
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
- (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
+ (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
@@ -2521,8 +2520,8 @@ dist_ppath(PG_FUNCTION_ARGS)
Assert(path->npts > 1);
/*
- * the distance from a point to a path is the smallest
- * distance from the point to any of its constituent segments.
+ * the distance from a point to a path is the smallest distance
+ * from the point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
@@ -2534,8 +2533,7 @@ dist_ppath(PG_FUNCTION_ARGS)
{
if (!path->closed)
continue;
- iprev = path->npts - 1; /* include the closure
- * segment */
+ iprev = path->npts - 1; /* include the closure segment */
}
statlseg_construct(&lseg, &path->p[iprev], &path->p[i]);
@@ -2853,8 +2851,8 @@ close_ps(PG_FUNCTION_ARGS)
}
/*
- * vert. and horiz. cases are down, now check if the closest point is
- * one of the end points or someplace on the lseg.
+ * vert. and horiz. cases are down, now check if the closest point is one
+ * of the end points or someplace on the lseg.
*/
invm = -1.0 / point_sl(&(lseg->p[0]), &(lseg->p[1]));
@@ -2862,8 +2860,8 @@ close_ps(PG_FUNCTION_ARGS)
* "band" */
if (pt->y < (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
- result = point_copy(&lseg->p[!yh]); /* below the lseg, take
- * lower end pt */
+ result = point_copy(&lseg->p[!yh]); /* below the lseg, take lower
+ * end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
@@ -2874,8 +2872,8 @@ close_ps(PG_FUNCTION_ARGS)
* "band" */
if (pt->y > (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
- result = point_copy(&lseg->p[yh]); /* above the lseg, take
- * higher end pt */
+ result = point_copy(&lseg->p[yh]); /* above the lseg, take higher
+ * end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
@@ -2884,8 +2882,8 @@ close_ps(PG_FUNCTION_ARGS)
}
/*
- * at this point the "normal" from point will hit lseg. The closet
- * point will be somewhere on the lseg
+ * at this point the "normal" from point will hit lseg. The closet point
+ * will be somewhere on the lseg
*/
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
@@ -2927,22 +2925,22 @@ close_lseg(PG_FUNCTION_ARGS)
if ((d = dist_ps_internal(&l2->p[0], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[0]),
+ PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if ((d = dist_ps_internal(&l2->p[1], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[1]),
+ PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
@@ -3235,11 +3233,11 @@ on_sl(PG_FUNCTION_ARGS)
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[0]),
- LinePGetDatum(line))) &&
+ PointPGetDatum(&lseg->p[0]),
+ LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[1]),
- LinePGetDatum(line))));
+ PointPGetDatum(&lseg->p[1]),
+ LinePGetDatum(line))));
}
Datum
@@ -3249,10 +3247,10 @@ on_sb(PG_FUNCTION_ARGS)
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[0]),
+ PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[1]),
+ PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))));
}
@@ -3437,7 +3435,7 @@ poly_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
@@ -3449,7 +3447,7 @@ poly_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
@@ -3489,7 +3487,7 @@ poly_recv(PG_FUNCTION_ARGS)
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(POLYGON, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"polygon\" value")));
+ errmsg("invalid number of points in external \"polygon\" value")));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
@@ -3544,8 +3542,7 @@ poly_left(PG_FUNCTION_ARGS)
result = polya->boundbox.high.x < polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3568,8 +3565,7 @@ poly_overleft(PG_FUNCTION_ARGS)
result = polya->boundbox.high.x <= polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3592,8 +3588,7 @@ poly_right(PG_FUNCTION_ARGS)
result = polya->boundbox.low.x > polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3616,8 +3611,7 @@ poly_overright(PG_FUNCTION_ARGS)
result = polya->boundbox.low.x >= polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3640,8 +3634,7 @@ poly_below(PG_FUNCTION_ARGS)
result = polya->boundbox.high.y < polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3664,8 +3657,7 @@ poly_overbelow(PG_FUNCTION_ARGS)
result = polya->boundbox.high.y <= polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3688,8 +3680,7 @@ poly_above(PG_FUNCTION_ARGS)
result = polya->boundbox.low.y > polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3712,8 +3703,7 @@ poly_overabove(PG_FUNCTION_ARGS)
result = polya->boundbox.low.y >= polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3742,8 +3732,7 @@ poly_same(PG_FUNCTION_ARGS)
result = plist_same(polya->npts, polya->p, polyb->p);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3767,8 +3756,7 @@ poly_overlap(PG_FUNCTION_ARGS)
result = box_ov(&polya->boundbox, &polyb->boundbox);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3833,8 +3821,7 @@ poly_contain(PG_FUNCTION_ARGS)
}
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -4169,7 +4156,7 @@ path_mul_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -4189,7 +4176,7 @@ path_div_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -4392,7 +4379,7 @@ circle_in(PG_FUNCTION_ARGS)
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
@@ -4402,7 +4389,7 @@ circle_in(PG_FUNCTION_ARGS)
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
@@ -4417,13 +4404,13 @@ circle_in(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
@@ -4780,7 +4767,7 @@ circle_mul_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -4800,7 +4787,7 @@ circle_div_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -5001,7 +4988,7 @@ circle_poly(PG_FUNCTION_ARGS)
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert circle with radius zero to polygon")));
+ errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
diff --git a/src/backend/utils/adt/inet_net_ntop.c b/src/backend/utils/adt/inet_net_ntop.c
index 67a55be5711..abbfcd592ca 100644
--- a/src/backend/utils/adt/inet_net_ntop.c
+++ b/src/backend/utils/adt/inet_net_ntop.c
@@ -14,7 +14,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.20 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
@@ -412,11 +412,11 @@ static char *
inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
{
/*
- * Note that int32_t and int16_t need only be "at least" large enough
- * to contain a value of the specified size. On some systems, like
- * Crays, there is no such thing as an integer variable with 16 bits.
- * Keep this in mind if you think this function should have been coded
- * to use pointer overlays. All the world's not a VAX.
+ * Note that int32_t and int16_t need only be "at least" large enough to
+ * contain a value of the specified size. On some systems, like Crays,
+ * there is no such thing as an integer variable with 16 bits. Keep this
+ * in mind if you think this function should have been coded to use
+ * pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255/128"];
char *tp;
@@ -435,8 +435,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
/*
- * Preprocess: Copy the input (bytewise) array into a wordwise array.
- * Find the longest run of 0x00's in src[] for :: shorthanding.
+ * Preprocess: Copy the input (bytewise) array into a wordwise array. Find
+ * the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < NS_IN6ADDRSZ; i++)
@@ -491,8 +491,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
- (best.len == 7 && words[7] != 0x0001) ||
- (best.len == 5 && words[5] == 0xffff)))
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff)))
{
int n;
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index a6911740cd5..e9239e317eb 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -14,7 +14,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.20 2005/02/01 00:59:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
@@ -207,7 +207,8 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size)
bits = 24;
else if (*odst >= 128) /* Class B */
bits = 16;
- else /* Class A */
+ else
+ /* Class A */
bits = 8;
/* If imputed mask is narrower than specified octets, widen. */
if (bits < ((dst - odst) * 8))
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index d35af1c913a..e41e584ffea 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.67 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.68 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -120,7 +120,7 @@ int2send(PG_FUNCTION_ARGS)
int2vector *
buildint2vector(const int2 *int2s, int n)
{
- int2vector *result;
+ int2vector *result;
result = (int2vector *) palloc0(Int2VectorSize(n));
@@ -128,8 +128,8 @@ buildint2vector(const int2 *int2s, int n)
memcpy(result->values, int2s, n * sizeof(int2));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = Int2VectorSize(n);
result->ndim = 1;
@@ -212,7 +212,7 @@ Datum
int2vectorrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- int2vector *result;
+ int2vector *result;
result = (int2vector *)
DatumGetPointer(DirectFunctionCall3(array_recv,
@@ -686,10 +686,11 @@ int4pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -706,10 +707,11 @@ int4mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -726,21 +728,22 @@ int4mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
+ * overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX &&
arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -760,10 +763,11 @@ int4div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -819,10 +823,11 @@ int2pl(PG_FUNCTION_ARGS)
int16 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -839,10 +844,11 @@ int2mi(PG_FUNCTION_ARGS)
int16 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -859,11 +865,11 @@ int2mul(PG_FUNCTION_ARGS)
int32 result32;
/*
- * The most practical way to detect overflow is to do the arithmetic
- * in int32 (so that the result can't overflow) and then do a range
- * check.
+ * The most practical way to detect overflow is to do the arithmetic in
+ * int32 (so that the result can't overflow) and then do a range check.
*/
- result32 = (int32) arg1 * (int32) arg2;
+ result32 = (int32) arg1 *(int32) arg2;
+
if (result32 < SHRT_MIN || result32 > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
@@ -885,10 +891,11 @@ int2div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN, which can't
+ * be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -905,10 +912,11 @@ int24pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -925,10 +933,11 @@ int24mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -945,18 +954,19 @@ int24mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -985,10 +995,11 @@ int42pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -1005,10 +1016,11 @@ int42mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -1025,18 +1037,19 @@ int42mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -1056,10 +1069,11 @@ int42div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -1352,8 +1366,7 @@ generate_series_step_int4(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1376,8 +1389,7 @@ generate_series_step_int4(PG_FUNCTION_ARGS)
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index c5c3d30d03d..6418da312e0 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.58 2005/03/12 20:25:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.59 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ scanint8(const char *str, bool errorOK, int64 *result)
int sign = 1;
/*
- * Do our own scan, rather than relying on sscanf which might be
- * broken for long long.
+ * Do our own scan, rather than relying on sscanf which might be broken
+ * for long long.
*/
/* skip leading spaces */
@@ -74,8 +74,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
/*
* Do an explicit check for INT64_MIN. Ugly though this is, it's
- * cleaner than trying to get the loop below to handle it
- * portably.
+ * cleaner than trying to get the loop below to handle it portably.
*/
#ifndef INT64_IS_BUSTED
if (strncmp(ptr, "9223372036854775808", 19) == 0)
@@ -115,8 +114,8 @@ scanint8(const char *str, bool errorOK, int64 *result)
else
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type bigint",
- str)));
+ errmsg("value \"%s\" is out of range for type bigint",
+ str)));
}
tmp = newtmp;
}
@@ -524,10 +523,11 @@ int8pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -544,10 +544,11 @@ int8mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -564,21 +565,22 @@ int8mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
+ * will overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (!(arg1 == (int64) ((int32) arg1) &&
arg2 == (int64) ((int32) arg2)) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -598,10 +600,11 @@ int8div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -653,9 +656,9 @@ int8inc(PG_FUNCTION_ARGS)
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
/*
- * Special case to avoid palloc overhead for COUNT(): when called
- * from nodeAgg, we know that the argument is modifiable local
- * storage, so just update it in-place.
+ * Special case to avoid palloc overhead for COUNT(): when called from
+ * nodeAgg, we know that the argument is modifiable local storage, so
+ * just update it in-place.
*
* Note: this assumes int8 is a pass-by-ref type; if we ever support
* pass-by-val int8, this should be ifdef'd out when int8 is
@@ -723,10 +726,11 @@ int84pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -743,10 +747,11 @@ int84mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -763,18 +768,19 @@ int84mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg1 != (int64) ((int32) arg1) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -794,10 +800,11 @@ int84div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -814,10 +821,11 @@ int48pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -834,10 +842,11 @@ int48mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -854,18 +863,19 @@ int48mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg2 != (int64) ((int32) arg2) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -1027,9 +1037,9 @@ dtoi8(PG_FUNCTION_ARGS)
arg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) arg;
@@ -1066,9 +1076,9 @@ ftoi8(PG_FUNCTION_ARGS)
darg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) darg;
@@ -1183,8 +1193,7 @@ generate_series_step_int8(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1207,8 +1216,7 @@ generate_series_step_int8(PG_FUNCTION_ARGS)
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 1e84474c2ae..4bf2cd33872 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.61 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.62 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,13 +28,13 @@
#define LIKE_ABORT (-1)
-static int MatchText(char *t, int tlen, char *p, int plen);
-static int MatchTextIC(char *t, int tlen, char *p, int plen);
-static int MatchBytea(char *t, int tlen, char *p, int plen);
+static int MatchText(char *t, int tlen, char *p, int plen);
+static int MatchTextIC(char *t, int tlen, char *p, int plen);
+static int MatchBytea(char *t, int tlen, char *p, int plen);
static text *do_like_escape(text *, text *);
-static int MBMatchText(char *t, int tlen, char *p, int plen);
-static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
+static int MBMatchText(char *t, int tlen, char *p, int plen);
+static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
static text *MB_do_like_escape(text *, text *);
/*--------------------
@@ -48,7 +48,7 @@ wchareq(char *p1, char *p2)
int p1_len;
/* Optimization: quickly compare the first byte. */
- if(*p1 != *p2)
+ if (*p1 != *p2)
return (0);
p1_len = pg_mblen(p1);
@@ -80,15 +80,15 @@ iwchareq(char *p1, char *p2)
int l;
/*
- * short cut. if *p1 and *p2 is lower than CHARMAX, then we could
- * assume they are ASCII
+ * short cut. if *p1 and *p2 is lower than CHARMAX, then we could assume
+ * they are ASCII
*/
if ((unsigned char) *p1 < CHARMAX && (unsigned char) *p2 < CHARMAX)
return (tolower((unsigned char) *p1) == tolower((unsigned char) *p2));
/*
- * if one of them is an ASCII while the other is not, then they must
- * be different characters
+ * if one of them is an ASCII while the other is not, then they must be
+ * different characters
*/
else if ((unsigned char) *p1 < CHARMAX || (unsigned char) *p2 < CHARMAX)
return (0);
@@ -452,7 +452,7 @@ like_escape_bytea(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
@@ -466,9 +466,9 @@ like_escape_bytea(PG_FUNCTION_ARGS)
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
@@ -530,8 +530,8 @@ MatchBytea(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -551,16 +551,16 @@ MatchBytea(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !BYTEA_CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -580,8 +580,8 @@ MatchBytea(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchBytea() */
diff --git a/src/backend/utils/adt/like_match.c b/src/backend/utils/adt/like_match.c
index 94ad7997610..dc78e89f951 100644
--- a/src/backend/utils/adt/like_match.c
+++ b/src/backend/utils/adt/like_match.c
@@ -19,7 +19,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.11 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.12 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,8 +97,8 @@ MatchText(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -118,16 +118,16 @@ MatchText(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -147,8 +147,8 @@ MatchText(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchText() */
@@ -183,8 +183,8 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -204,16 +204,16 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !ICHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -233,8 +233,8 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchTextIC() */
@@ -289,7 +289,7 @@ do_like_escape(text *pat, text *esc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
@@ -303,9 +303,9 @@ do_like_escape(text *pat, text *esc)
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 0bdf918e475..bf7ee788c42 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.19 2005/06/18 19:33:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.20 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,7 +21,7 @@
/* This must match enum LockTagType! */
-static const char * const LockTagTypeNames[] = {
+static const char *const LockTagTypeNames[] = {
"relation",
"extend",
"page",
@@ -57,8 +57,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -95,8 +94,8 @@ pg_lock_status(PG_FUNCTION_ARGS)
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
/*
- * Collect all the locking information that we will format and
- * send out as a result set.
+ * Collect all the locking information that we will format and send
+ * out as a result set.
*/
mystatus = (PG_Lock_Status *) palloc(sizeof(PG_Lock_Status));
funcctx->user_fctx = (void *) mystatus;
@@ -130,9 +129,9 @@ pg_lock_status(PG_FUNCTION_ARGS)
proc = &(lockData->procs[mystatus->currIdx]);
/*
- * Look to see if there are any held lock modes in this PROCLOCK.
- * If so, report, and destructively modify lockData so we don't
- * report again.
+ * Look to see if there are any held lock modes in this PROCLOCK. If
+ * so, report, and destructively modify lockData so we don't report
+ * again.
*/
granted = false;
if (proclock->holdMask)
@@ -160,16 +159,16 @@ pg_lock_status(PG_FUNCTION_ARGS)
mode = proc->waitLockMode;
/*
- * We are now done with this PROCLOCK, so advance pointer
- * to continue with next one on next call.
+ * We are now done with this PROCLOCK, so advance pointer to
+ * continue with next one on next call.
*/
mystatus->currIdx++;
}
else
{
/*
- * Okay, we've displayed all the locks associated with
- * this PROCLOCK, proceed to the next one.
+ * Okay, we've displayed all the locks associated with this
+ * PROCLOCK, proceed to the next one.
*/
mystatus->currIdx++;
continue;
@@ -191,7 +190,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
locktypename = tnbuf;
}
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(locktypename));
+ CStringGetDatum(locktypename));
switch (lock->tag.locktag_type)
@@ -257,7 +256,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
else
nulls[10] = 'n';
values[11] = DirectFunctionCall1(textin,
- CStringGetDatum(GetLockmodeName(mode)));
+ CStringGetDatum(GetLockmodeName(mode)));
values[12] = BoolGetDatum(granted);
tuple = heap_formtuple(funcctx->tuple_desc, values, nulls);
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 4d62c6e0250..c974b633ca1 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.34 2004/08/29 05:06:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.35 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
@@ -62,14 +62,14 @@ macaddr_in(PG_FUNCTION_ARGS)
if (count != 6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
+ errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
if ((a < 0) || (a > 255) || (b < 0) || (b > 255) ||
(c < 0) || (c > 255) || (d < 0) || (d > 255) ||
(e < 0) || (e > 255) || (f < 0) || (f > 255))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
+ errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
result = (macaddr *) palloc(sizeof(macaddr));
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 88f776df062..14bb593c2c2 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.48 2005/09/16 05:35:40 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.49 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,16 +79,16 @@ pg_signal_backend(int pid, int sig)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to signal other server processes"))));
+ (errmsg("must be superuser to signal other server processes"))));
if (!IsBackendPid(pid))
{
/*
- * This is just a warning so a loop-through-resultset will not
- * abort if one backend terminated on it's own during the run
+ * This is just a warning so a loop-through-resultset will not abort
+ * if one backend terminated on it's own during the run
*/
ereport(WARNING,
- (errmsg("PID %d is not a PostgreSQL server process", pid)));
+ (errmsg("PID %d is not a PostgreSQL server process", pid)));
return false;
}
@@ -111,7 +111,7 @@ pg_cancel_backend(PG_FUNCTION_ARGS)
Datum
pg_reload_conf(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to signal the postmaster"))));
@@ -133,7 +133,7 @@ pg_reload_conf(PG_FUNCTION_ARGS)
Datum
pg_rotate_logfile(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to rotate log files"))));
@@ -141,7 +141,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS)
if (!Redirect_stderr)
{
ereport(WARNING,
- (errmsg("rotation not possible because log redirection not active")));
+ (errmsg("rotation not possible because log redirection not active")));
PG_RETURN_BOOL(false);
}
@@ -186,8 +186,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
fctx = palloc(sizeof(ts_db_fctx));
/*
- * size = tablespace dirname length + dir sep
- * char + oid + terminator
+ * size = tablespace dirname length + dir sep char + oid + terminator
*/
fctx->location = (char *) palloc(10 + 10 + 1);
if (tablespaceOid == GLOBALTABLESPACE_OID)
@@ -214,7 +213,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
errmsg("could not open directory \"%s\": %m",
fctx->location)));
ereport(WARNING,
- (errmsg("%u is not a tablespace OID", tablespaceOid)));
+ (errmsg("%u is not a tablespace OID", tablespaceOid)));
}
}
funcctx->user_fctx = fctx;
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 148ee0abb1c..40e7522b879 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.144 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.145 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,11 +77,11 @@
* Function prototypes -- internal to this file only
*/
-static AbsoluteTime tm2abstime(struct pg_tm *tm, int tz);
-static void reltime2tm(RelativeTime time, struct pg_tm *tm);
+static AbsoluteTime tm2abstime(struct pg_tm * tm, int tz);
+static void reltime2tm(RelativeTime time, struct pg_tm * tm);
static void parsetinterval(char *i_string,
- AbsoluteTime *i_start,
- AbsoluteTime *i_end);
+ AbsoluteTime *i_start,
+ AbsoluteTime *i_end);
/*
@@ -100,21 +100,21 @@ GetCurrentAbsoluteTime(void)
void
-abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
+abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
{
pg_time_t time = (pg_time_t) _time;
struct pg_tm *tx;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (HasCTZSet && (tzp != NULL))
time -= CTimeZone;
if (!HasCTZSet && tzp != NULL)
- tx = pg_localtime(&time,global_timezone);
+ tx = pg_localtime(&time, global_timezone);
else
tx = pg_gmtime(&time);
@@ -156,8 +156,8 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
{
/*
* Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in
- * the buffer
+ * case it contains an error message, which doesn't fit in the
+ * buffer
*/
StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
if (strlen(tm->tm_zone) > MAXTZLEN)
@@ -178,7 +178,7 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
* Note that tm has full year (not 1900-based) and 1-based month.
*/
static AbsoluteTime
-tm2abstime(struct pg_tm *tm, int tz)
+tm2abstime(struct pg_tm * tm, int tz)
{
int day;
AbsoluteTime sec;
@@ -188,7 +188,7 @@ tm2abstime(struct pg_tm *tm, int tz)
tm->tm_mon < 1 || tm->tm_mon > 12 ||
tm->tm_mday < 1 || tm->tm_mday > 31 ||
tm->tm_hour < 0 ||
- tm->tm_hour > 24 || /* test for > 24:00:00 */
+ tm->tm_hour > 24 || /* test for > 24:00:00 */
(tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)) ||
tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60)
@@ -204,11 +204,11 @@ tm2abstime(struct pg_tm *tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
- if ((day >= MAX_DAYNUM-10 && sec < 0) ||
- (day <= MIN_DAYNUM+10 && sec > 0))
+ if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
+ (day <= MIN_DAYNUM + 10 && sec > 0))
return INVALID_ABSTIME;
/* check for reserved values (e.g. "current" on edge of usual range */
@@ -254,8 +254,8 @@ abstimein(PG_FUNCTION_ARGS)
case DTK_EPOCH:
/*
- * Don't bother retaining this as a reserved value, but
- * instead just set to the actual epoch time (1970-01-01)
+ * Don't bother retaining this as a reserved value, but instead
+ * just set to the actual epoch time (1970-01-01)
*/
result = 0;
break;
@@ -370,9 +370,9 @@ static int
abstime_cmp_internal(AbsoluteTime a, AbsoluteTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_ABSTIME)
{
@@ -463,7 +463,7 @@ btabstimecmp(PG_FUNCTION_ARGS)
Datum
timestamp_abstime(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
AbsoluteTime result;
fsec_t fsec;
int tz;
@@ -509,7 +509,7 @@ abstime_timestamp(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -582,7 +582,7 @@ abstime_timestamptz(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -703,7 +703,7 @@ reltimesend(PG_FUNCTION_ARGS)
static void
-reltime2tm(RelativeTime time, struct pg_tm *tm)
+reltime2tm(RelativeTime time, struct pg_tm * tm)
{
double dtime = time;
@@ -764,12 +764,12 @@ tintervalout(PG_FUNCTION_ARGS)
else
{
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[0])));
+ AbsoluteTimeGetDatum(tinterval->data[0])));
strcat(i_str, p);
pfree(p);
strcat(i_str, "\" \"");
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[1])));
+ AbsoluteTimeGetDatum(tinterval->data[1])));
strcat(i_str, p);
pfree(p);
}
@@ -788,16 +788,16 @@ tintervalrecv(PG_FUNCTION_ARGS)
tinterval = (TimeInterval) palloc(sizeof(TimeIntervalData));
- tinterval ->status = pq_getmsgint(buf, sizeof(tinterval->status));
+ tinterval->status = pq_getmsgint(buf, sizeof(tinterval->status));
if (!(tinterval->status == T_INTERVAL_INVAL ||
tinterval->status == T_INTERVAL_VALID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid status in external \"tinterval\" value")));
+ errmsg("invalid status in external \"tinterval\" value")));
- tinterval ->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
- tinterval ->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
+ tinterval->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
+ tinterval->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
PG_RETURN_TIMEINTERVAL(tinterval);
}
@@ -844,11 +844,11 @@ interval_reltime(PG_FUNCTION_ARGS)
#ifdef HAVE_INT64_TIMESTAMP
span = ((INT64CONST(365250000) * year + INT64CONST(30000000) * month +
- INT64CONST(1000000) * day) * INT64CONST(86400)) +
- interval->time;
+ INT64CONST(1000000) * day) * INT64CONST(86400)) +
+ interval->time;
span /= USECS_PER_SEC;
#else
- span = (DAYS_PER_YEAR * year + (double)DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
+ span = (DAYS_PER_YEAR * year + (double) DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
#endif
if (span < INT_MIN || span > INT_MAX)
@@ -876,7 +876,7 @@ reltime_interval(PG_FUNCTION_ARGS)
case INVALID_RELTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reltime \"invalid\" to interval")));
+ errmsg("cannot convert reltime \"invalid\" to interval")));
result->time = 0;
result->day = 0;
result->month = 0;
@@ -954,7 +954,7 @@ timepl(PG_FUNCTION_ARGS)
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 < NOEND_ABSTIME - t2) ||
- (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
+ (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 + t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
@@ -973,7 +973,7 @@ timemi(PG_FUNCTION_ARGS)
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 > NOSTART_ABSTIME + t2) ||
- (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
+ (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 - t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
@@ -993,10 +993,10 @@ intinterval(PG_FUNCTION_ARGS)
{
if (DatumGetBool(DirectFunctionCall2(abstimege,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[0]))) &&
+ AbsoluteTimeGetDatum(tinterval->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimele,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[1]))))
+ AbsoluteTimeGetDatum(tinterval->data[1]))))
PG_RETURN_BOOL(true);
}
PG_RETURN_BOOL(false);
@@ -1046,9 +1046,9 @@ static int
reltime_cmp_internal(RelativeTime a, RelativeTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_RELTIME)
{
@@ -1147,11 +1147,11 @@ tintervalsame(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1172,16 +1172,16 @@ tinterval_cmp_internal(TimeInterval a, TimeInterval b)
AbsoluteTime b_len;
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
a_invalid = a->status == T_INTERVAL_INVAL ||
- a->data[0] == INVALID_ABSTIME ||
- a->data[1] == INVALID_ABSTIME;
+ a->data[0] == INVALID_ABSTIME ||
+ a->data[1] == INVALID_ABSTIME;
b_invalid = b->status == T_INTERVAL_INVAL ||
- b->data[0] == INVALID_ABSTIME ||
- b->data[1] == INVALID_ABSTIME;
+ b->data[0] == INVALID_ABSTIME ||
+ b->data[1] == INVALID_ABSTIME;
if (a_invalid)
{
@@ -1293,7 +1293,7 @@ tintervalleneq(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt == t);
}
@@ -1307,7 +1307,7 @@ tintervallenne(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt != t);
}
@@ -1321,7 +1321,7 @@ tintervallenlt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt < t);
}
@@ -1335,7 +1335,7 @@ tintervallengt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt > t);
}
@@ -1349,7 +1349,7 @@ tintervallenle(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt <= t);
}
@@ -1363,7 +1363,7 @@ tintervallenge(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt >= t);
}
@@ -1379,11 +1379,11 @@ tintervalct(PG_FUNCTION_ARGS)
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimele,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimege,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1400,11 +1400,11 @@ tintervalov(PG_FUNCTION_ARGS)
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimelt,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[0]))) ||
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[0]))) ||
DatumGetBool(DirectFunctionCall2(abstimegt,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(false);
PG_RETURN_BOOL(true);
}
@@ -1492,8 +1492,7 @@ parsetinterval(char *i_string,
goto bogus; /* syntax error */
p++;
if (strncmp(INVALID_INTERVAL_STR, p, strlen(INVALID_INTERVAL_STR)) == 0)
- goto bogus; /* undefined range, handled like a syntax
- * err. */
+ goto bogus; /* undefined range, handled like a syntax err. */
/* search for the end of the first date and change it to a \0 */
p1 = p;
while ((c = *p1) != '\0')
@@ -1507,7 +1506,7 @@ parsetinterval(char *i_string,
*p1 = '\0';
/* get the first date */
*i_start = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
@@ -1537,7 +1536,7 @@ parsetinterval(char *i_string,
*p1 = '\0';
/* get the second date */
*i_end = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
@@ -1566,7 +1565,7 @@ bogus:
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type tinterval: \"%s\"",
i_string)));
- *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
+ *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
}
@@ -1595,7 +1594,7 @@ timeofday(PG_FUNCTION_ARGS)
gettimeofday(&tp, &tpz);
tt = (pg_time_t) tp.tv_sec;
pg_strftime(templ, sizeof(templ), "%a %b %d %H:%M:%S.%%06d %Y %Z",
- pg_localtime(&tt,global_timezone));
+ pg_localtime(&tt, global_timezone));
snprintf(buf, sizeof(buf), templ, tp.tv_usec);
len = VARHDRSZ + strlen(buf);
diff --git a/src/backend/utils/adt/name.c b/src/backend/utils/adt/name.c
index 1200ad9b34c..0a52dcfec66 100644
--- a/src/backend/utils/adt/name.c
+++ b/src/backend/utils/adt/name.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.55 2004/12/31 22:01:22 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.56 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -258,8 +258,8 @@ namecpy(Name n1, Name n2)
int
namecat(Name n1, Name n2)
{
- return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer
- * than n1 */
+ return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer than
+ * n1 */
}
#endif
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index dc83d7028c5..17403c5f33c 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.54 2004/10/08 01:10:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.55 2005/10/15 02:49:29 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -74,9 +74,9 @@ network_in(char *src, int type)
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6
- * addresses will have a : somewhere in them (several, in fact) so if
- * there is one present, assume it's V6, otherwise assume it's V4.
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * will have a : somewhere in them (several, in fact) so if there is one
+ * present, assume it's V6, otherwise assume it's V4.
*/
if (strchr(src, ':') != NULL)
@@ -94,8 +94,7 @@ network_in(char *src, int type)
type ? "cidr" : "inet", src)));
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (type)
{
@@ -195,7 +194,7 @@ inet_recv(PG_FUNCTION_ARGS)
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid address family in external \"inet\" value")));
+ errmsg("invalid address family in external \"inet\" value")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
@@ -221,8 +220,7 @@ inet_recv(PG_FUNCTION_ARGS)
addrptr[i] = pq_getmsgbyte(buf);
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (ip_type(addr))
{
@@ -457,7 +455,7 @@ network_sub(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) > ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -472,7 +470,7 @@ network_subeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) >= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -487,7 +485,7 @@ network_sup(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) < ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -502,7 +500,7 @@ network_supeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) <= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -870,8 +868,8 @@ convert_network_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one network and one non-network operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one network and one non-network operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 4aa631ee577..a8becf990d1 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.85 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.86 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,8 +131,7 @@ typedef struct NumericVar
{
int ndigits; /* # of digits in digits[] - can be 0! */
int weight; /* weight of first digit */
- int sign; /* NUMERIC_POS, NUMERIC_NEG, or
- * NUMERIC_NAN */
+ int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
int dscale; /* display scale */
NumericDigit *buf; /* start of palloc'd space for digits[] */
NumericDigit *digits; /* base-NBASE digits */
@@ -157,10 +156,8 @@ static NumericVar const_two =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_five_data[1] = {5000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_five_data[1] = {50};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_five_data[1] = {5};
#endif
@@ -169,10 +166,8 @@ static NumericVar const_zero_point_five =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_nine_data[1] = {9000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_nine_data[1] = {90};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_nine_data[1] = {9};
#endif
@@ -183,12 +178,10 @@ static NumericVar const_zero_point_nine =
static NumericDigit const_zero_point_01_data[1] = {100};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
@@ -197,10 +190,8 @@ static NumericVar const_zero_point_01 =
#if DEC_DIGITS == 4
static NumericDigit const_one_point_one_data[2] = {1, 1000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_one_point_one_data[2] = {1, 10};
-
#elif DEC_DIGITS == 1
static NumericDigit const_one_point_one_data[2] = {1, 1};
#endif
@@ -223,7 +214,6 @@ static const int round_powers[4] = {0, 1000, 100, 10};
#ifdef NUMERIC_DEBUG
static void dump_numeric(const char *str, Numeric num);
static void dump_var(const char *str, NumericVar *var);
-
#else
#define dump_numeric(s,n)
#define dump_var(s,v)
@@ -322,8 +312,8 @@ numeric_in(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Use set_var_from_str() to parse the input string and return it in
- * the packed DB storage format
+ * Use set_var_from_str() to parse the input string and return it in the
+ * packed DB storage format
*/
init_var(&value);
set_var_from_str(str, &value);
@@ -358,10 +348,10 @@ numeric_out(PG_FUNCTION_ARGS)
/*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy the
- * value to have a modifiable copy for rounding. set_var_from_num()
- * also guarantees there is extra digit space in case we produce a
- * carry out from rounding.
+ * Even if we didn't need to change format, we'd still need to copy the value
+ * to have a modifiable copy for rounding. set_var_from_num() also
+ * guarantees there is extra digit space in case we produce a carry out
+ * from rounding.
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -383,6 +373,7 @@ Datum
numeric_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -419,7 +410,7 @@ numeric_recv(PG_FUNCTION_ARGS)
if (d < 0 || d >= NBASE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid digit in external \"numeric\" value")));
+ errmsg("invalid digit in external \"numeric\" value")));
value.digits[i] = d;
}
@@ -468,7 +459,7 @@ numeric_send(PG_FUNCTION_ARGS)
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric (PG_FUNCTION_ARGS)
+numeric(PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
@@ -487,8 +478,8 @@ numeric (PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * If the value isn't a valid type modifier, simply return a copy of
- * the input value
+ * If the value isn't a valid type modifier, simply return a copy of the
+ * input value
*/
if (typmod < (int32) (VARHDRSZ))
{
@@ -507,9 +498,8 @@ numeric (PG_FUNCTION_ARGS)
/*
* If the number is certainly in bounds and due to the target scale no
- * rounding could be necessary, just make a copy of the input and
- * modify its scale fields. (Note we assume the existing dscale is
- * honest...)
+ * rounding could be necessary, just make a copy of the input and modify
+ * its scale fields. (Note we assume the existing dscale is honest...)
*/
ddigits = (num->n_weight + 1) * DEC_DIGITS;
if (ddigits <= maxdigits && scale >= NUMERIC_DSCALE(num))
@@ -587,9 +577,9 @@ numeric_uminus(PG_FUNCTION_ARGS)
memcpy(res, num, num->varlen);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all. Do nothing to a zero.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all. Do
+ * nothing to a zero.
*/
if (num->varlen != NUMERIC_HDRSZ)
{
@@ -638,17 +628,16 @@ numeric_sign(PG_FUNCTION_ARGS)
init_var(&result);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all.
*/
if (num->varlen == NUMERIC_HDRSZ)
set_var_from_var(&const_zero, &result);
else
{
/*
- * And if there are some, we return a copy of ONE with the sign of
- * our argument
+ * And if there are some, we return a copy of ONE with the sign of our
+ * argument
*/
set_var_from_var(&const_one, &result);
result.sign = NUMERIC_SIGN(num);
@@ -837,8 +826,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
if (count <= 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("count must be greater than zero")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("count must be greater than zero")));
init_var(&result_var);
init_var(&count_var);
@@ -850,8 +839,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
{
case 0:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("lower bound cannot equal upper bound")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("lower bound cannot equal upper bound")));
/* bound1 < bound2 */
case -1:
@@ -1055,9 +1044,9 @@ cmp_numerics(Numeric num1, Numeric num2)
int result;
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (NUMERIC_IS_NAN(num1))
{
@@ -1208,10 +1197,10 @@ numeric_mul(PG_FUNCTION_ARGS)
/*
* Unpack the values, let mul_var() compute the result and return it.
- * Unlike add_var() and sub_var(), mul_var() will round its result. In
- * the case of numeric_mul(), which is invoked for the * operator on
- * numerics, we request exact representation for the product (rscale =
- * sum(dscale of arg1, dscale of arg2)).
+ * Unlike add_var() and sub_var(), mul_var() will round its result. In the
+ * case of numeric_mul(), which is invoked for the * operator on numerics,
+ * we request exact representation for the product (rscale = sum(dscale of
+ * arg1, dscale of arg2)).
*/
init_var(&arg1);
init_var(&arg2);
@@ -1368,8 +1357,8 @@ numeric_smaller(PG_FUNCTION_ARGS)
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) < 0)
PG_RETURN_NUMERIC(num1);
@@ -1390,8 +1379,8 @@ numeric_larger(PG_FUNCTION_ARGS)
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) > 0)
PG_RETURN_NUMERIC(num1);
@@ -1469,9 +1458,9 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
@@ -1522,9 +1511,9 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
@@ -1535,8 +1524,8 @@ numeric_exp(PG_FUNCTION_ARGS)
val = numericvar_to_double_no_overflow(&arg);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * decimal weight of the result:
+ * log10(result) = num * log10(e), so this is approximately the decimal
+ * weight of the result:
*/
val *= 0.434294481903252;
@@ -1646,8 +1635,8 @@ numeric_log(PG_FUNCTION_ARGS)
set_var_from_num(num2, &arg2);
/*
- * Call log_var() to compute and return the result; note it handles
- * scale selection itself.
+ * Call log_var() to compute and return the result; note it handles scale
+ * selection itself.
*/
log_var(&arg1, &arg2, &result);
@@ -1698,8 +1687,8 @@ numeric_power(PG_FUNCTION_ARGS)
trunc_var(&arg2_trunc, 0);
/*
- * Return special SQLSTATE error codes for a few conditions mandated
- * by the standard.
+ * Return special SQLSTATE error codes for a few conditions mandated by
+ * the standard.
*/
if ((cmp_var(&arg1, &const_zero) == 0 &&
cmp_var(&arg2, &const_zero) < 0) ||
@@ -2093,8 +2082,8 @@ do_numeric_accum(ArrayType *transarray, Numeric newval)
NumericGetDatum(newval));
sumX2 = DirectFunctionCall2(numeric_add, sumX2,
DirectFunctionCall2(numeric_mul,
- NumericGetDatum(newval),
- NumericGetDatum(newval)));
+ NumericGetDatum(newval),
+ NumericGetDatum(newval)));
transdatums[0] = N;
transdatums[1] = sumX;
@@ -2252,7 +2241,7 @@ numeric_variance(PG_FUNCTION_ARGS)
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
res = make_result(&vsumX);
}
@@ -2328,7 +2317,7 @@ numeric_stddev(PG_FUNCTION_ARGS)
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
res = make_result(&vsumX);
@@ -2377,12 +2366,12 @@ int2_sum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
@@ -2422,12 +2411,12 @@ int4_sum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
@@ -2467,9 +2456,9 @@ int8_sum(PG_FUNCTION_ARGS)
}
/*
- * Note that we cannot special-case the nodeAgg case here, as we
- * do for int2_sum and int4_sum: numeric is of variable size, so
- * we cannot modify our first parameter in-place.
+ * Note that we cannot special-case the nodeAgg case here, as we do for
+ * int2_sum and int4_sum: numeric is of variable size, so we cannot modify
+ * our first parameter in-place.
*/
oldsum = PG_GETARG_NUMERIC(0);
@@ -2514,8 +2503,8 @@ int2_avg_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
@@ -2541,8 +2530,8 @@ int4_avg_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
@@ -2743,8 +2732,8 @@ set_var_from_str(const char *str, NumericVar *dest)
NumericDigit *digits;
/*
- * We first parse the string to extract decimal digits and determine
- * the correct decimal weight. Then convert to NBASE representation.
+ * We first parse the string to extract decimal digits and determine the
+ * correct decimal weight. Then convert to NBASE representation.
*/
/* skip leading spaces */
@@ -2777,7 +2766,7 @@ set_var_from_str(const char *str, NumericVar *dest)
if (!isdigit((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"", str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"", str)));
decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS * 2);
@@ -2800,8 +2789,8 @@ set_var_from_str(const char *str, NumericVar *dest)
if (have_dp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
have_dp = TRUE;
cp++;
}
@@ -2824,15 +2813,15 @@ set_var_from_str(const char *str, NumericVar *dest)
if (endptr == cp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp = endptr;
if (exponent > NUMERIC_MAX_PRECISION ||
exponent < -NUMERIC_MAX_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
dweight += (int) exponent;
dscale -= (int) exponent;
if (dscale < 0)
@@ -2845,16 +2834,16 @@ set_var_from_str(const char *str, NumericVar *dest)
if (!isspace((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp++;
}
/*
- * Okay, convert pure-decimal representation to base NBASE. First we
- * need to determine the converted weight and ndigits. offset is the
- * number of decimal zeroes to insert before the first given digit to
- * have a correctly aligned first NBASE digit.
+ * Okay, convert pure-decimal representation to base NBASE. First we need
+ * to determine the converted weight and ndigits. offset is the number of
+ * decimal zeroes to insert before the first given digit to have a
+ * correctly aligned first NBASE digit.
*/
if (dweight >= 0)
weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1;
@@ -2969,10 +2958,10 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point. dscale is the
- * # of decimal digits we will print after decimal point. We may
- * generate as many as DEC_DIGITS-1 excess digits at the end, and in
- * addition we need room for sign, decimal point, null terminator.
+ * i is set to to # of decimal digits before decimal point. dscale is the #
+ * of decimal digits we will print after decimal point. We may generate as
+ * many as DEC_DIGITS-1 excess digits at the end, and in addition we need
+ * room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
@@ -3037,9 +3026,9 @@ get_str_from_var(NumericVar *var, int dscale)
}
/*
- * If requested, output a decimal point and all the digits that follow
- * it. We initially put out a multiple of DEC_DIGITS digits, then
- * truncate if needed.
+ * If requested, output a decimal point and all the digits that follow it.
+ * We initially put out a multiple of DEC_DIGITS digits, then truncate if
+ * needed.
*/
if (dscale > 0)
{
@@ -3179,10 +3168,10 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Check for overflow - note we can't do this before rounding, because
- * rounding could raise the weight. Also note that the var's weight
- * could be inflated by leading zeroes, which will be stripped before
- * storage but perhaps might not have been yet. In any case, we must
- * recognize a true zero, whose weight doesn't mean anything.
+ * rounding could raise the weight. Also note that the var's weight could
+ * be inflated by leading zeroes, which will be stripped before storage
+ * but perhaps might not have been yet. In any case, we must recognize a
+ * true zero, whose weight doesn't mean anything.
*/
ddigits = (var->weight + 1) * DEC_DIGITS;
if (ddigits > maxdigits)
@@ -3254,9 +3243,8 @@ numericvar_to_int8(NumericVar *var, int64 *result)
}
/*
- * For input like 10000000000, we must treat stripped digits as real.
- * So the loop assumes there are weight+1 digits before the decimal
- * point.
+ * For input like 10000000000, we must treat stripped digits as real. So
+ * the loop assumes there are weight+1 digits before the decimal point.
*/
weight = var->weight;
Assert(weight >= 0 && ndigits <= weight + 1);
@@ -3274,10 +3262,10 @@ numericvar_to_int8(NumericVar *var, int64 *result)
/*
* The overflow check is a bit tricky because we want to accept
- * INT64_MIN, which will overflow the positive accumulator. We
- * can detect this case easily though because INT64_MIN is the
- * only nonzero value for which -val == val (on a two's complement
- * machine, anyway).
+ * INT64_MIN, which will overflow the positive accumulator. We can
+ * detect this case easily though because INT64_MIN is the only
+ * nonzero value for which -val == val (on a two's complement machine,
+ * anyway).
*/
if ((val / NBASE) != oldval) /* possible overflow? */
{
@@ -3355,8 +3343,8 @@ numeric_to_double_no_overflow(Numeric num)
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
@@ -3381,8 +3369,8 @@ numericvar_to_double_no_overflow(NumericVar *var)
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
@@ -3454,8 +3442,7 @@ add_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
else
{
/*
- * var1 is positive, var2 is negative Must compare absolute
- * values
+ * var1 is positive, var2 is negative Must compare absolute values
*/
switch (cmp_abs(var1, var2))
{
@@ -3715,10 +3702,9 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
- * would have more than rscale fractional digits, truncate the
- * computation with MUL_GUARD_DIGITS guard digits. We do that by
- * pretending that one or both inputs have fewer digits than they
- * really do.
+ * would have more than rscale fractional digits, truncate the computation
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
maxdigits = res_weight + 1 + (rscale * DEC_DIGITS) + MUL_GUARD_DIGITS;
@@ -3752,12 +3738,12 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "dig[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
* maxdig tracks the maximum possible value of any dig[] entry; when this
- * threatens to exceed INT_MAX, we take the time to propagate carries.
- * To avoid overflow in maxdig itself, it actually represents the max
+ * threatens to exceed INT_MAX, we take the time to propagate carries. To
+ * avoid overflow in maxdig itself, it actually represents the max
* possible value divided by NBASE-1.
*/
dig = (int *) palloc0(res_ndigits * sizeof(int));
@@ -3801,9 +3787,9 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
}
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, res_ndigits);
res_digits = result->digits;
@@ -3909,24 +3895,24 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "div[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
- * We start with div[] containing one zero digit followed by the
- * dividend's digits (plus appended zeroes to reach the desired
- * precision including guard digits). Each step of the main loop
- * computes an (approximate) quotient digit and stores it into div[],
- * removing one position of dividend space. A final pass of carry
- * propagation takes care of any mistaken quotient digits.
+ * We start with div[] containing one zero digit followed by the dividend's
+ * digits (plus appended zeroes to reach the desired precision including
+ * guard digits). Each step of the main loop computes an (approximate)
+ * quotient digit and stores it into div[], removing one position of
+ * dividend space. A final pass of carry propagation takes care of any
+ * mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
div[i + 1] = var1digits[i];
/*
- * We estimate each quotient digit using floating-point arithmetic,
- * taking the first four digits of the (current) dividend and divisor.
- * This must be float to avoid overflow.
+ * We estimate each quotient digit using floating-point arithmetic, taking
+ * the first four digits of the (current) dividend and divisor. This must
+ * be float to avoid overflow.
*/
fdivisor = (double) var2digits[0];
for (i = 1; i < 4; i++)
@@ -3938,10 +3924,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
fdivisorinverse = 1.0 / fdivisor;
/*
- * maxdiv tracks the maximum possible absolute value of any div[]
- * entry; when this threatens to exceed INT_MAX, we take the time to
- * propagate carries. To avoid overflow in maxdiv itself, it actually
- * represents the max possible abs. value divided by NBASE-1.
+ * maxdiv tracks the maximum possible absolute value of any div[] entry;
+ * when this threatens to exceed INT_MAX, we take the time to propagate
+ * carries. To avoid overflow in maxdiv itself, it actually represents
+ * the max possible abs. value divided by NBASE-1.
*/
maxdiv = 1;
@@ -3992,8 +3978,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
div[qi] = newdig;
/*
- * All the div[] digits except possibly div[qi] are now in
- * the range 0..NBASE-1.
+ * All the div[] digits except possibly div[qi] are now in the
+ * range 0..NBASE-1.
*/
maxdiv = Abs(newdig) / (NBASE - 1);
maxdiv = Max(maxdiv, 1);
@@ -4012,8 +3998,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/* Compute the (approximate) quotient digit */
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards
- * -infinity */
+ (((int) fquotient) - 1); /* truncate towards -infinity */
maxdiv += Abs(qdigit);
}
@@ -4028,10 +4013,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
}
/*
- * The dividend digit we are about to replace might still be
- * nonzero. Fold it into the next digit position. We don't need
- * to worry about overflow here since this should nearly cancel
- * with the subtraction of the divisor.
+ * The dividend digit we are about to replace might still be nonzero.
+ * Fold it into the next digit position. We don't need to worry about
+ * overflow here since this should nearly cancel with the subtraction
+ * of the divisor.
*/
div[qi + 1] += div[qi] * NBASE;
@@ -4050,9 +4035,9 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
div[qi] = qdigit;
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, div_ndigits + 1);
res_digits = result->digits;
@@ -4089,7 +4074,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
round_var(result, rscale);
else
trunc_var(result, rscale);
-
+
/* Strip leading and trailing zeroes */
strip_var(result);
}
@@ -4112,8 +4097,8 @@ select_div_scale(NumericVar *var1, NumericVar *var2)
int rscale;
/*
- * The result scale of a division isn't specified in any SQL standard.
- * For PostgreSQL we select a result scale that will give at least
+ * The result scale of a division isn't specified in any SQL standard. For
+ * PostgreSQL we select a result scale that will give at least
* NUMERIC_MIN_SIG_DIGITS significant digits, so that numeric gives a
* result no less accurate than float8; but use a scale not less than
* either input's display scale.
@@ -4274,8 +4259,8 @@ sqrt_var(NumericVar *arg, NumericVar *result, int rscale)
}
/*
- * SQL2003 defines sqrt() in terms of power, so we need to emit the
- * right SQLSTATE error code if the operand is negative.
+ * SQL2003 defines sqrt() in terms of power, so we need to emit the right
+ * SQLSTATE error code if the operand is negative.
*/
if (stat < 0)
ereport(ERROR,
@@ -4445,9 +4430,8 @@ exp_var_internal(NumericVar *arg, NumericVar *result, int rscale)
*
* exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
- * Given the limited range of x, this should converge reasonably quickly.
- * We run the series until the terms fall below the local_rscale
- * limit.
+ * Given the limited range of x, this should converge reasonably quickly. We
+ * run the series until the terms fall below the local_rscale limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
@@ -4535,11 +4519,11 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
*
* z + z^3/3 + z^5/5 + ...
*
- * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
- * due to the above range-reduction of x.
+ * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048 due
+ * to the above range-reduction of x.
*
- * The convergence of this is not as fast as one would like, but is
- * tolerable given that z is small.
+ * The convergence of this is not as fast as one would like, but is tolerable
+ * given that z is small.
*/
sub_var(&x, &const_one, result);
add_var(&x, &const_one, &elem);
@@ -4711,8 +4695,7 @@ power_var(NumericVar *base, NumericVar *exp, NumericVar *result)
val = numericvar_to_double_no_overflow(&ln_num);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * weight:
+ * log10(result) = num * log10(e), so this is approximately the weight:
*/
val *= 0.434294481903252;
@@ -4772,8 +4755,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra
- * precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
@@ -4866,8 +4848,8 @@ cmp_abs(NumericVar *var1, NumericVar *var2)
}
/*
- * At this point, we've run out of digits on one side or the other; so
- * any remaining nonzero digits imply that side is larger
+ * At this point, we've run out of digits on one side or the other; so any
+ * remaining nonzero digits imply that side is larger
*/
while (i1 < var1->ndigits)
{
@@ -5071,8 +5053,8 @@ round_var(NumericVar *var, int rscale)
di = (var->weight + 1) * DEC_DIGITS + rscale;
/*
- * If di = 0, the value loses all digits, but could round up to 1 if
- * its first extra digit is >= 5. If di < 0 the result must be 0.
+ * If di = 0, the value loses all digits, but could round up to 1 if its
+ * first extra digit is >= 5. If di < 0 the result must be 0.
*/
if (di < 0)
{
diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c
index fb7fd94b8c8..ffa225277e1 100644
--- a/src/backend/utils/adt/numutils.c
+++ b/src/backend/utils/adt/numutils.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.68 2005/01/09 21:03:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.69 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ pg_atoi(char *s, int size, int c)
char *badp;
/*
- * Some versions of strtol treat the empty string as an error, but
- * some seem not to. Make an explicit test to be sure we catch it.
+ * Some versions of strtol treat the empty string as an error, but some
+ * seem not to. Make an explicit test to be sure we catch it.
*/
if (s == NULL)
elog(ERROR, "NULL pointer");
@@ -85,8 +85,8 @@ pg_atoi(char *s, int size, int c)
s)));
/*
- * Skip any trailing whitespace; if anything but whitespace remains
- * before the terminating character, bail out
+ * Skip any trailing whitespace; if anything but whitespace remains before
+ * the terminating character, bail out
*/
while (*badp && *badp != c && isspace((unsigned char) *badp))
badp++;
@@ -108,19 +108,19 @@ pg_atoi(char *s, int size, int c)
)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type integer", s)));
+ errmsg("value \"%s\" is out of range for type integer", s)));
break;
case sizeof(int16):
if (errno == ERANGE || l < SHRT_MIN || l > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type smallint", s)));
+ errmsg("value \"%s\" is out of range for type smallint", s)));
break;
case sizeof(int8):
if (errno == ERANGE || l < SCHAR_MIN || l > SCHAR_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for 8-bit integer", s)));
+ errmsg("value \"%s\" is out of range for 8-bit integer", s)));
break;
default:
elog(ERROR, "unsupported result size: %d", size);
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index e9a2c741be2..62db042bbde 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.63 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.64 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,9 +47,9 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
cvt = strtoul(s, &endptr, 10);
/*
- * strtoul() normally only sets ERANGE. On some systems it also may
- * set EINVAL, which simply means it couldn't parse the input string.
- * This is handled by the second "if" consistent across platforms.
+ * strtoul() normally only sets ERANGE. On some systems it also may set
+ * EINVAL, which simply means it couldn't parse the input string. This is
+ * handled by the second "if" consistent across platforms.
*/
if (errno && errno != ERANGE && errno != EINVAL)
ereport(ERROR,
@@ -88,16 +88,16 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
result = (Oid) cvt;
/*
- * Cope with possibility that unsigned long is wider than Oid, in
- * which case strtoul will not raise an error for some values that are
- * out of the range of Oid.
+ * Cope with possibility that unsigned long is wider than Oid, in which
+ * case strtoul will not raise an error for some values that are out of
+ * the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that are given
- * with a minus sign, so allow the input value if it matches after
- * either signed or unsigned extension to long.
+ * For backwards compatibility, we want to accept inputs that are given with
+ * a minus sign, so allow the input value if it matches after either
+ * signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms, make sure
- * the error message is the same as if strtoul() had returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure the
+ * error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
@@ -171,8 +171,8 @@ buildoidvector(const Oid *oids, int n)
memcpy(result->values, oids, n * sizeof(Oid));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = OidVectorSize(n);
result->ndim = 1;
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index 5dd9a44ccf8..a1ddc00a782 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.61 2005/08/24 17:50:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.62 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,12 +87,12 @@ texttowcs(const text *txt)
if (ncodes == (size_t) -1)
{
/*
- * Invalid multibyte character encountered. We try to give a
- * useful error message by letting pg_verifymbstr check the
- * string. But it's possible that the string is OK to us, and not
- * OK to mbstowcs --- this suggests that the LC_CTYPE locale is
- * different from the database encoding. Give a generic error
- * message if verifymbstr can't find anything wrong.
+ * Invalid multibyte character encountered. We try to give a useful
+ * error message by letting pg_verifymbstr check the string. But it's
+ * possible that the string is OK to us, and not OK to mbstowcs ---
+ * this suggests that the LC_CTYPE locale is different from the
+ * database encoding. Give a generic error message if verifymbstr
+ * can't find anything wrong.
*/
pg_verifymbstr(workstr, nbytes, false);
ereport(ERROR,
@@ -164,11 +164,11 @@ win32_utf8_texttowcs(const text *txt)
{
int nbytes = VARSIZE(txt) - VARHDRSZ;
wchar_t *result;
- int r;
+ int r;
/* Overflow paranoia */
if (nbytes < 0 ||
- nbytes > (int) (INT_MAX / sizeof(wchar_t)) -1)
+ nbytes > (int) (INT_MAX / sizeof(wchar_t)) - 1)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
@@ -206,9 +206,9 @@ win32_utf8_texttowcs(const text *txt)
static text *
win32_utf8_wcstotext(const wchar_t *str)
{
- text *result;
- int nbytes;
- int r;
+ text *result;
+ int nbytes;
+ int r;
nbytes = WideCharToMultiByte(CP_UTF8, 0, str, -1, NULL, 0, NULL, NULL);
if (nbytes == 0) /* shouldn't happen */
@@ -217,7 +217,7 @@ win32_utf8_wcstotext(const wchar_t *str)
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- result = palloc(nbytes+VARHDRSZ);
+ result = palloc(nbytes + VARHDRSZ);
r = WideCharToMultiByte(CP_UTF8, 0, str, -1, VARDATA(result), nbytes,
NULL, NULL);
@@ -227,7 +227,7 @@ win32_utf8_wcstotext(const wchar_t *str)
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
+ VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
return result;
}
@@ -256,8 +256,7 @@ win32_wcstotext(const wchar_t *str, int ncodes)
#define texttowcs win32_texttowcs
#define wcstotext win32_wcstotext
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/********************************************************************
@@ -278,10 +277,11 @@ Datum
lower(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -309,8 +309,7 @@ lower(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -344,10 +343,11 @@ Datum
upper(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -375,8 +375,7 @@ upper(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -413,10 +412,11 @@ Datum
initcap(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -452,8 +452,7 @@ initcap(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -732,8 +731,8 @@ dotrim(const char *string, int stringlen,
{
/*
* In the multibyte-encoding case, build arrays of pointers to
- * character starts, so that we can avoid inefficient checks
- * in the inner loops.
+ * character starts, so that we can avoid inefficient checks in
+ * the inner loops.
*/
const char **stringchars;
const char **setchars;
@@ -828,8 +827,7 @@ dotrim(const char *string, int stringlen,
else
{
/*
- * In the single-byte-encoding case, we don't need such
- * overhead.
+ * In the single-byte-encoding case, we don't need such overhead.
*/
if (doltrim)
{
@@ -1152,9 +1150,9 @@ translate(PG_FUNCTION_ARGS)
VARATT_SIZEP(result) = retlen + VARHDRSZ;
/*
- * There may be some wasted space in the result if deletions occurred,
- * but it's not worth reallocating it; the function result probably
- * won't live long anyway.
+ * There may be some wasted space in the result if deletions occurred, but
+ * it's not worth reallocating it; the function result probably won't live
+ * long anyway.
*/
PG_RETURN_TEXT_P(result);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7c9c774d91b..303fec745ab 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.31 2005/03/16 00:02:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.32 2005/10/15 02:49:29 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -124,9 +124,9 @@ const char *
locale_messages_assign(const char *value, bool doit, GucSource source)
{
#ifndef WIN32
+
/*
- * LC_MESSAGES category does not exist everywhere, but accept it
- * anyway
+ * LC_MESSAGES category does not exist everywhere, but accept it anyway
*/
#ifdef LC_MESSAGES
if (doit)
@@ -138,16 +138,15 @@ locale_messages_assign(const char *value, bool doit, GucSource source)
value = locale_xxx_assign(LC_MESSAGES, value, false, source);
#endif /* LC_MESSAGES */
return value;
-
-#else /* WIN32 */
+#else /* WIN32 */
/*
* Win32 does not have working setlocale() for LC_MESSAGES. We can only
- * use environment variables to change it (per gettext FAQ). This
- * means we can't actually check the supplied value, so always assume
- * it's good. Also, ignore attempts to set to "", which really means
- * "keep using the old value". (Actually it means "use the environment
- * value", but we are too lazy to try to implement that exactly.)
+ * use environment variables to change it (per gettext FAQ). This means
+ * we can't actually check the supplied value, so always assume it's good.
+ * Also, ignore attempts to set to "", which really means "keep using the
+ * old value". (Actually it means "use the environment value", but we are
+ * too lazy to try to implement that exactly.)
*/
if (doit && value[0])
{
@@ -160,12 +159,12 @@ locale_messages_assign(const char *value, bool doit, GucSource source)
if (!SetEnvironmentVariable("LC_MESSAGES", value))
return NULL;
- snprintf(env, sizeof(env)-1, "LC_MESSAGES=%s", value);
+ snprintf(env, sizeof(env) - 1, "LC_MESSAGES=%s", value);
if (_putenv(env))
return NULL;
}
return value;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
@@ -289,8 +288,8 @@ PGLC_localeconv(void)
extlconv = localeconv();
/*
- * Must copy all values since restoring internal settings may
- * overwrite localeconv()'s results.
+ * Must copy all values since restoring internal settings may overwrite
+ * localeconv()'s results.
*/
CurrentLocaleConv = *extlconv;
CurrentLocaleConv.currency_symbol = strdup(extlconv->currency_symbol);
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index d7c34b6a929..48d93d0602c 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.19 2005/05/25 21:40:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20 2005/10/15 02:49:29 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -219,11 +219,11 @@ static PGLZ_Strategy strategy_default_data = {
6144, /* Data chunks greater equal 6K force
* compression */
/* except compressed result is greater uncompressed data */
- 20, /* Compression rates below 20% mean
- * fallback to uncompressed */
+ 20, /* Compression rates below 20% mean fallback
+ * to uncompressed */
/* storage except compression is forced by previous parameter */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
10 /* Lower good match size by 10% at every
* lookup loop iteration. */
};
@@ -233,10 +233,9 @@ PGLZ_Strategy *PGLZ_strategy_default = &strategy_default_data;
static PGLZ_Strategy strategy_always_data = {
0, /* Chunks of any size are compressed */
0, /* */
- 0, /* We want to save at least one single
- * byte */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 0, /* We want to save at least one single byte */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
6 /* Look harder for a good match. */
};
PGLZ_Strategy *PGLZ_strategy_always = &strategy_always_data;
@@ -246,8 +245,7 @@ static PGLZ_Strategy strategy_never_data = {
0, /* */
0, /* */
0, /* */
- 0, /* Zero indicates "store uncompressed
- * always" */
+ 0, /* Zero indicates "store uncompressed always" */
0 /* */
};
PGLZ_Strategy *PGLZ_strategy_never = &strategy_never_data;
@@ -395,8 +393,7 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
int32 off = 0;
/*
- * Traverse the linked history list until a good enough match is
- * found.
+ * Traverse the linked history list until a good enough match is found.
*/
hent = hstart[pglz_hist_idx(input, end)];
while (hent)
@@ -414,12 +411,12 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
break;
/*
- * Determine length of match. A better match must be larger than
- * the best so far. And if we already have a match of 16 or more
- * bytes, it's worth the call overhead to use memcmp() to check if
- * this match is equal for the same size. After that we must
- * fallback to character by character comparison to know the exact
- * position where the diff occurred.
+ * Determine length of match. A better match must be larger than the
+ * best so far. And if we already have a match of 16 or more bytes,
+ * it's worth the call overhead to use memcmp() to check if this match
+ * is equal for the same size. After that we must fallback to
+ * character by character comparison to know the exact position where
+ * the diff occurred.
*/
thislen = 0;
if (len >= 16)
@@ -462,8 +459,8 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
hent = hent->next;
/*
- * Be happy with lesser good matches the more entries we visited.
- * But no point in doing calculation if we're at end of list.
+ * Be happy with lesser good matches the more entries we visited. But
+ * no point in doing calculation if we're at end of list.
*/
if (hent)
{
@@ -565,10 +562,10 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
memset((void *) hist_start, 0, sizeof(hist_start));
/*
- * Compute the maximum result size allowed by the strategy. If the
- * input size exceeds force_input_size, the max result size is the
- * input size itself. Otherwise, it is the input size minus the
- * minimum wanted compression rate.
+ * Compute the maximum result size allowed by the strategy. If the input
+ * size exceeds force_input_size, the max result size is the input size
+ * itself. Otherwise, it is the input size minus the minimum wanted
+ * compression rate.
*/
if (slen >= strategy->force_input_size)
result_max = slen;
@@ -588,8 +585,8 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
while (dp < dend)
{
/*
- * If we already exceeded the maximum result size, set no
- * compression flag and stop this. But don't check too often.
+ * If we already exceeded the maximum result size, set no compression
+ * flag and stop this. But don't check too often.
*/
if (bp - bstart >= result_max)
{
@@ -632,9 +629,9 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
}
/*
- * If we are still in compressing mode, write out the last control
- * byte and determine if the compression gained the rate requested by
- * the strategy.
+ * If we are still in compressing mode, write out the last control byte
+ * and determine if the compression gained the rate requested by the
+ * strategy.
*/
if (do_compress)
{
@@ -647,8 +644,8 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
/*
* Done - if we successfully compressed and matched the strategy's
- * constraints, return the compressed result. Otherwise copy the
- * original source over it and return the original length.
+ * constraints, return the compressed result. Otherwise copy the original
+ * source over it and return the original length.
*/
if (do_compress)
{
@@ -704,9 +701,9 @@ pglz_decompress(PGLZ_Header *source, char *dest)
/*
* Otherwise it contains the match length minus 3 and the
* upper 4 bits of the offset. The next following byte
- * contains the lower 8 bits of the offset. If the length
- * is coded as 18, another extension tag byte tells how
- * much longer the match really was (0-255).
+ * contains the lower 8 bits of the offset. If the length is
+ * coded as 18, another extension tag byte tells how much
+ * longer the match really was (0-255).
*/
len = (dp[0] & 0x0f) + 3;
off = ((dp[0] & 0xf0) << 4) | dp[1];
@@ -715,10 +712,10 @@ pglz_decompress(PGLZ_Header *source, char *dest)
len += *dp++;
/*
- * Now we copy the bytes specified by the tag from OUTPUT
- * to OUTPUT. It is dangerous and platform dependent to
- * use memcpy() here, because the copied areas could
- * overlap extremely!
+ * Now we copy the bytes specified by the tag from OUTPUT to
+ * OUTPUT. It is dangerous and platform dependent to use
+ * memcpy() here, because the copied areas could overlap
+ * extremely!
*/
while (len--)
{
@@ -729,8 +726,8 @@ pglz_decompress(PGLZ_Header *source, char *dest)
else
{
/*
- * An unset control bit means LITERAL BYTE. So we just
- * copy one from INPUT to OUTPUT.
+ * An unset control bit means LITERAL BYTE. So we just copy
+ * one from INPUT to OUTPUT.
*/
*bp++ = *dp++;
}
@@ -764,8 +761,8 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->tocopy > 0)
{
/*
- * Copy one byte from output to output until we did it for the
- * length specified by the last tag. Return that byte.
+ * Copy one byte from output to output until we did it for the length
+ * specified by the last tag. Return that byte.
*/
dstate->tocopy--;
return (*(dstate->cp_out++) = *(dstate->cp_copy++));
@@ -774,21 +771,20 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->ctrl_count == 0)
{
/*
- * Get the next control byte if we need to, but check for EOF
- * before.
+ * Get the next control byte if we need to, but check for EOF before.
*/
if (dstate->cp_in == dstate->cp_end)
return EOF;
/*
* This decompression method saves time only, if we stop near the
- * beginning of the data (maybe because we're called by a
- * comparison function and a difference occurs early). Otherwise,
- * all the checks, needed here, cause too much overhead.
+ * beginning of the data (maybe because we're called by a comparison
+ * function and a difference occurs early). Otherwise, all the checks,
+ * needed here, cause too much overhead.
*
- * Thus we decompress the entire rest at once into the temporary
- * buffer and change the decomp state to return the prepared data
- * from the buffer by the more simple calls to
+ * Thus we decompress the entire rest at once into the temporary buffer
+ * and change the decomp state to return the prepared data from the
+ * buffer by the more simple calls to
* pglz_get_next_decomp_char_from_plain().
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
@@ -856,8 +852,8 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->ctrl & 0x01)
{
/*
- * Bit is set, so tag is following. Setup copy information and do
- * the copy for the first byte as above.
+ * Bit is set, so tag is following. Setup copy information and do the
+ * copy for the first byte as above.
*/
int off;
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index b1bd11c9c20..8c10bf387d4 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.24 2005/06/29 22:51:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.25 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -354,8 +354,8 @@ pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
result = beentry->activity_start_timestamp;
/*
- * No time recorded for start of current query -- this is the case if
- * the user hasn't enabled query-level stats collection.
+ * No time recorded for start of current query -- this is the case if the
+ * user hasn't enabled query-level stats collection.
*/
if (result == 0)
PG_RETURN_NULL();
@@ -366,7 +366,7 @@ pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
Datum
pg_stat_get_backend_start(PG_FUNCTION_ARGS)
{
- int32 beid = PG_GETARG_INT32(0);
+ int32 beid = PG_GETARG_INT32(0);
TimestampTz result;
PgStat_StatBeEntry *beentry;
@@ -389,7 +389,7 @@ Datum
pg_stat_get_backend_client_addr(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_host[NI_MAXHOST];
int ret;
@@ -432,7 +432,7 @@ Datum
pg_stat_get_backend_client_port(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_port[NI_MAXSERV];
int ret;
diff --git a/src/backend/utils/adt/quote.c b/src/backend/utils/adt/quote.c
index 808ae6142ed..98a8ae765ee 100644
--- a/src/backend/utils/adt/quote.c
+++ b/src/backend/utils/adt/quote.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.16 2005/07/02 17:01:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.17 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,13 +65,13 @@ quote_literal(PG_FUNCTION_ARGS)
cp1 = VARDATA(t);
cp2 = VARDATA(result);
- for(; len-- > 0; cp1++)
+ for (; len-- > 0; cp1++)
if (*cp1 == '\\')
{
*cp2++ = ESCAPE_STRING_SYNTAX;
break;
}
-
+
len = VARSIZE(t) - VARHDRSZ;
cp1 = VARDATA(t);
*cp2++ = '\'';
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 0aba560aa9c..a872762c3c2 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.58 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.59 2005/10/15 02:49:29 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -85,8 +85,8 @@ static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
*
* Returns regex_t
*
- * text_re --- the pattern, expressed as an *untoasted* TEXT object
- * cflags --- compile options for the pattern
+ * text_re --- the pattern, expressed as an *untoasted* TEXT object
+ * cflags --- compile options for the pattern
*
* Pattern is given in the database encoding. We internally convert to
* array of pg_wchar which is what Spencer's regex package wants.
@@ -104,8 +104,8 @@ RE_compile_and_cache(text *text_re, int cflags)
/*
* Look for a match among previously compiled REs. Since the data
- * structure is self-organizing with most-used entries at the front,
- * our search strategy can just be to scan from the front.
+ * structure is self-organizing with most-used entries at the front, our
+ * search strategy can just be to scan from the front.
*/
for (i = 0; i < num_res; i++)
{
@@ -171,8 +171,8 @@ RE_compile_and_cache(text *text_re, int cflags)
re_temp.cre_flags = cflags;
/*
- * Okay, we have a valid new item in re_temp; insert it into the
- * storage array. Discard last entry if needed.
+ * Okay, we have a valid new item in re_temp; insert it into the storage
+ * array. Discard last entry if needed.
*/
if (num_res >= MAX_CACHED_RES)
{
@@ -213,7 +213,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len,
size_t data_len;
int regexec_result;
regex_t re;
- char errMsg[100];
+ char errMsg[100];
/* Convert data string to wide characters */
data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar));
@@ -405,10 +405,10 @@ textregexsubstr(PG_FUNCTION_ARGS)
regmatch_t pmatch[2];
/*
- * We pass two regmatch_t structs to get info about the overall match
- * and the match for the first parenthesized subexpression (if any).
- * If there is a parenthesized subexpression, we return what it
- * matched; else return what the whole regexp matched.
+ * We pass two regmatch_t structs to get info about the overall match and
+ * the match for the first parenthesized subexpression (if any). If there
+ * is a parenthesized subexpression, we return what it matched; else
+ * return what the whole regexp matched.
*/
match = RE_compile_and_execute(p,
VARDATA(s),
@@ -432,9 +432,9 @@ textregexsubstr(PG_FUNCTION_ARGS)
}
return DirectFunctionCall3(text_substr,
- PointerGetDatum(s),
- Int32GetDatum(so + 1),
- Int32GetDatum(eo - so));
+ PointerGetDatum(s),
+ Int32GetDatum(so + 1),
+ Int32GetDatum(eo - so));
}
PG_RETURN_NULL();
@@ -442,7 +442,7 @@ textregexsubstr(PG_FUNCTION_ARGS)
/*
* textregexreplace_noopt()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
* This function is a version that doesn't specify the option of
* textregexreplace. This is case sensitive, replace the first
* instance only.
@@ -458,15 +458,15 @@ textregexreplace_noopt(PG_FUNCTION_ARGS)
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(false));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(false));
}
/*
* textregexreplace()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
*/
Datum
textregexreplace(PG_FUNCTION_ARGS)
@@ -478,7 +478,7 @@ textregexreplace(PG_FUNCTION_ARGS)
char *opt_p = VARDATA(opt);
int opt_len = (VARSIZE(opt) - VARHDRSZ);
int i;
- bool global = false;
+ bool global = false;
bool ignorecase = false;
regex_t re;
@@ -492,12 +492,13 @@ textregexreplace(PG_FUNCTION_ARGS)
break;
case 'g':
global = true;
+
break;
default:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid option of regexp_replace: %c",
- opt_p[i])));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option of regexp_replace: %c",
+ opt_p[i])));
break;
}
}
@@ -508,10 +509,10 @@ textregexreplace(PG_FUNCTION_ARGS)
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(global));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(global));
}
/* similar_escape()
@@ -555,7 +556,7 @@ similar_escape(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
/* We need room for ^, $, and up to 2 output bytes per input byte */
@@ -566,7 +567,7 @@ similar_escape(PG_FUNCTION_ARGS)
while (plen > 0)
{
- char pchar = *p;
+ char pchar = *p;
if (afterescape)
{
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 3a52c8756d1..9a626c2f766 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.95 2005/10/02 23:50:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.96 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,17 +71,17 @@ regprocin(PG_FUNCTION_ARGS)
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_proc for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_proc for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -113,7 +113,7 @@ regprocin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
@@ -125,8 +125,8 @@ regprocin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_proc entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_proc entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name_or_oid, "regprocin");
clist = FuncnameGetCandidates(names, -1);
@@ -134,7 +134,7 @@ regprocin(PG_FUNCTION_ARGS)
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (clist->next != NULL)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
@@ -172,9 +172,9 @@ regprocout(PG_FUNCTION_ARGS)
char *proname = NameStr(procform->proname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the proc name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the proc name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(proname);
@@ -258,15 +258,15 @@ regprocedurein(PG_FUNCTION_ARGS)
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
@@ -286,7 +286,7 @@ regprocedurein(PG_FUNCTION_ARGS)
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
result = clist->oid;
@@ -323,8 +323,8 @@ format_procedure(Oid procedure_oid)
initStringInfo(&buf);
/*
- * Would this proc be found (given the right args) by
- * regprocedurein? If not, we need to qualify it.
+ * Would this proc be found (given the right args) by regprocedurein?
+ * If not, we need to qualify it.
*/
if (FunctionIsVisible(procedure_oid))
nspname = NULL;
@@ -421,17 +421,17 @@ regoperin(PG_FUNCTION_ARGS)
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_operator for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_operator for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -463,7 +463,7 @@ regoperin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator does not exist: %s", opr_name_or_oid)));
+ errmsg("operator does not exist: %s", opr_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
@@ -474,8 +474,8 @@ regoperin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_operator entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_operator entries in the current search path.
*/
names = stringToQualifiedNameList(opr_name_or_oid, "regoperin");
clist = OpernameGetCandidates(names, '\0');
@@ -521,9 +521,9 @@ regoperout(PG_FUNCTION_ARGS)
char *oprname = NameStr(operform->oprname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the oper name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the oper name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(oprname);
@@ -556,8 +556,7 @@ regoperout(PG_FUNCTION_ARGS)
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", oprid);
@@ -616,15 +615,15 @@ regoperatorin(PG_FUNCTION_ARGS)
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
@@ -696,8 +695,8 @@ format_operator(Oid operator_oid)
initStringInfo(&buf);
/*
- * Would this oper be found (given the right args) by
- * regoperatorin? If not, we need to qualify it.
+ * Would this oper be found (given the right args) by regoperatorin?
+ * If not, we need to qualify it.
*/
if (!OperatorIsVisible(operator_oid))
{
@@ -727,8 +726,7 @@ format_operator(Oid operator_oid)
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", operator_oid);
@@ -797,20 +795,20 @@ regclassin(PG_FUNCTION_ARGS)
/* Numeric OID? */
if (class_name_or_oid[0] >= '0' &&
class_name_or_oid[0] <= '9' &&
- strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
+ strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(class_name_or_oid)));
+ CStringGetDatum(class_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_class for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_class for a match. This is needed for initializing
+ * other system catalogs (pg_namespace may not exist yet, and certainly
+ * there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -833,7 +831,7 @@ regclassin(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation \"%s\" does not exist", class_name_or_oid)));
+ errmsg("relation \"%s\" does not exist", class_name_or_oid)));
/* We assume there can be only one match */
@@ -844,8 +842,8 @@ regclassin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_class entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_class entries in the current search path.
*/
names = stringToQualifiedNameList(class_name_or_oid, "regclassin");
@@ -880,9 +878,9 @@ regclassout(PG_FUNCTION_ARGS)
char *classname = NameStr(classform->relname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the class name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the class name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(classname);
@@ -891,8 +889,7 @@ regclassout(PG_FUNCTION_ARGS)
char *nspname;
/*
- * Would this class be found by regclassin? If not, qualify
- * it.
+ * Would this class be found by regclassin? If not, qualify it.
*/
if (RelationIsVisible(classid))
nspname = NULL;
@@ -966,17 +963,17 @@ regtypein(PG_FUNCTION_ARGS)
strspn(typ_name_or_oid, "0123456789") == strlen(typ_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(typ_name_or_oid)));
+ CStringGetDatum(typ_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a type name, possibly schema-qualified or decorated */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_type for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_type for a match. This is needed for initializing other
+ * system catalogs (pg_namespace may not exist yet, and certainly there
+ * are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -999,7 +996,7 @@ regtypein(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" does not exist", typ_name_or_oid)));
+ errmsg("type \"%s\" does not exist", typ_name_or_oid)));
/* We assume there can be only one match */
@@ -1010,8 +1007,8 @@ regtypein(PG_FUNCTION_ARGS)
}
/*
- * Normal case: invoke the full parser to deal with special cases such
- * as array syntax.
+ * Normal case: invoke the full parser to deal with special cases such as
+ * array syntax.
*/
parseTypeString(typ_name_or_oid, &result, &typmod);
@@ -1043,9 +1040,9 @@ regtypeout(PG_FUNCTION_ARGS)
Form_pg_type typeform = (Form_pg_type) GETSTRUCT(typetup);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the type name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the type name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
{
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 8de31643a68..c49b17be10d 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.80 2005/06/28 05:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.81 2005/10/15 02:49:29 momjian Exp $
*
* ----------
*/
@@ -194,8 +194,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_check", RI_TRIGTYPE_INUP);
@@ -203,8 +202,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
tgargs = trigdata->tg_trigger->tgargs;
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the new tuple.
*
* pk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -225,9 +223,9 @@ RI_FKey_check(PG_FUNCTION_ARGS)
}
/*
- * We should not even consider checking the row if it is no longer
- * valid since it was either deleted (doesn't matter) or updated (in
- * which case it'll be checked with its final values).
+ * We should not even consider checking the row if it is no longer valid
+ * since it was either deleted (doesn't matter) or updated (in which case
+ * it'll be checked with its final values).
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesItself(new_row->t_data, new_row_buf))
@@ -311,8 +309,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_ALL_NULL:
/*
- * No check - if NULLs are allowed at all is already checked
- * by NOT NULL constraint.
+ * No check - if NULLs are allowed at all is already checked by
+ * NOT NULL constraint.
*
* This is true for MATCH FULL, MATCH PARTIAL, and MATCH
* <unspecified>
@@ -323,21 +321,21 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
case RI_MATCH_TYPE_FULL:
/*
- * Not allowed - MATCH FULL says either all or none of
- * the attributes can be NULLs
+ * Not allowed - MATCH FULL says either all or none of the
+ * attributes can be NULLs
*/
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
- RelationGetRelationName(trigdata->tg_relation),
+ RelationGetRelationName(trigdata->tg_relation),
tgargs[RI_CONSTRAINT_NAME_ARGNO]),
errdetail("MATCH FULL does not allow mixing of null and nonnull key values.")));
heap_close(pk_rel, RowShareLock);
@@ -346,8 +344,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH <unspecified> - if ANY column is null, we
- * have a match.
+ * MATCH <unspecified> - if ANY column is null, we have a
+ * match.
*/
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
@@ -355,14 +353,14 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
}
@@ -370,8 +368,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a full qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
@@ -385,7 +383,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -406,12 +404,12 @@ RI_FKey_check(PG_FUNCTION_ARGS)
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(fk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -493,16 +491,15 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
case RI_KEYS_ALL_NULL:
/*
- * No check - nothing could have been referencing this row
- * anyway.
+ * No check - nothing could have been referencing this row anyway.
*/
return true;
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
@@ -510,30 +507,30 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH <unspecified>/FULL - if ANY column is null,
- * we can't be matching to this row already.
+ * MATCH <unspecified>/FULL - if ANY column is null, we
+ * can't be matching to this row already.
*/
return true;
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
break;
}
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a full qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
@@ -547,7 +544,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -568,12 +565,12 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -621,8 +618,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_del", RI_TRIGTYPE_DELETE);
@@ -636,8 +632,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -699,13 +694,13 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -731,7 +726,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -741,8 +736,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -800,8 +794,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_upd", RI_TRIGTYPE_UPDATE);
@@ -815,8 +808,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -879,8 +872,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
match_type, tgnargs, tgargs))
{
/*
- * There's either another row, or no row could match this
- * one. In either case, we don't need to do the check.
+ * There's either another row, or no row could match this one.
+ * In either case, we don't need to do the check.
*/
heap_close(fk_rel, RowShareLock);
return PointerGetDatum(NULL);
@@ -890,13 +883,13 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the noaction update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the noaction update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -922,7 +915,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -932,8 +925,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -987,8 +979,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_del", RI_TRIGTYPE_DELETE);
@@ -1002,11 +993,10 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual DELETE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * DELETE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1057,7 +1047,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1083,7 +1073,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
/* Prepare and save the plan */
@@ -1092,9 +1082,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Build up the arguments from the key
- * values in the deleted PK tuple and delete the referencing
- * rows
+ * We have a plan now. Build up the arguments from the key values
+ * in the deleted PK tuple and delete the referencing rows
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1150,8 +1139,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
int j;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_upd", RI_TRIGTYPE_UPDATE);
@@ -1165,11 +1153,11 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1232,7 +1220,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1266,7 +1254,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
queryoids[j] = queryoids[i];
}
strcat(querystr, qualstr);
@@ -1277,8 +1265,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1339,8 +1326,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_del", RI_TRIGTYPE_DELETE);
@@ -1354,8 +1340,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -1404,13 +1389,13 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1436,7 +1421,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -1446,8 +1431,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1509,8 +1493,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_upd", RI_TRIGTYPE_UPDATE);
@@ -1524,8 +1507,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -1585,13 +1568,13 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1617,7 +1600,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -1627,8 +1610,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1682,8 +1664,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_del", RI_TRIGTYPE_DELETE);
@@ -1697,11 +1678,10 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1747,13 +1727,12 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the set null delete
- * operation
+ * Fetch or prepare a saved plan for the set null delete operation
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1787,7 +1766,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -1797,8 +1776,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1855,8 +1833,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
bool use_cached_query;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_upd", RI_TRIGTYPE_UPDATE);
@@ -1870,11 +1847,10 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1932,17 +1908,16 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * "MATCH <unspecified>" only changes columns corresponding to
- * the referenced columns that have changed in pk_rel. This
- * means the "SET attrn=NULL [, attrn=NULL]" string will be
- * change as well. In this case, we need to build a temporary
- * plan rather than use our cached plan, unless the update
- * happens to change all columns in the key. Fortunately, for
- * the most common case of a single-column foreign key, this
- * will be true.
+ * "MATCH <unspecified>" only changes columns corresponding to the
+ * referenced columns that have changed in pk_rel. This means the
+ * "SET attrn=NULL [, attrn=NULL]" string will be change as well.
+ * In this case, we need to build a temporary plan rather than use
+ * our cached plan, unless the update happens to change all
+ * columns in the key. Fortunately, for the most common case of a
+ * single-column foreign key, this will be true.
*
- * In case you're wondering, the inequality check works because
- * we know that the old key value has no NULLs (see above).
+ * In case you're wondering, the inequality check works because we
+ * know that the old key value has no NULLs (see above).
*/
use_cached_query = match_type == RI_MATCH_TYPE_FULL ||
@@ -1950,14 +1925,14 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
&qkey, RI_KEYPAIR_PK_IDX);
/*
- * Fetch or prepare a saved plan for the set null update
- * operation if possible, or build a temporary plan if not.
+ * Fetch or prepare a saved plan for the set null update operation
+ * if possible, or build a temporary plan if not.
*/
if (!use_cached_query ||
(qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1986,8 +1961,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH <unspecified> - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH <unspecified> - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row, new_row, &qkey,
@@ -2001,7 +1976,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2015,8 +1990,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2069,8 +2043,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
void *qplan;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_del", RI_TRIGTYPE_DELETE);
@@ -2084,11 +2057,10 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2135,12 +2107,12 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
/*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2175,7 +2147,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2185,8 +2157,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2201,12 +2172,12 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
heap_close(fk_rel, RowExclusiveLock);
/*
- * In the case we delete the row who's key is equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * In the case we delete the row who's key is equal to the default
+ * values AND a referencing row in the foreign key table exists,
+ * we would just have updated it to the same values. We need to do
+ * another lookup now and in case a reference exists, abort the
+ * operation. That is already implemented in the NO ACTION
+ * trigger.
*/
RI_FKey_noaction_del(fcinfo);
@@ -2251,8 +2222,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_upd", RI_TRIGTYPE_UPDATE);
@@ -2266,11 +2236,10 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2330,12 +2299,12 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2365,12 +2334,12 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH <unspecified> - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH <unspecified> - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row,
- new_row, &qkey, RI_KEYPAIR_PK_IDX))
+ new_row, &qkey, RI_KEYPAIR_PK_IDX))
{
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), "%s %s = DEFAULT",
querysep, attname);
@@ -2380,7 +2349,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2390,8 +2359,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2407,11 +2375,11 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* In the case we updated the row who's key was equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * default values AND a referencing row in the foreign key table
+ * exists, we would just have updated it to the same values. We
+ * need to do another lookup now and in case a reference exists,
+ * abort the operation. That is already implemented in the NO
+ * ACTION trigger.
*/
RI_FKey_noaction_upd(fcinfo);
@@ -2474,11 +2442,11 @@ RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(pk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(pk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
fk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
@@ -2496,7 +2464,7 @@ RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
return ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2548,11 +2516,11 @@ RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(fk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(fk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
pk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
@@ -2570,7 +2538,7 @@ RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
return ri_KeysEqual(fk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_FK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2603,7 +2571,7 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
{
const char *constrname = fkconstraint->constr_name;
char querystr[MAX_QUOTED_REL_NAME_LEN * 2 + 250 +
- (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
+ (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char relname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2617,9 +2585,9 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
void *qplan;
/*
- * Check to make sure current user has enough permissions to do the
- * test query. (If not, caller can fall back to the trigger method,
- * which works because it changes user IDs on the fly.)
+ * Check to make sure current user has enough permissions to do the test
+ * query. (If not, caller can fall back to the trigger method, which
+ * works because it changes user IDs on the fly.)
*
* XXX are there any other show-stopper conditions to check?
*/
@@ -2669,8 +2637,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
}
/*
- * It's sufficient to test any one pk attribute for null to detect a
- * join failure.
+ * It's sufficient to test any one pk attribute for null to detect a join
+ * failure.
*/
quoteOneName(attname, strVal(linitial(fkconstraint->pk_attrs)));
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr),
@@ -2706,13 +2674,12 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
")");
/*
- * Temporarily increase work_mem so that the check query can be
- * executed more efficiently. It seems okay to do this because the
- * query is simple enough to not use a multiple of work_mem, and one
- * typically would not have many large foreign-key validations
- * happening concurrently. So this seems to meet the criteria for
- * being considered a "maintenance" operation, and accordingly we use
- * maintenance_work_mem.
+ * Temporarily increase work_mem so that the check query can be executed
+ * more efficiently. It seems okay to do this because the query is simple
+ * enough to not use a multiple of work_mem, and one typically would not
+ * have many large foreign-key validations happening concurrently. So
+ * this seems to meet the criteria for being considered a "maintenance"
+ * operation, and accordingly we use maintenance_work_mem.
*
* We do the equivalent of "SET LOCAL work_mem" so that transaction abort
* will restore the old value if we lose control due to an error.
@@ -2736,8 +2703,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
elog(ERROR, "SPI_prepare returned %d for %s", SPI_result, querystr);
/*
- * Run the plan. For safety we force a current snapshot to be used.
- * (In serializable mode, this arguably violates serializability, but we
+ * Run the plan. For safety we force a current snapshot to be used. (In
+ * serializable mode, this arguably violates serializability, but we
* really haven't got much choice.) We need at most one tuple returned,
* so pass limit = 1.
*/
@@ -2762,8 +2729,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
/*
* If it's MATCH FULL, and there are any nulls in the FK keys,
- * complain about that rather than the lack of a match. MATCH
- * FULL disallows partially-null FK rows.
+ * complain about that rather than the lack of a match. MATCH FULL
+ * disallows partially-null FK rows.
*/
if (fkconstraint->fk_matchtype == FKCONSTR_MATCH_FULL)
{
@@ -2785,8 +2752,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
}
/*
- * Although we didn't cache the query, we need to set up a fake
- * query key to pass to ri_ReportViolation.
+ * Although we didn't cache the query, we need to set up a fake query
+ * key to pass to ri_ReportViolation.
*/
MemSet(&qkey, 0, sizeof(qkey));
qkey.constr_queryno = RI_PLAN_CHECK_LOOKUPPK;
@@ -2804,8 +2771,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
elog(ERROR, "SPI_finish failed");
/*
- * Restore work_mem for the remainder of the current transaction. This
- * is another SET LOCAL, so it won't affect the session value, nor any
+ * Restore work_mem for the remainder of the current transaction. This is
+ * another SET LOCAL, so it won't affect the session value, nor any
* tentative value if there is one.
*/
snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem);
@@ -2917,8 +2884,8 @@ ri_BuildQueryKeyFull(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO; j < argc; i++, j += 2)
{
@@ -2965,35 +2932,35 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
+ errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
switch (tgkind)
{
case RI_TRIGTYPE_INSERT:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT", funcname)));
break;
case RI_TRIGTYPE_UPDATE:
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for UPDATE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for UPDATE", funcname)));
break;
case RI_TRIGTYPE_INUP:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) &&
!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT or UPDATE",
- funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT or UPDATE",
+ funcname)));
break;
case RI_TRIGTYPE_DELETE:
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for DELETE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for DELETE", funcname)));
break;
}
@@ -3010,15 +2977,15 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
funcname)));
/*
- * Check that tgconstrrelid is known. We need to check here because
- * of ancient pg_dump bug; see notes in CreateTrigger().
+ * Check that tgconstrrelid is known. We need to check here because of
+ * ancient pg_dump bug; see notes in CreateTrigger().
*/
if (!OidIsValid(trigdata->tg_trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigdata->tg_trigger->tgname,
- RelationGetRelationName(trigdata->tg_relation)),
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigdata->tg_trigger->tgname,
+ RelationGetRelationName(trigdata->tg_relation)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
}
@@ -3105,10 +3072,10 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
query_rel = fk_rel;
/*
- * The values for the query are taken from the table on which the
- * trigger is called - it is normally the other one with respect to
- * query_rel. An exception is ri_Check_Pk_Match(), which uses the PK
- * table for both (the case when constrname == NULL)
+ * The values for the query are taken from the table on which the trigger
+ * is called - it is normally the other one with respect to query_rel. An
+ * exception is ri_Check_Pk_Match(), which uses the PK table for both (the
+ * case when constrname == NULL)
*/
if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK && constrname != NULL)
{
@@ -3128,7 +3095,7 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
vals, nulls);
if (old_tuple)
ri_ExtractValues(qkey, key_idx, source_rel, old_tuple,
- vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
+ vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
}
else
{
@@ -3138,17 +3105,16 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/*
* In READ COMMITTED mode, we just need to use an up-to-date regular
- * snapshot, and we will see all rows that could be interesting.
- * But in SERIALIZABLE mode, we can't change the transaction snapshot.
- * If the caller passes detectNewRows == false then it's okay to do the
- * query with the transaction snapshot; otherwise we use a current
- * snapshot, and tell the executor to error out if it finds any rows under
- * the current snapshot that wouldn't be visible per the transaction
- * snapshot.
+ * snapshot, and we will see all rows that could be interesting. But in
+ * SERIALIZABLE mode, we can't change the transaction snapshot. If the
+ * caller passes detectNewRows == false then it's okay to do the query
+ * with the transaction snapshot; otherwise we use a current snapshot, and
+ * tell the executor to error out if it finds any rows under the current
+ * snapshot that wouldn't be visible per the transaction snapshot.
*/
if (IsXactIsoLevelSerializable && detectNewRows)
{
- CommandCounterIncrement(); /* be sure all my own work is visible */
+ CommandCounterIncrement(); /* be sure all my own work is visible */
test_snapshot = CopySnapshot(GetLatestSnapshot());
crosscheck_snapshot = CopySnapshot(GetTransactionSnapshot());
}
@@ -3161,9 +3127,9 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/*
* If this is a select query (e.g., for a 'no action' or 'restrict'
- * trigger), we only need to see if there is a single row in the
- * table, matching the key. Otherwise, limit = 0 - because we want
- * the query to affect ALL the matching rows.
+ * trigger), we only need to see if there is a single row in the table,
+ * matching the key. Otherwise, limit = 0 - because we want the query to
+ * affect ALL the matching rows.
*/
limit = (expect_OK == SPI_OK_SELECT) ? 1 : 0;
@@ -3193,7 +3159,7 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/* XXX wouldn't it be clearer to do this part at the caller? */
if (constrname && expect_OK == SPI_OK_SELECT &&
- (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
+ (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
ri_ReportViolation(qkey, constrname,
pk_rel, fk_rel,
new_tuple ? new_tuple : old_tuple,
@@ -3257,8 +3223,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't
- * passed by caller, assume the violator tuple came from there.
+ * Determine which relation to complain about. If tupdesc wasn't passed
+ * by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
if (onfk)
@@ -3276,8 +3242,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
/*
* Special case - if there are no keys at all, this is a 'no column'
- * constraint - no need to try to extract the values, and the message
- * in this case looks different.
+ * constraint - no need to try to extract the values, and the message in
+ * this case looks different.
*/
if (qkey->nkeypairs == 0)
{
@@ -3302,8 +3268,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
val = "null";
/*
- * Go to "..." if name or value doesn't fit in buffer. We reserve
- * 5 bytes to ensure we can add comma, "...", null.
+ * Go to "..." if name or value doesn't fit in buffer. We reserve 5
+ * bytes to ensure we can add comma, "...", null.
*/
if (strlen(name) >= (key_names + BUFLENGTH - 5) - name_ptr ||
strlen(val) >= (key_values + BUFLENGTH - 5) - val_ptr)
@@ -3322,18 +3288,18 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
RelationGetRelationName(fk_rel), constrname),
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(pk_rel))));
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(pk_rel))));
else
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("update or delete on \"%s\" violates foreign key constraint \"%s\" on \"%s\"",
RelationGetRelationName(pk_rel),
constrname, RelationGetRelationName(fk_rel)),
- errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(fk_rel))));
+ errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(fk_rel))));
}
/* ----------
@@ -3373,8 +3339,8 @@ ri_BuildQueryKeyPkCheck(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO + RI_KEYPAIR_PK_IDX; j < argc; i++, j += 2)
{
@@ -3542,8 +3508,8 @@ ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
return false;
/*
- * Get the attribute's type OID and call the '=' operator to
- * compare the values.
+ * Get the attribute's type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3591,8 +3557,8 @@ ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
continue;
/*
- * Get the attributes type OID and call the '=' operator to
- * compare the values.
+ * Get the attributes type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3639,8 +3605,8 @@ ri_OneKeyEqual(Relation rel, int column, HeapTuple oldtup, HeapTuple newtup,
return false;
/*
- * Get the attributes type OID and call the '=' operator to compare
- * the values.
+ * Get the attributes type OID and call the '=' operator to compare the
+ * values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[column][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3672,8 +3638,8 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(typeid))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(typeid))));
/*
* Call the type specific '=' function
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 07a5cf54eea..1a12185b048 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.12 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.13 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,6 +54,7 @@ record_in(PG_FUNCTION_ARGS)
{
char *string = PG_GETARG_CSTRING(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
@@ -72,14 +73,14 @@ record_in(PG_FUNCTION_ARGS)
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
@@ -153,7 +154,7 @@ record_in(PG_FUNCTION_ARGS)
/* *ptr must be ')' */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"", string),
+ errmsg("malformed record literal: \"%s\"", string),
errdetail("Too few columns.")));
}
@@ -184,10 +185,10 @@ record_in(PG_FUNCTION_ARGS)
{
if (*ptr == '\0')
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"",
- string),
- errdetail("Unexpected end of input.")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed record literal: \"%s\"",
+ string),
+ errdetail("Unexpected end of input.")));
appendStringInfoChar(&buf, *ptr++);
}
else if (ch == '\"')
@@ -221,8 +222,8 @@ record_in(PG_FUNCTION_ARGS)
values[i] = FunctionCall3(&column_info->proc,
CStringGetDatum(buf.data),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
}
@@ -249,9 +250,9 @@ record_in(PG_FUNCTION_ARGS)
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -420,6 +421,7 @@ record_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
@@ -437,14 +439,14 @@ record_recv(PG_FUNCTION_ARGS)
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
@@ -537,10 +539,9 @@ record_recv(PG_FUNCTION_ARGS)
{
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input
- * buffer. We assume we can scribble on the input buffer so as
- * to maintain the convention that StringInfos have a trailing
- * null.
+ * StringInfo pointing to the correct portion of the input buffer.
+ * We assume we can scribble on the input buffer so as to maintain
+ * the convention that StringInfos have a trailing null.
*/
StringInfoData item_buf;
char csave;
@@ -568,16 +569,16 @@ record_recv(PG_FUNCTION_ARGS)
values[i] = FunctionCall3(&column_info->proc,
PointerGetDatum(&item_buf),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
/* Trouble if it didn't eat the whole buffer */
if (item_buf.cursor != itemlen)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("improper binary format in record column %d",
- i + 1)));
+ errmsg("improper binary format in record column %d",
+ i + 1)));
buf->data[buf->cursor] = csave;
}
@@ -586,9 +587,9 @@ record_recv(PG_FUNCTION_ARGS)
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 1a226bd49c3..04e8eb55161 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.206 2005/10/06 19:51:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.207 2005/10/15 02:49:29 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -201,11 +201,11 @@ static void get_agg_expr(Aggref *aggref, deparse_context *context);
static void get_const_expr(Const *constval, deparse_context *context);
static void get_sublink_expr(SubLink *sublink, deparse_context *context);
static void get_from_clause(Query *query, const char *prefix,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
static void get_from_clause_alias(Alias *alias, RangeTblEntry *rte,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_coldeflist(List *coldeflist,
deparse_context *context);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
@@ -486,8 +486,8 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
- * Start the trigger definition. Note that the trigger's name should
- * never be schema-qualified, but the trigger rel's name may be.
+ * Start the trigger definition. Note that the trigger's name should never
+ * be schema-qualified, but the trigger rel's name may be.
*/
initStringInfo(&buf);
@@ -527,7 +527,7 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
{
if (trigrec->tgconstrrelid != InvalidOid)
appendStringInfo(&buf, "FROM %s ",
- generate_relation_name(trigrec->tgconstrrelid));
+ generate_relation_name(trigrec->tgconstrrelid));
if (!trigrec->tgdeferrable)
appendStringInfo(&buf, "NOT ");
appendStringInfo(&buf, "DEFERRABLE INITIALLY ");
@@ -688,9 +688,9 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
amrec = (Form_pg_am) GETSTRUCT(ht_am);
/*
- * Get the index expressions, if any. (NOTE: we do not use the
- * relcache versions of the expressions and predicate, because we want
- * to display non-const-folded expressions.)
+ * Get the index expressions, if any. (NOTE: we do not use the relcache
+ * versions of the expressions and predicate, because we want to display
+ * non-const-folded expressions.)
*/
if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs))
{
@@ -714,8 +714,8 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
context = deparse_context_for(get_rel_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should
- * never be schema-qualified, but the indexed rel's name may be.
+ * Start the index definition. Note that the index's name should never be
+ * schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -764,7 +764,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
{
/* Need parens if it's not a bare function call */
if (indexkey && IsA(indexkey, FuncExpr) &&
- ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
+ ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
appendStringInfoString(&buf, str);
else
appendStringInfo(&buf, "(%s)", str);
@@ -831,7 +831,7 @@ pg_get_constraintdef(PG_FUNCTION_ARGS)
Oid constraintId = PG_GETARG_OID(0);
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, 0)));
+ false, 0)));
}
Datum
@@ -843,7 +843,7 @@ pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : 0;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, prettyFlags)));
+ false, prettyFlags)));
}
/* Internal version that returns a palloc'd C string */
@@ -865,8 +865,8 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
Form_pg_constraint conForm;
/*
- * Fetch the pg_constraint row. There's no syscache for pg_constraint
- * so we must do it the hard way.
+ * Fetch the pg_constraint row. There's no syscache for pg_constraint so
+ * we must do it the hard way.
*/
conDesc = heap_open(ConstraintRelationId, AccessShareLock);
@@ -914,7 +914,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
/* add foreign relation name */
appendStringInfo(&buf, ") REFERENCES %s(",
- generate_relation_name(conForm->confrelid));
+ generate_relation_name(conForm->confrelid));
/* Fetch and build referenced-column list */
val = heap_getattr(tup, Anum_pg_constraint_confkey,
@@ -1067,15 +1067,13 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases
- * where the constraint expression will be fully
- * parenthesized and we don't need the outer parens ...
- * but there are other cases where we do need 'em. Be
- * conservative for now.
+ * Now emit the constraint definition. There are cases where
+ * the constraint expression will be fully parenthesized and
+ * we don't need the outer parens ... but there are other
+ * cases where we do need 'em. Be conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
- * would NOT be good enough, consider "(x > 0) AND (y >
- * 0)".
+ * would NOT be good enough, consider "(x > 0) AND (y > 0)".
*/
appendStringInfo(&buf, "CHECK (%s)", consrc);
@@ -1259,7 +1257,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
/* Get the number of the column */
column = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(columnname)));
+ PointerGetDatum(columnname)));
attnum = get_attnum(tableOid, column);
if (attnum == InvalidAttrNumber)
@@ -1292,8 +1290,8 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
- * We assume any internal dependency of a relation on a column
- * must be what we are looking for.
+ * We assume any internal dependency of a relation on a column must be
+ * what we are looking for.
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&
@@ -1510,7 +1508,7 @@ deparse_context_for_subplan(const char *name, List *tlist,
if (var->varnoold > 0 && var->varnoold <= rtablelength)
{
RangeTblEntry *varrte = rt_fetch(var->varnoold, rtable);
- AttrNumber varattnum = var->varoattno;
+ AttrNumber varattnum = var->varoattno;
/* need this test in case it's referencing a resjunk col */
if (varattnum <= list_length(varrte->eref->colnames))
@@ -1637,8 +1635,8 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
appendStringInfo(buf, " TO %s", generate_relation_name(ev_class));
if (ev_attr > 0)
appendStringInfo(buf, ".%s",
- quote_identifier(get_relid_attribute_name(ev_class,
- ev_attr)));
+ quote_identifier(get_relid_attribute_name(ev_class,
+ ev_attr)));
/* If the rule has an event qualification, add it */
if (ev_qual == NULL)
@@ -1658,15 +1656,15 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
/*
* We need to make a context for recognizing any Vars in the qual
- * (which can only be references to OLD and NEW). Use the rtable
- * of the first query in the action list for this purpose.
+ * (which can only be references to OLD and NEW). Use the rtable of
+ * the first query in the action list for this purpose.
*/
query = (Query *) linitial(actions);
/*
* If the action is INSERT...SELECT, OLD/NEW have been pushed down
- * into the SELECT, and that's what we need to look at. (Ugly
- * kluge ... try to fix this when we redesign querytrees.)
+ * into the SELECT, and that's what we need to look at. (Ugly kluge
+ * ... try to fix this when we redesign querytrees.)
*/
query = getInsertSelectQuery(query, NULL);
@@ -1809,9 +1807,9 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the
- * passed querytree!
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
+ * querytree!
*/
AcquireRewriteLocks(query);
@@ -1874,9 +1872,9 @@ get_select_query_def(Query *query, deparse_context *context,
ListCell *l;
/*
- * If the Query node has a setOperations tree, then it's the top level
- * of a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT
- * fields are interesting in the top query itself.
+ * If the Query node has a setOperations tree, then it's the top level of
+ * a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT fields are
+ * interesting in the top query itself.
*/
if (query->setOperations)
{
@@ -1909,7 +1907,7 @@ get_select_query_def(Query *query, deparse_context *context,
sortcoltype = exprType(sortexpr);
/* See whether operator is default < or > for datatype */
typentry = lookup_type_cache(sortcoltype,
- TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
/* ASC is default, so emit nothing */ ;
else if (srt->sortop == typentry->gt_opr)
@@ -2025,10 +2023,10 @@ get_basic_select_query(Query *query, deparse_context *context,
get_rule_expr((Node *) tle->expr, context, true);
/*
- * Figure out what the result column should be called. In the
- * context of a view, use the view's tuple descriptor (so as to
- * pick up the effects of any column RENAME that's been done on
- * the view). Otherwise, just use what we can find in the TLE.
+ * Figure out what the result column should be called. In the context
+ * of a view, use the view's tuple descriptor (so as to pick up the
+ * effects of any column RENAME that's been done on the view).
+ * Otherwise, just use what we can find in the TLE.
*/
if (resultDesc && colno <= resultDesc->natts)
colname = NameStr(resultDesc->attrs[colno - 1]->attname);
@@ -2130,10 +2128,10 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
SetOperationStmt *op = (SetOperationStmt *) setOp;
/*
- * We force parens whenever nesting two SetOperationStmts. There
- * are some cases in which parens are needed around a leaf query
- * too, but those are more easily handled at the next level down
- * (see code above).
+ * We force parens whenever nesting two SetOperationStmts. There are
+ * some cases in which parens are needed around a leaf query too, but
+ * those are more easily handled at the next level down (see code
+ * above).
*/
need_paren = !IsA(op->larg, RangeTblRef);
@@ -2231,8 +2229,8 @@ get_insert_query_def(Query *query, deparse_context *context)
List *strippedexprs;
/*
- * If it's an INSERT ... SELECT there will be a single subquery RTE
- * for the SELECT.
+ * If it's an INSERT ... SELECT there will be a single subquery RTE for
+ * the SELECT.
*/
foreach(l, query->rtable)
{
@@ -2279,13 +2277,12 @@ get_insert_query_def(Query *query, deparse_context *context)
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
strippedexprs = lappend(strippedexprs,
processIndirection((Node *) tle->expr,
@@ -2351,13 +2348,12 @@ get_update_query_def(Query *query, deparse_context *context)
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
expr = processIndirection((Node *) tle->expr, context);
@@ -2432,8 +2428,8 @@ get_utility_query_def(Query *query, deparse_context *context)
appendContextKeyword(context, "",
0, PRETTYINDENT_STD, 1);
appendStringInfo(buf, "NOTIFY %s",
- quote_qualified_identifier(stmt->relation->schemaname,
- stmt->relation->relname));
+ quote_qualified_identifier(stmt->relation->schemaname,
+ stmt->relation->relname));
}
else
{
@@ -2517,10 +2513,9 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
if (rte->rtekind == RTE_RELATION)
{
/*
- * It's possible that use of the bare refname would find
- * another more-closely-nested RTE, or be ambiguous, in which
- * case we need to specify the schemaname to avoid these
- * errors.
+ * It's possible that use of the bare refname would find another
+ * more-closely-nested RTE, or be ambiguous, in which case we need
+ * to specify the schemaname to avoid these errors.
*/
if (find_rte_by_refname(rte->eref->aliasname, context) != rte)
*schemaname =
@@ -2530,20 +2525,20 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
{
/*
* If it's an unnamed join, look at the expansion of the alias
- * variable. If it's a simple reference to one of the input
- * vars then recursively find the name of that var, instead.
- * (This allows correct decompiling of cases where there are
- * identically named columns on both sides of the join.)
- * When it's not a simple reference, we have to just return
- * the unqualified variable name (this can only happen with
- * columns that were merged by USING or NATURAL clauses).
+ * variable. If it's a simple reference to one of the input vars
+ * then recursively find the name of that var, instead. (This
+ * allows correct decompiling of cases where there are identically
+ * named columns on both sides of the join.) When it's not a
+ * simple reference, we have to just return the unqualified
+ * variable name (this can only happen with columns that were
+ * merged by USING or NATURAL clauses).
*/
if (var->varattno > 0)
{
- Var *aliasvar;
+ Var *aliasvar;
aliasvar = (Var *) list_nth(rte->joinaliasvars,
- var->varattno-1);
+ var->varattno - 1);
if (IsA(aliasvar, Var))
{
get_names_for_var(aliasvar,
@@ -2568,9 +2563,9 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
* Get the name of a field of a Var of type RECORD.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
- * the field name from it. We ereport if we can't determine the name.
+ * the field name from it. We ereport if we can't determine the name.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
*
@@ -2609,6 +2604,7 @@ get_name_for_var_field(Var *var, int fieldno,
{
case RTE_RELATION:
case RTE_SPECIAL:
+
/*
* This case should not occur: a column of a table shouldn't have
* type RECORD. Fall through and fail (most likely) at the
@@ -2629,7 +2625,7 @@ get_name_for_var_field(Var *var, int fieldno,
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of namespace
+ * to. We have to build an additional level of namespace
* to keep in step with varlevelsup in the subselect.
*/
deparse_namespace mydpns;
@@ -2662,18 +2658,19 @@ get_name_for_var_field(Var *var, int fieldno,
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
+
/*
- * We couldn't get here unless a function is declared with one
- * of its result columns as RECORD, which is not allowed.
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
*/
break;
}
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass
- * to lookup_rowtype_tupdesc() which will probably fail, but will
- * give an appropriate error message while failing.
+ * get_expr_result_type() can do anything with it. If not, pass to
+ * lookup_rowtype_tupdesc() which will probably fail, but will give an
+ * appropriate error message while failing.
*/
if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
tupleDesc = lookup_rowtype_tupdesc(exprType(expr), exprTypmod(expr));
@@ -2866,8 +2863,8 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
return false;
/*
- * Operators are same priority --- can skip parens
- * only if we have (a - b) - c, not a - (b - c).
+ * Operators are same priority --- can skip parens only if
+ * we have (a - b) - c, not a - (b - c).
*/
if (node == (Node *) linitial(((OpExpr *) parentNode)->args))
return true;
@@ -2897,11 +2894,11 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
case T_BoolExpr: /* lower precedence */
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
@@ -2945,11 +2942,11 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
}
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
@@ -3055,10 +3052,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Each level of get_rule_expr must emit an indivisible term
- * (parenthesized if necessary) to ensure result is reparsed into the
- * same expression tree. The only exception is that when the input
- * is a List, we emit the component items comma-separated with no
- * surrounding decoration; this is convenient for most callers.
+ * (parenthesized if necessary) to ensure result is reparsed into the same
+ * expression tree. The only exception is that when the input is a List,
+ * we emit the component items comma-separated with no surrounding
+ * decoration; this is convenient for most callers.
*
* There might be some work left here to support additional node types.
*/
@@ -3129,8 +3126,8 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Parenthesize the argument unless it's a simple Var or a
- * FieldSelect. (In particular, if it's another ArrayRef,
- * we *must* parenthesize to avoid confusion.)
+ * FieldSelect. (In particular, if it's another ArrayRef, we
+ * *must* parenthesize to avoid confusion.)
*/
need_parens = !IsA(aref->refexpr, Var) &&
!IsA(aref->refexpr, FieldSelect);
@@ -3188,7 +3185,7 @@ get_rule_expr(Node *node, deparse_context *context,
appendStringInfo(buf, " %s %s (",
generate_operator_name(expr->opno,
exprType(arg1),
- get_element_type(exprType(arg2))),
+ get_element_type(exprType(arg2))),
expr->useOr ? "ANY" : "ALL");
get_rule_expr_paren(arg2, context, true, node);
appendStringInfoChar(buf, ')');
@@ -3261,9 +3258,8 @@ get_rule_expr(Node *node, deparse_context *context,
case T_SubPlan:
{
/*
- * We cannot see an already-planned subplan in rule
- * deparsing, only while EXPLAINing a query plan. For now,
- * just punt.
+ * We cannot see an already-planned subplan in rule deparsing,
+ * only while EXPLAINing a query plan. For now, just punt.
*/
if (((SubPlan *) node)->useHashTable)
appendStringInfo(buf, "(hashed subplan)");
@@ -3282,12 +3278,11 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Parenthesize the argument unless it's an ArrayRef or
- * another FieldSelect. Note in particular that it would
- * be WRONG to not parenthesize a Var argument; simplicity
- * is not the issue here, having the right number of names
- * is.
+ * another FieldSelect. Note in particular that it would be
+ * WRONG to not parenthesize a Var argument; simplicity is not
+ * the issue here, having the right number of names is.
*/
- need_parens = !IsA(arg, ArrayRef) && !IsA(arg, FieldSelect);
+ need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect);
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr(arg, context, true);
@@ -3296,8 +3291,8 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If it's a Var of type RECORD, we have to find what the Var
- * refers to; otherwise we can use get_expr_result_type.
- * If that fails, we try lookup_rowtype_tupdesc, which will
+ * refers to; otherwise we can use get_expr_result_type. If
+ * that fails, we try lookup_rowtype_tupdesc, which will
* probably fail too, but will ereport an acceptable message.
*/
if (IsA(arg, Var) &&
@@ -3324,8 +3319,8 @@ get_rule_expr(Node *node, deparse_context *context,
case T_FieldStore:
/*
- * We shouldn't see FieldStore here; it should have been
- * stripped off by processIndirection().
+ * We shouldn't see FieldStore here; it should have been stripped
+ * off by processIndirection().
*/
elog(ERROR, "unexpected FieldStore");
break;
@@ -3349,8 +3344,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(relabel->resulttype,
- relabel->resulttypmod));
+ format_type_with_typemod(relabel->resulttype,
+ relabel->resulttypmod));
}
}
break;
@@ -3374,7 +3369,7 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(convert->resulttype, -1));
+ format_type_with_typemod(convert->resulttype, -1));
}
}
break;
@@ -3444,9 +3439,9 @@ get_rule_expr(Node *node, deparse_context *context,
char *sep;
/*
- * If it's a named type and not RECORD, we may have to
- * skip dropped columns and/or claim there are NULLs for
- * added columns.
+ * If it's a named type and not RECORD, we may have to skip
+ * dropped columns and/or claim there are NULLs for added
+ * columns.
*/
if (rowexpr->row_typeid != RECORDOID)
{
@@ -3455,8 +3450,8 @@ get_rule_expr(Node *node, deparse_context *context,
}
/*
- * SQL99 allows "ROW" to be omitted when there is more
- * than one column, but for simplicity we always print it.
+ * SQL99 allows "ROW" to be omitted when there is more than
+ * one column, but for simplicity we always print it.
*/
appendStringInfo(buf, "ROW(");
sep = "";
@@ -3490,7 +3485,7 @@ get_rule_expr(Node *node, deparse_context *context,
appendStringInfo(buf, ")");
if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rowexpr->row_typeid, -1));
+ format_type_with_typemod(rowexpr->row_typeid, -1));
}
break;
@@ -3611,8 +3606,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(ctest->resulttype,
- ctest->resulttypmod));
+ format_type_with_typemod(ctest->resulttype,
+ ctest->resulttypmod));
}
}
break;
@@ -3724,9 +3719,8 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
ListCell *l;
/*
- * If the function call came from an implicit coercion, then just show
- * the first argument --- unless caller wants to see implicit
- * coercions.
+ * If the function call came from an implicit coercion, then just show the
+ * first argument --- unless caller wants to see implicit coercions.
*/
if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit)
{
@@ -3755,14 +3749,14 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rettype, coercedTypmod));
+ format_type_with_typemod(rettype, coercedTypmod));
return;
}
/*
- * Normal function: display as proname(args). First we need to
- * extract the argument datatypes.
+ * Normal function: display as proname(args). First we need to extract
+ * the argument datatypes.
*/
nargs = 0;
foreach(l, expr->args)
@@ -3791,7 +3785,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
Oid argtype = exprType((Node *) aggref->target);
appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, 1, &argtype),
+ generate_function_name(aggref->aggfnoid, 1, &argtype),
aggref->aggdistinct ? "DISTINCT " : "");
if (aggref->aggstar)
appendStringInfo(buf, "*");
@@ -3821,11 +3815,11 @@ get_const_expr(Const *constval, deparse_context *context)
if (constval->constisnull)
{
/*
- * Always label the type of a NULL constant to prevent
- * misdecisions about type when reparsing.
+ * Always label the type of a NULL constant to prevent misdecisions
+ * about type when reparsing.
*/
appendStringInfo(buf, "NULL::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
return;
}
@@ -3846,14 +3840,13 @@ get_const_expr(Const *constval, deparse_context *context)
case NUMERICOID:
{
/*
- * These types are printed without quotes unless they
- * contain values that aren't accepted by the scanner
- * unquoted (e.g., 'NaN'). Note that strtod() and friends
- * might accept NaN, so we can't use that to test.
+ * These types are printed without quotes unless they contain
+ * values that aren't accepted by the scanner unquoted (e.g.,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
+ * so we can't use that to test.
*
- * In reality we only need to defend against infinity and
- * NaN, so we need not get too crazy about pattern
- * matching here.
+ * In reality we only need to defend against infinity and NaN, so
+ * we need not get too crazy about pattern matching here.
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
@@ -3879,13 +3872,14 @@ get_const_expr(Const *constval, deparse_context *context)
break;
default:
+
/*
* We must quote any funny characters in the constant's
* representation. XXX Any MULTIBYTE considerations here?
*/
for (valptr = extval; *valptr; valptr++)
if (*valptr == '\\' ||
- (unsigned char)*valptr < (unsigned char)' ')
+ (unsigned char) *valptr < (unsigned char) ' ')
{
appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX);
break;
@@ -3901,7 +3895,7 @@ get_const_expr(Const *constval, deparse_context *context)
appendStringInfoChar(buf, ch);
appendStringInfoChar(buf, ch);
}
- else if ((unsigned char)ch < (unsigned char)' ')
+ else if ((unsigned char) ch < (unsigned char) ' ')
appendStringInfo(buf, "\\%03o", (int) ch);
else
appendStringInfoChar(buf, ch);
@@ -3913,9 +3907,9 @@ get_const_expr(Const *constval, deparse_context *context)
pfree(extval);
/*
- * Append ::typename unless the constant will be implicitly typed as
- * the right type when it is read in. XXX this code has to be kept in
- * sync with the behavior of the parser, especially make_const.
+ * Append ::typename unless the constant will be implicitly typed as the
+ * right type when it is read in. XXX this code has to be kept in sync
+ * with the behavior of the parser, especially make_const.
*/
switch (constval->consttype)
{
@@ -3935,7 +3929,7 @@ get_const_expr(Const *constval, deparse_context *context)
}
if (needlabel)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
}
@@ -3969,10 +3963,10 @@ get_sublink_expr(SubLink *sublink, deparse_context *context)
need_paren = true;
/*
- * XXX we regurgitate the originally given operator name, with or
- * without schema qualification. This is not necessarily 100% right
- * but it's the best we can do, since the operators actually used
- * might not all be in the same schema.
+ * XXX we regurgitate the originally given operator name, with or without
+ * schema qualification. This is not necessarily 100% right but it's the
+ * best we can do, since the operators actually used might not all be in
+ * the same schema.
*/
switch (sublink->subLinkType)
{
@@ -4044,11 +4038,11 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
ListCell *l;
/*
- * We use the query's jointree as a guide to what to print. However,
- * we must ignore auto-added RTEs that are marked not inFromCl. (These
- * can only appear at the top level of the jointree, so it's
- * sufficient to check here.) This check also ensures we ignore
- * the rule pseudo-RTEs for NEW and OLD.
+ * We use the query's jointree as a guide to what to print. However, we
+ * must ignore auto-added RTEs that are marked not inFromCl. (These can
+ * only appear at the top level of the jointree, so it's sufficient to
+ * check here.) This check also ensures we ignore the rule pseudo-RTEs
+ * for NEW and OLD.
*/
foreach(l, query->jointree->fromlist)
{
@@ -4124,10 +4118,10 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
strcmp(rte->eref->aliasname, get_rel_name(rte->relid)) != 0)
{
/*
- * Apparently the rel has been renamed since the rule was
- * made. Emit a fake alias clause so that variable references
- * will still work. This is not a 100% solution but should
- * work in most reasonable situations.
+ * Apparently the rel has been renamed since the rule was made.
+ * Emit a fake alias clause so that variable references will still
+ * work. This is not a 100% solution but should work in most
+ * reasonable situations.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
@@ -4136,10 +4130,9 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always give an alias.
- * This covers possible renaming of the function and/or
- * instability of the FigureColname rules for things that
- * aren't simple functions.
+ * For a function RTE, always give an alias. This covers possible
+ * renaming of the function and/or instability of the
+ * FigureColname rules for things that aren't simple functions.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
@@ -4175,7 +4168,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
need_paren_on_right = PRETTY_PAREN(context) &&
!IsA(j->rarg, RangeTblRef) &&
- !(IsA(j->rarg, JoinExpr) && ((JoinExpr*) j->rarg)->alias != NULL);
+ !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL);
if (!PRETTY_PAREN(context) || j->alias != NULL)
appendStringInfoChar(buf, '(');
@@ -4278,7 +4271,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
if (col != list_head(j->using))
appendStringInfo(buf, ", ");
appendStringInfoString(buf,
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
@@ -4415,8 +4408,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
/*
- * Special case for ARRAY_OPS: pretend it is default for any array
- * type
+ * Special case for ARRAY_OPS: pretend it is default for any array type
*/
if (OidIsValid(actual_datatype))
{
@@ -4476,19 +4468,19 @@ processIndirection(Node *node, deparse_context *context)
format_type_be(fstore->resulttype));
/*
- * Get the field name. Note we assume here that there's only
- * one field being assigned to. This is okay in stored rules
- * but could be wrong in executable target lists. Presently
- * no problem since explain.c doesn't print plan targetlists,
- * but someday may have to think of something ...
+ * Get the field name. Note we assume here that there's only one
+ * field being assigned to. This is okay in stored rules but
+ * could be wrong in executable target lists. Presently no
+ * problem since explain.c doesn't print plan targetlists, but
+ * someday may have to think of something ...
*/
fieldname = get_relid_attribute_name(typrelid,
- linitial_int(fstore->fieldnums));
+ linitial_int(fstore->fieldnums));
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
/*
- * We ignore arg since it should be an uninteresting reference
- * to the target column or subcolumn.
+ * We ignore arg since it should be an uninteresting reference to
+ * the target column or subcolumn.
*/
node = (Node *) linitial(fstore->newvals);
}
@@ -4501,8 +4493,8 @@ processIndirection(Node *node, deparse_context *context)
printSubscripts(aref, context);
/*
- * We ignore refexpr since it should be an uninteresting
- * reference to the target column or subcolumn.
+ * We ignore refexpr since it should be an uninteresting reference
+ * to the target column or subcolumn.
*/
node = (Node *) aref->refassgnexpr;
}
@@ -4545,10 +4537,9 @@ const char *
quote_identifier(const char *ident)
{
/*
- * Can avoid quoting if ident starts with a lowercase letter or
- * underscore and contains only lowercase letters, digits, and
- * underscores, *and* is not any SQL keyword. Otherwise, supply
- * quotes.
+ * Can avoid quoting if ident starts with a lowercase letter or underscore
+ * and contains only lowercase letters, digits, and underscores, *and* is
+ * not any SQL keyword. Otherwise, supply quotes.
*/
int nquotes = 0;
bool safe;
@@ -4557,8 +4548,8 @@ quote_identifier(const char *ident)
char *optr;
/*
- * would like to use <ctype.h> macros here, but they might yield
- * unwanted locale-specific results...
+ * would like to use <ctype.h> macros here, but they might yield unwanted
+ * locale-specific results...
*/
safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_');
@@ -4583,13 +4574,13 @@ quote_identifier(const char *ident)
if (safe)
{
/*
- * Check for keyword. This test is overly strong, since many of
- * the "keywords" known to the parser are usable as column names,
- * but the parser doesn't provide any easy way to test for whether
- * an identifier is safe or not... so be safe not sorry.
+ * Check for keyword. This test is overly strong, since many of the
+ * "keywords" known to the parser are usable as column names, but the
+ * parser doesn't provide any easy way to test for whether an
+ * identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison, but
- * that's fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but that's
+ * fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
@@ -4702,8 +4693,8 @@ generate_function_name(Oid funcid, int nargs, Oid *argtypes)
/*
* The idea here is to schema-qualify only if the parser would fail to
- * resolve the correct function given the unqualified func name with
- * the specified argtypes.
+ * resolve the correct function given the unqualified func name with the
+ * specified argtypes.
*/
p_result = func_get_detail(list_make1(makeString(proname)),
NIL, nargs, argtypes,
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index ccc8d0f4483..95980ca1e03 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.190 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.191 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -197,8 +197,8 @@ eqsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable = something or something = variable,
- * then punt and return a default estimate.
+ * If expression is not variable = something or something = variable, then
+ * punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
@@ -229,11 +229,11 @@ eqsel(PG_FUNCTION_ARGS)
int i;
/*
- * Is the constant "=" to any of the column's most common
- * values? (Although the given operator may not really be
- * "=", we will assume that seeing whether it returns TRUE is
- * an appropriate test. If you don't like this, maybe you
- * shouldn't be using eqsel for your operator...)
+ * Is the constant "=" to any of the column's most common values?
+ * (Although the given operator may not really be "=", we will
+ * assume that seeing whether it returns TRUE is an appropriate
+ * test. If you don't like this, maybe you shouldn't be using
+ * eqsel for your operator...)
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
@@ -271,18 +271,18 @@ eqsel(PG_FUNCTION_ARGS)
if (match)
{
/*
- * Constant is "=" to this common value. We know
- * selectivity exactly (or as exactly as VACUUM could
- * calculate it, anyway).
+ * Constant is "=" to this common value. We know selectivity
+ * exactly (or as exactly as VACUUM could calculate it,
+ * anyway).
*/
selec = numbers[i];
}
else
{
/*
- * Comparison is against a constant that is neither NULL
- * nor any of the common values. Its selectivity cannot
- * be more than this:
+ * Comparison is against a constant that is neither NULL nor
+ * any of the common values. Its selectivity cannot be more
+ * than this:
*/
double sumcommon = 0.0;
double otherdistinct;
@@ -293,10 +293,10 @@ eqsel(PG_FUNCTION_ARGS)
CLAMP_PROBABILITY(selec);
/*
- * and in fact it's probably a good deal less. We
- * approximate that all the not-common values share this
- * remaining fraction equally, so we divide by the number
- * of other distinct values.
+ * and in fact it's probably a good deal less. We approximate
+ * that all the not-common values share this remaining
+ * fraction equally, so we divide by the number of other
+ * distinct values.
*/
otherdistinct = get_variable_numdistinct(&vardata)
- nnumbers;
@@ -304,8 +304,8 @@ eqsel(PG_FUNCTION_ARGS)
selec /= otherdistinct;
/*
- * Another cross-check: selectivity shouldn't be estimated
- * as more than the least common "most common value".
+ * Another cross-check: selectivity shouldn't be estimated as
+ * more than the least common "most common value".
*/
if (nnumbers > 0 && selec > numbers[nnumbers - 1])
selec = numbers[nnumbers - 1];
@@ -319,14 +319,14 @@ eqsel(PG_FUNCTION_ARGS)
double ndistinct;
/*
- * Search is for a value that we do not know a priori, but we
- * will assume it is not NULL. Estimate the selectivity as
- * non-null fraction divided by number of distinct values, so
- * that we get a result averaged over all possible values
- * whether common or uncommon. (Essentially, we are assuming
- * that the not-yet-known comparison value is equally likely
- * to be any of the possible values, regardless of their
- * frequency in the table. Is that a good idea?)
+ * Search is for a value that we do not know a priori, but we will
+ * assume it is not NULL. Estimate the selectivity as non-null
+ * fraction divided by number of distinct values, so that we get a
+ * result averaged over all possible values whether common or
+ * uncommon. (Essentially, we are assuming that the not-yet-known
+ * comparison value is equally likely to be any of the possible
+ * values, regardless of their frequency in the table. Is that a
+ * good idea?)
*/
selec = 1.0 - stats->stanullfrac;
ndistinct = get_variable_numdistinct(&vardata);
@@ -334,8 +334,8 @@ eqsel(PG_FUNCTION_ARGS)
selec /= ndistinct;
/*
- * Cross-check: selectivity should never be estimated as more
- * than the most common value's.
+ * Cross-check: selectivity should never be estimated as more than
+ * the most common value's.
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
@@ -352,10 +352,10 @@ eqsel(PG_FUNCTION_ARGS)
else
{
/*
- * No VACUUM ANALYZE stats available, so make a guess using
- * estimated number of distinct values and assuming they are
- * equally common. (The guess is unlikely to be very good, but we
- * do know a few special cases.)
+ * No VACUUM ANALYZE stats available, so make a guess using estimated
+ * number of distinct values and assuming they are equally common.
+ * (The guess is unlikely to be very good, but we do know a few
+ * special cases.)
*/
selec = 1.0 / get_variable_numdistinct(&vardata);
}
@@ -386,17 +386,17 @@ neqsel(PG_FUNCTION_ARGS)
float8 result;
/*
- * We want 1 - eqsel() where the equality operator is the one
- * associated with this != operator, that is, its negator.
+ * We want 1 - eqsel() where the equality operator is the one associated
+ * with this != operator, that is, its negator.
*/
eqop = get_negator(operator);
if (eqop)
{
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
}
else
{
@@ -447,9 +447,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
/*
* If we have most-common-values info, add up the fractions of the MCV
- * entries that satisfy MCV OP CONST. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
- * represented by MCV entries.
+ * entries that satisfy MCV OP CONST. These fractions contribute directly
+ * to the result selectivity. Also add up the total fraction represented
+ * by MCV entries.
*/
mcv_selec = 0.0;
sumcommon = 0.0;
@@ -473,17 +473,17 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
}
/*
- * If there is a histogram, determine which bin the constant falls in,
- * and compute the resulting contribution to selectivity.
+ * If there is a histogram, determine which bin the constant falls in, and
+ * compute the resulting contribution to selectivity.
*
* Someday, VACUUM might store more than one histogram per rel/att,
- * corresponding to more than one possible sort ordering defined for
- * the column type. However, to make that work we will need to figure
- * out which staop to search for --- it's not necessarily the one we
- * have at hand! (For example, we might have a '<=' operator rather
- * than the '<' operator that will appear in staop.) For now, assume
- * that whatever appears in pg_statistic is sorted the same way our
- * operator sorts, or the reverse way if isgt is TRUE.
+ * corresponding to more than one possible sort ordering defined for the
+ * column type. However, to make that work we will need to figure out
+ * which staop to search for --- it's not necessarily the one we have at
+ * hand! (For example, we might have a '<=' operator rather than the '<'
+ * operator that will appear in staop.) For now, assume that whatever
+ * appears in pg_statistic is sorted the same way our operator sorts, or
+ * the reverse way if isgt is TRUE.
*/
hist_selec = 0.0;
@@ -511,10 +511,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
else
{
/*
- * Scan to find proper location. This could be made
- * faster by using a binary-search method, but it's
- * probably not worth the trouble for typical histogram
- * sizes.
+ * Scan to find proper location. This could be made faster by
+ * using a binary-search method, but it's probably not worth
+ * the trouble for typical histogram sizes.
*/
for (i = 1; i < nvalues; i++)
{
@@ -542,8 +541,8 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
* We have values[i-1] < constant < values[i].
*
* Convert the constant and the two nearest bin boundary
- * values to a uniform comparison scale, and do a
- * linear interpolation within this bin.
+ * values to a uniform comparison scale, and do a linear
+ * interpolation within this bin.
*/
if (convert_to_scalar(constval, consttype, &val,
values[i - 1], values[i],
@@ -564,10 +563,10 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
binfrac = (val - low) / (high - low);
/*
- * Watch out for the possibility that we got a
- * NaN or Infinity from the division. This
- * can happen despite the previous checks, if
- * for example "low" is -Infinity.
+ * Watch out for the possibility that we got a NaN
+ * or Infinity from the division. This can happen
+ * despite the previous checks, if for example
+ * "low" is -Infinity.
*/
if (isnan(binfrac) ||
binfrac < 0.0 || binfrac > 1.0)
@@ -577,22 +576,20 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
else
{
/*
- * Ideally we'd produce an error here, on the
- * grounds that the given operator shouldn't have
- * scalarXXsel registered as its selectivity func
- * unless we can deal with its operand types. But
- * currently, all manner of stuff is invoking
- * scalarXXsel, so give a default estimate until
- * that can be fixed.
+ * Ideally we'd produce an error here, on the grounds
+ * that the given operator shouldn't have scalarXXsel
+ * registered as its selectivity func unless we can
+ * deal with its operand types. But currently, all
+ * manner of stuff is invoking scalarXXsel, so give a
+ * default estimate until that can be fixed.
*/
binfrac = 0.5;
}
/*
- * Now, compute the overall selectivity across the
- * values represented by the histogram. We have i-1
- * full bins and binfrac partial bin below the
- * constant.
+ * Now, compute the overall selectivity across the values
+ * represented by the histogram. We have i-1 full bins
+ * and binfrac partial bin below the constant.
*/
histfrac = (double) (i - 1) + binfrac;
histfrac /= (double) (nvalues - 1);
@@ -608,9 +605,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
hist_selec = isgt ? (1.0 - histfrac) : histfrac;
/*
- * The histogram boundaries are only approximate to begin
- * with, and may well be out of date anyway. Therefore, don't
- * believe extremely small or large selectivity estimates.
+ * The histogram boundaries are only approximate to begin with,
+ * and may well be out of date anyway. Therefore, don't believe
+ * extremely small or large selectivity estimates.
*/
if (hist_selec < 0.0001)
hist_selec = 0.0001;
@@ -623,8 +620,8 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
/*
* Now merge the results from the MCV and histogram calculations,
- * realizing that the histogram covers only the non-null values that
- * are not listed in MCV.
+ * realizing that the histogram covers only the non-null values that are
+ * not listed in MCV.
*/
selec = 1.0 - stats->stanullfrac - sumcommon;
@@ -666,16 +663,15 @@ scalarltsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
@@ -684,8 +680,8 @@ scalarltsel(PG_FUNCTION_ARGS)
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -742,16 +738,15 @@ scalargtsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
@@ -760,8 +755,8 @@ scalargtsel(PG_FUNCTION_ARGS)
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -841,8 +836,8 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
variable = (Node *) linitial(args);
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -853,10 +848,10 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
consttype = ((Const *) other)->consttype;
/*
- * The right-hand const is type text or bytea for all supported
- * operators. We do not expect to see binary-compatible types here,
- * since const-folding should have relabeled the const to exactly
- * match the operator's declared type.
+ * The right-hand const is type text or bytea for all supported operators.
+ * We do not expect to see binary-compatible types here, since
+ * const-folding should have relabeled the const to exactly match the
+ * operator's declared type.
*/
if (consttype != TEXTOID && consttype != BYTEAOID)
{
@@ -865,15 +860,15 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
}
/*
- * Similarly, the exposed type of the left-hand side should be one
- * of those we know. (Do not look at vardata.atttype, which might be
- * something binary-compatible but different.) We can use it to choose
+ * Similarly, the exposed type of the left-hand side should be one of
+ * those we know. (Do not look at vardata.atttype, which might be
+ * something binary-compatible but different.) We can use it to choose
* the index opclass from which we must draw the comparison operators.
*
* NOTE: It would be more correct to use the PATTERN opclasses than the
- * simple ones, but at the moment ANALYZE will not generate statistics
- * for the PATTERN operators. But our results are so approximate
- * anyway that it probably hardly matters.
+ * simple ones, but at the moment ANALYZE will not generate statistics for
+ * the PATTERN operators. But our results are so approximate anyway that
+ * it probably hardly matters.
*/
vartype = vardata.vartype;
@@ -904,8 +899,8 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
pstatus = pattern_fixed_prefix(patt, ptype, &prefix, &rest);
/*
- * If necessary, coerce the prefix constant to the right type. (The
- * "rest" constant need not be changed.)
+ * If necessary, coerce the prefix constant to the right type. (The "rest"
+ * constant need not be changed.)
*/
if (prefix && prefix->consttype != vartype)
{
@@ -915,11 +910,11 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
{
case TEXTOID:
prefixstr = DatumGetCString(DirectFunctionCall1(textout,
- prefix->constvalue));
+ prefix->constvalue));
break;
case BYTEAOID:
prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix->constvalue));
+ prefix->constvalue));
break;
default:
elog(ERROR, "unrecognized consttype: %u",
@@ -945,16 +940,15 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
eqargs = list_make2(variable, prefix);
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqopr),
- PointerGetDatum(eqargs),
- Int32GetDatum(varRelid)));
+ ObjectIdGetDatum(eqopr),
+ PointerGetDatum(eqargs),
+ Int32GetDatum(varRelid)));
}
else
{
/*
* Not exact-match pattern. We estimate selectivity of the fixed
- * prefix and remainder of pattern separately, then combine the
- * two.
+ * prefix and remainder of pattern separately, then combine the two.
*/
Selectivity prefixsel;
Selectivity restsel;
@@ -1113,8 +1107,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
freq_true = 1.0 - numbers[0] - freq_null;
/*
- * Next derive frequency for false. Then use these as
- * appropriate to derive frequency for each case.
+ * Next derive frequency for false. Then use these as appropriate
+ * to derive frequency for each case.
*/
freq_false = 1.0 - freq_true - freq_null;
@@ -1157,10 +1151,9 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
else
{
/*
- * No most-common-value info available. Still have null
- * fraction information, so use it for IS [NOT] UNKNOWN.
- * Otherwise adjust for null fraction and assume an even split
- * for boolean tests.
+ * No most-common-value info available. Still have null fraction
+ * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
+ * for null fraction and assume an even split for boolean tests.
*/
switch (booltesttype)
{
@@ -1174,8 +1167,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
case IS_NOT_UNKNOWN:
/*
- * Select not unknown (not null) values. Calculate
- * from freq_null.
+ * Select not unknown (not null) values. Calculate from
+ * freq_null.
*/
selec = 1.0 - freq_null;
break;
@@ -1198,8 +1191,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
/*
* If we can't get variable statistics for the argument, perhaps
* clause_selectivity can do something with it. We ignore the
- * possibility of a NULL value when using clause_selectivity, and
- * just assume the value is either TRUE or FALSE.
+ * possibility of a NULL value when using clause_selectivity, and just
+ * assume the value is either TRUE or FALSE.
*/
switch (booltesttype)
{
@@ -1217,7 +1210,7 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
case IS_FALSE:
case IS_NOT_TRUE:
selec = 1.0 - (double) clause_selectivity(root, arg,
- varRelid, jointype);
+ varRelid, jointype);
break;
default:
elog(ERROR, "unrecognized booltesttype: %d",
@@ -1366,17 +1359,16 @@ eqjoinsel(PG_FUNCTION_ARGS)
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run
- * through the lists to see which MCVs actually join to each other
- * with the given operator. This allows us to determine the exact
- * join selectivity for the portion of the relations represented
- * by the MCV lists. We still have to estimate for the remaining
- * population, but in a skewed distribution this gives us a big
- * leg up in accuracy. For motivation see the analysis in Y.
- * Ioannidis and S. Christodoulakis, "On the propagation of errors
- * in the size of join results", Technical Report 1018, Computer
- * Science Dept., University of Wisconsin, Madison, March 1991
- * (available from ftp.cs.wisc.edu).
+ * We have most-common-value lists for both relations. Run through
+ * the lists to see which MCVs actually join to each other with the
+ * given operator. This allows us to determine the exact join
+ * selectivity for the portion of the relations represented by the MCV
+ * lists. We still have to estimate for the remaining population, but
+ * in a skewed distribution this gives us a big leg up in accuracy.
+ * For motivation see the analysis in Y. Ioannidis and S.
+ * Christodoulakis, "On the propagation of errors in the size of join
+ * results", Technical Report 1018, Computer Science Dept., University
+ * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
*/
FmgrInfo eqproc;
bool *hasmatch1;
@@ -1400,20 +1392,20 @@ eqjoinsel(PG_FUNCTION_ARGS)
hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
/*
- * If we are doing any variant of JOIN_IN, pretend all the values
- * of the righthand relation are unique (ie, act as if it's been
+ * If we are doing any variant of JOIN_IN, pretend all the values of
+ * the righthand relation are unique (ie, act as if it's been
* DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand input
- * when considering JOIN_REVERSE_IN. But this is not so, because
- * the join clause we've been handed has not been commuted from
- * the way the parser originally wrote it. We know that the
- * unique side of the IN clause is *always* on the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input when
+ * considering JOIN_REVERSE_IN. But this is not so, because the join
+ * clause we've been handed has not been commuted from the way the
+ * parser originally wrote it. We know that the unique side of the IN
+ * clause is *always* on the right.
*
* NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
* JOIN_RIGHT here, because we do not have enough information to
- * determine which var is really on which side of the join.
- * Perhaps someday we should pass in more information.
+ * determine which var is really on which side of the join. Perhaps
+ * someday we should pass in more information.
*/
if (jointype == JOIN_IN ||
jointype == JOIN_REVERSE_IN ||
@@ -1428,10 +1420,10 @@ eqjoinsel(PG_FUNCTION_ARGS)
}
/*
- * Note we assume that each MCV will match at most one member of
- * the other MCV list. If the operator isn't really equality,
- * there could be multiple matches --- but we don't look for them,
- * both for speed and because the math wouldn't add up...
+ * Note we assume that each MCV will match at most one member of the
+ * other MCV list. If the operator isn't really equality, there could
+ * be multiple matches --- but we don't look for them, both for speed
+ * and because the math wouldn't add up...
*/
matchprodfreq = 0.0;
nmatches = 0;
@@ -1480,8 +1472,8 @@ eqjoinsel(PG_FUNCTION_ARGS)
pfree(hasmatch2);
/*
- * Compute total frequency of non-null values that are not in the
- * MCV lists.
+ * Compute total frequency of non-null values that are not in the MCV
+ * lists.
*/
otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
@@ -1491,10 +1483,10 @@ eqjoinsel(PG_FUNCTION_ARGS)
/*
* We can estimate the total selectivity from the point of view of
* relation 1 as: the known selectivity for matched MCVs, plus
- * unmatched MCVs that are assumed to match against random members
- * of relation 2's non-MCV population, plus non-MCV values that
- * are assumed to match against random members of relation 2's
- * unmatched MCVs plus non-MCV values.
+ * unmatched MCVs that are assumed to match against random members of
+ * relation 2's non-MCV population, plus non-MCV values that are
+ * assumed to match against random members of relation 2's unmatched
+ * MCVs plus non-MCV values.
*/
totalsel1 = matchprodfreq;
if (nd2 > nvalues2)
@@ -1512,9 +1504,9 @@ eqjoinsel(PG_FUNCTION_ARGS)
/*
* Use the smaller of the two estimates. This can be justified in
- * essentially the same terms as given below for the no-stats
- * case: to a first approximation, we are estimating from the
- * point of view of the relation with smaller nd.
+ * essentially the same terms as given below for the no-stats case: to
+ * a first approximation, we are estimating from the point of view of
+ * the relation with smaller nd.
*/
selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
}
@@ -1522,24 +1514,23 @@ eqjoinsel(PG_FUNCTION_ARGS)
{
/*
* We do not have MCV lists for both sides. Estimate the join
- * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2).
- * This is plausible if we assume that the join operator is strict
- * and the non-null values are about equally distributed: a given
- * non-null tuple of rel1 will join to either zero or
- * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are at
- * most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
- * selectivity of not more than (1-nullfrac1)*(1-nullfrac2)/nd2.
- * By the same logic it is not more than
- * (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression with MIN()
- * is an upper bound. Using the MIN() means we estimate from the
- * point of view of the relation with smaller nd (since the larger
- * nd is determining the MIN). It is reasonable to assume that
- * most tuples in this rel will have join partners, so the bound
- * is probably reasonably tight and should be taken as-is.
+ * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
+ * is plausible if we assume that the join operator is strict and the
+ * non-null values are about equally distributed: a given non-null
+ * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
+ * of rel2, so total join rows are at most
+ * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
+ * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
+ * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
+ * with MIN() is an upper bound. Using the MIN() means we estimate
+ * from the point of view of the relation with smaller nd (since the
+ * larger nd is determining the MIN). It is reasonable to assume that
+ * most tuples in this rel will have join partners, so the bound is
+ * probably reasonably tight and should be taken as-is.
*
* XXX Can we be smarter if we have an MCV list for just one side? It
- * seems that if we assume equal distribution for the other side,
- * we end up with the same answer anyway.
+ * seems that if we assume equal distribution for the other side, we
+ * end up with the same answer anyway.
*/
double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
@@ -1588,9 +1579,9 @@ neqjoinsel(PG_FUNCTION_ARGS)
{
result = DatumGetFloat8(DirectFunctionCall4(eqjoinsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int16GetDatum(jointype)));
+ Int16GetDatum(jointype)));
}
else
{
@@ -1812,10 +1803,10 @@ mergejoinscansel(PlannerInfo *root, Node *clause,
*rightscan = selec;
/*
- * Only one of the two fractions can really be less than 1.0; believe
- * the smaller estimate and reset the other one to exactly 1.0. If we
- * get exactly equal estimates (as can easily happen with self-joins),
- * believe neither.
+ * Only one of the two fractions can really be less than 1.0; believe the
+ * smaller estimate and reset the other one to exactly 1.0. If we get
+ * exactly equal estimates (as can easily happen with self-joins), believe
+ * neither.
*/
if (*leftscan > *rightscan)
*leftscan = 1.0;
@@ -1837,9 +1828,9 @@ fail:
*/
typedef struct
{
- Node *var; /* might be an expression, not just a Var */
- RelOptInfo *rel; /* relation it belongs to */
- double ndistinct; /* # distinct values */
+ Node *var; /* might be an expression, not just a Var */
+ RelOptInfo *rel; /* relation it belongs to */
+ double ndistinct; /* # distinct values */
} GroupVarInfo;
static List *
@@ -1999,9 +1990,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
/*
* If we find any variable-free GROUP BY item, then either it is a
- * constant (and we can ignore it) or it contains a volatile
- * function; in the latter case we punt and assume that each input
- * row will yield a distinct group.
+ * constant (and we can ignore it) or it contains a volatile function;
+ * in the latter case we punt and assume that each input row will
+ * yield a distinct group.
*/
if (varshere == NIL)
{
@@ -2031,9 +2022,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Steps 3/4: group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove
- * these Vars from the newvarinfos list for the next iteration. This
- * is the easiest way to group Vars of same rel together.
+ * varinfos, plus all other Vars in the same relation. We remove these
+ * Vars from the newvarinfos list for the next iteration. This is the
+ * easiest way to group Vars of same rel together.
*/
numdistinct = 1.0;
@@ -2075,11 +2066,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
if (rel->tuples > 0)
{
/*
- * Clamp to size of rel, or size of rel / 10 if multiple Vars.
- * The fudge factor is because the Vars are probably correlated
- * but we don't know by how much. We should never clamp to less
- * than the largest ndistinct value for any of the Vars, though,
- * since there will surely be at least that many groups.
+ * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
+ * fudge factor is because the Vars are probably correlated but we
+ * don't know by how much. We should never clamp to less than the
+ * largest ndistinct value for any of the Vars, though, since
+ * there will surely be at least that many groups.
*/
double clamp = rel->tuples;
@@ -2179,8 +2170,8 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
else
{
/*
- * Believe a default ndistinct only if it came from stats.
- * Otherwise punt and return 0.1, per comments above.
+ * Believe a default ndistinct only if it came from stats. Otherwise
+ * punt and return 0.1, per comments above.
*/
if (ndistinct == DEFAULT_NUM_DISTINCT)
{
@@ -2195,21 +2186,20 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
avgfreq = (1.0 - stanullfrac) / ndistinct;
/*
- * Adjust ndistinct to account for restriction clauses. Observe we
- * are assuming that the data distribution is affected uniformly by
- * the restriction clauses!
+ * Adjust ndistinct to account for restriction clauses. Observe we are
+ * assuming that the data distribution is affected uniformly by the
+ * restriction clauses!
*
- * XXX Possibly better way, but much more expensive: multiply by
- * selectivity of rel's restriction clauses that mention the target
- * Var.
+ * XXX Possibly better way, but much more expensive: multiply by selectivity
+ * of rel's restriction clauses that mention the target Var.
*/
if (vardata.rel)
ndistinct *= vardata.rel->rows / vardata.rel->tuples;
/*
- * Initial estimate of bucketsize fraction is 1/nbuckets as long as
- * the number of buckets is less than the expected number of distinct
- * values; otherwise it is 1/ndistinct.
+ * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
+ * number of buckets is less than the expected number of distinct values;
+ * otherwise it is 1/ndistinct.
*/
if (ndistinct > nbuckets)
estfract = 1.0 / nbuckets;
@@ -2239,16 +2229,15 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
}
/*
- * Adjust estimated bucketsize upward to account for skewed
- * distribution.
+ * Adjust estimated bucketsize upward to account for skewed distribution.
*/
if (avgfreq > 0.0 && mcvfreq > avgfreq)
estfract *= mcvfreq / avgfreq;
/*
* Clamp bucketsize to sane range (the above adjustment could easily
- * produce an out-of-range result). We set the lower bound a little
- * above zero, since zero isn't a very sane result.
+ * produce an out-of-range result). We set the lower bound a little above
+ * zero, since zero isn't a very sane result.
*/
if (estfract < 1.0e-6)
estfract = 1.0e-6;
@@ -2303,18 +2292,18 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
double *scaledlobound, double *scaledhibound)
{
/*
- * Both the valuetypid and the boundstypid should exactly match
- * the declared input type(s) of the operator we are invoked for,
- * so we just error out if either is not recognized.
+ * Both the valuetypid and the boundstypid should exactly match the
+ * declared input type(s) of the operator we are invoked for, so we just
+ * error out if either is not recognized.
*
- * XXX The histogram we are interpolating between points of could belong
- * to a column that's only binary-compatible with the declared type.
- * In essence we are assuming that the semantics of binary-compatible
- * types are enough alike that we can use a histogram generated with one
- * type's operators to estimate selectivity for the other's. This is
- * outright wrong in some cases --- in particular signed versus unsigned
+ * XXX The histogram we are interpolating between points of could belong to a
+ * column that's only binary-compatible with the declared type. In essence
+ * we are assuming that the semantics of binary-compatible types are
+ * enough alike that we can use a histogram generated with one type's
+ * operators to estimate selectivity for the other's. This is outright
+ * wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -2350,9 +2339,9 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
case TEXTOID:
case NAMEOID:
{
- char *valstr = convert_string_datum(value, valuetypid);
- char *lostr = convert_string_datum(lobound, boundstypid);
- char *histr = convert_string_datum(hibound, boundstypid);
+ char *valstr = convert_string_datum(value, valuetypid);
+ char *lostr = convert_string_datum(lobound, boundstypid);
+ char *histr = convert_string_datum(hibound, boundstypid);
convert_string_to_scalar(valstr, scaledvalue,
lostr, scaledlobound,
@@ -2444,8 +2433,8 @@ convert_numeric_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one numeric and one non-numeric operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one numeric and one non-numeric operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
@@ -2563,8 +2552,7 @@ convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
return 0.0; /* empty string has scalar value 0 */
/*
- * Since base is at least 10, need not consider more than about 20
- * chars
+ * Since base is at least 10, need not consider more than about 20 chars
*/
if (slen > 20)
slen = 20;
@@ -2628,8 +2616,8 @@ convert_string_datum(Datum value, Oid typid)
default:
/*
- * Can't get here unless someone tries to use scalarltsel on
- * an operator with one string and one non-string operand.
+ * Can't get here unless someone tries to use scalarltsel on an
+ * operator with one string and one non-string operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return NULL;
@@ -2642,16 +2630,16 @@ convert_string_datum(Datum value, Oid typid)
size_t xfrmlen2;
/*
- * Note: originally we guessed at a suitable output buffer size,
- * and only needed to call strxfrm twice if our guess was too
- * small. However, it seems that some versions of Solaris have
- * buggy strxfrm that can write past the specified buffer length
- * in that scenario. So, do it the dumb way for portability.
+ * Note: originally we guessed at a suitable output buffer size, and
+ * only needed to call strxfrm twice if our guess was too small.
+ * However, it seems that some versions of Solaris have buggy strxfrm
+ * that can write past the specified buffer length in that scenario.
+ * So, do it the dumb way for portability.
*
- * Yet other systems (e.g., glibc) sometimes return a smaller value
- * from the second call than the first; thus the Assert must be <=
- * not == as you'd expect. Can't any of these people program
- * their way out of a paper bag?
+ * Yet other systems (e.g., glibc) sometimes return a smaller value from
+ * the second call than the first; thus the Assert must be <= not ==
+ * as you'd expect. Can't any of these people program their way out
+ * of a paper bag?
*/
xfrmlen = strxfrm(NULL, val, 0);
xfrmstr = (char *) palloc(xfrmlen + 1);
@@ -2780,16 +2768,16 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
Interval *interval = DatumGetIntervalP(value);
/*
- * Convert the month part of Interval to days using
- * assumed average month length of 365.25/12.0 days. Not
- * too accurate, but plenty good enough for our purposes.
+ * Convert the month part of Interval to days using assumed
+ * average month length of 365.25/12.0 days. Not too
+ * accurate, but plenty good enough for our purposes.
*/
#ifdef HAVE_INT64_TIMESTAMP
- return interval->time + interval->day * (double)USECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * USECS_PER_DAY);
+ return interval->time + interval->day * (double) USECS_PER_DAY +
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
#else
return interval->time + interval->day * SECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * (double)SECS_PER_DAY);
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
#endif
}
case RELTIMEOID:
@@ -2827,8 +2815,8 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one timevalue and one non-timevalue operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one timevalue and one non-timevalue operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
@@ -2875,8 +2863,8 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of
- * other relations will be treated as pseudoconstants.
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
examine_variable(root, right, varRelid, &rdata);
@@ -2995,18 +2983,18 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
{
vardata->statsTuple = SearchSysCache(STATRELATT,
ObjectIdGetDatum(relid),
- Int16GetDatum(var->varattno),
+ Int16GetDatum(var->varattno),
0, 0);
}
else
{
/*
- * XXX This means the Var comes from a JOIN or sub-SELECT.
- * Later add code to dig down into the join etc and see if we
- * can trace the variable to something with stats. (But
- * beware of sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps
- * there are no cases where this would really be useful,
- * because we'd have flattened the subselect if it is??)
+ * XXX This means the Var comes from a JOIN or sub-SELECT. Later
+ * add code to dig down into the join etc and see if we can trace
+ * the variable to something with stats. (But beware of
+ * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
+ * cases where this would really be useful, because we'd have
+ * flattened the subselect if it is??)
*/
}
@@ -3031,9 +3019,9 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (varRelid == 0 || bms_is_member(varRelid, varnos))
{
onerel = find_base_rel(root,
- (varRelid ? varRelid : bms_singleton_member(varnos)));
+ (varRelid ? varRelid : bms_singleton_member(varnos)));
vardata->rel = onerel;
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
/* else treat it as a constant */
break;
@@ -3042,13 +3030,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
{
/* treat it as a variable of a join relation */
vardata->rel = find_join_rel(root, varnos);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
else if (bms_is_member(varRelid, varnos))
{
/* ignore the vars belonging to other relations */
vardata->rel = find_base_rel(root, varRelid);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
/* note: no point in expressional-index search here */
}
/* else treat it as a constant */
@@ -3064,13 +3052,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to
- * match it to expressional index columns, in hopes of finding
- * some statistics.
+ * We have an expression in vars of a single relation. Try to match
+ * it to expressional index columns, in hopes of finding some
+ * statistics.
*
- * XXX it's conceivable that there are multiple matches with
- * different index opclasses; if so, we need to pick one that
- * matches the operator we are estimating for. FIXME later.
+ * XXX it's conceivable that there are multiple matches with different
+ * index opclasses; if so, we need to pick one that matches the
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -3105,8 +3093,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (equal(node, indexkey))
{
/*
- * Found a match ... is it a unique index? Tests
- * here should match has_unique_index().
+ * Found a match ... is it a unique index? Tests here
+ * should match has_unique_index().
*/
if (index->unique &&
index->ncolumns == 1 &&
@@ -3114,8 +3102,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
vardata->isunique = true;
/* Has it got stats? */
vardata->statsTuple = SearchSysCache(STATRELATT,
- ObjectIdGetDatum(index->indexoid),
- Int16GetDatum(pos + 1),
+ ObjectIdGetDatum(index->indexoid),
+ Int16GetDatum(pos + 1),
0, 0);
if (vardata->statsTuple)
break;
@@ -3145,9 +3133,9 @@ get_variable_numdistinct(VariableStatData *vardata)
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where we
- * can get an estimate even without a pg_statistic entry, or can get a
- * better value than is in pg_statistic.
+ * Determine the stadistinct value to use. There are cases where we can
+ * get an estimate even without a pg_statistic entry, or can get a better
+ * value than is in pg_statistic.
*/
if (HeapTupleIsValid(vardata->statsTuple))
{
@@ -3162,16 +3150,15 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special estimates
- * for?
+ * Are there any other datatypes we should wire in special estimates for?
*/
stadistinct = 2.0;
}
else
{
/*
- * We don't keep statistics for system columns, but in some cases
- * we can infer distinctness anyway.
+ * We don't keep statistics for system columns, but in some cases we
+ * can infer distinctness anyway.
*/
if (vardata->var && IsA(vardata->var, Var))
{
@@ -3199,8 +3186,8 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* If there is a unique index for the variable, assume it is unique no
- * matter what pg_statistic says (the statistics could be out of
- * date). Can skip search if we already think it's unique.
+ * matter what pg_statistic says (the statistics could be out of date).
+ * Can skip search if we already think it's unique.
*/
if (stadistinct != -1.0)
{
@@ -3235,8 +3222,8 @@ get_variable_numdistinct(VariableStatData *vardata)
return floor((-stadistinct * ntuples) + 0.5);
/*
- * With no data, estimate ndistinct = ntuples if the table is small,
- * else use default.
+ * With no data, estimate ndistinct = ntuples if the table is small, else
+ * use default.
*/
if (ntuples < DEFAULT_NUM_DISTINCT)
return ntuples;
@@ -3276,12 +3263,10 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
get_typlenbyval(vardata->atttype, &typLen, &typByVal);
/*
- * If there is a histogram, grab the last or first value as
- * appropriate.
+ * If there is a histogram, grab the last or first value as appropriate.
*
- * If there is a histogram that is sorted with some other operator than
- * the one we want, fail --- this suggests that there is data we can't
- * use.
+ * If there is a histogram that is sorted with some other operator than the
+ * one we want, fail --- this suggests that there is data we can't use.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -3327,9 +3312,9 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
/*
* If we have most-common-values info, look for a large MCV. This is
- * needed even if we also have a histogram, since the histogram
- * excludes the MCVs. However, usually the MCVs will not be the
- * extreme values, so avoid unnecessary data copying.
+ * needed even if we also have a histogram, since the histogram excludes
+ * the MCVs. However, usually the MCVs will not be the extreme values, so
+ * avoid unnecessary data copying.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -3411,7 +3396,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
@@ -3453,16 +3438,16 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
}
/*
- * XXX I suspect isalpha() is not an adequately locale-sensitive
- * test for characters that can vary under case folding?
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (case_insensitive && isalpha((unsigned char) patt[pos]))
break;
/*
* NOTE: this code used to think that %% meant a literal %, but
- * textlike() itself does not think that, and the SQL92 spec
- * doesn't say any such thing either.
+ * textlike() itself does not think that, and the SQL92 spec doesn't
+ * say any such thing either.
*/
match[match_pos++] = patt[pos];
}
@@ -3487,8 +3472,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
/* in LIKE, an empty pattern is an exact match! */
if (pos == pattlen)
- return Pattern_Prefix_Exact; /* reached end of pattern, so
- * exact */
+ return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
if (match_pos > 0)
return Pattern_Prefix_Partial;
@@ -3511,14 +3495,14 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
@@ -3535,8 +3519,8 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
}
/*
- * If unquoted | is present at paren level 0 in pattern, then there
- * are multiple alternatives for the start of the string.
+ * If unquoted | is present at paren level 0 in pattern, then there are
+ * multiple alternatives for the start of the string.
*/
paren_depth = 0;
for (pos = 1; patt[pos]; pos++)
@@ -3568,15 +3552,14 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
prev_match_pos = match_pos = 0;
/* note start at pos 1 to skip leading ^ */
- for (prev_pos = pos = 1; patt[pos]; )
+ for (prev_pos = pos = 1; patt[pos];)
{
- int len;
+ int len;
/*
- * Check for characters that indicate multiple possible matches
- * here. XXX I suspect isalpha() is not an adequately
- * locale-sensitive test for characters that can vary under case
- * folding?
+ * Check for characters that indicate multiple possible matches here.
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (patt[pos] == '.' ||
patt[pos] == '(' ||
@@ -3586,8 +3569,8 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
break;
/*
- * In AREs, backslash followed by alphanumeric is an escape, not
- * a quoted character. Must treat it as having multiple possible
+ * In AREs, backslash followed by alphanumeric is an escape, not a
+ * quoted character. Must treat it as having multiple possible
* matches.
*/
if (patt[pos] == '\\' && isalnum((unsigned char) patt[pos + 1]))
@@ -3595,8 +3578,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
/*
* Check for quantifiers. Except for +, this means the preceding
- * character is optional, so we must remove it from the prefix
- * too!
+ * character is optional, so we must remove it from the prefix too!
*/
if (patt[pos] == '*' ||
patt[pos] == '?' ||
@@ -3716,8 +3698,8 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
/* Assume scalargtsel is appropriate for all supported types */
prefixsel = DatumGetFloat8(DirectFunctionCall4(scalargtsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*-------
@@ -3738,13 +3720,13 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
/* Assume scalarltsel is appropriate for all supported types */
topsel = DatumGetFloat8(DirectFunctionCall4(scalarltsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*
- * Merge the two selectivities in the same way as for a range
- * query (see clauselist_selectivity()).
+ * Merge the two selectivities in the same way as for a range query
+ * (see clauselist_selectivity()).
*/
prefixsel = topsel + prefixsel - 1.0;
@@ -3752,21 +3734,20 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
prefixsel += nulltestsel(root, IS_NULL, variable, 0);
/*
- * A zero or slightly negative prefixsel should be converted into
- * a small positive value; we probably are dealing with a very
- * tight range and got a bogus result due to roundoff errors.
- * However, if prefixsel is very negative, then we probably have
- * default selectivity estimates on one or both sides of the
- * range. In that case, insert a not-so-wildly-optimistic default
- * estimate.
+ * A zero or slightly negative prefixsel should be converted into a
+ * small positive value; we probably are dealing with a very tight
+ * range and got a bogus result due to roundoff errors. However, if
+ * prefixsel is very negative, then we probably have default
+ * selectivity estimates on one or both sides of the range. In that
+ * case, insert a not-so-wildly-optimistic default estimate.
*/
if (prefixsel <= 0.0)
{
if (prefixsel < -0.01)
{
/*
- * No data available --- use a default estimate that is
- * small, but not real small.
+ * No data available --- use a default estimate that is small,
+ * but not real small.
*/
prefixsel = 0.005;
}
@@ -3795,8 +3776,7 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
#define FIXED_CHAR_SEL 0.20 /* about 1/5 */
#define CHAR_RANGE_SEL 0.25
-#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match
- * end-of-string */
+#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
#define FULL_WILDCARD_SEL 5.0
#define PARTIAL_WILDCARD_SEL 2.0
@@ -3816,7 +3796,7 @@ like_selectivity(Const *patt_const, bool case_insensitive)
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
@@ -3895,8 +3875,8 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
else if (patt[pos] == '|' && paren_depth == 0)
{
/*
- * If unquoted | is present at paren level 0 in pattern, we
- * have multiple alternatives; sum their probabilities.
+ * If unquoted | is present at paren level 0 in pattern, we have
+ * multiple alternatives; sum their probabilities.
*/
sel += regex_selectivity_sub(patt + (pos + 1),
pattlen - (pos + 1),
@@ -3970,14 +3950,14 @@ regex_selectivity(Const *patt_const, bool case_insensitive)
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
@@ -4062,7 +4042,7 @@ make_greater_string(const Const *str_const)
if (datatype == NAMEOID)
{
workstr = DatumGetCString(DirectFunctionCall1(nameout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
else if (datatype == BYTEAOID)
@@ -4084,7 +4064,7 @@ make_greater_string(const Const *str_const)
else
{
workstr = DatumGetCString(DirectFunctionCall1(textout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
@@ -4120,8 +4100,8 @@ make_greater_string(const Const *str_const)
*lastchar = savelastchar;
/*
- * Truncate off the last character, which might be more than 1
- * byte, depending on the character encoding.
+ * Truncate off the last character, which might be more than 1 byte,
+ * depending on the character encoding.
*/
if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
len = pg_mbcliplen(workstr, len, len - 1);
@@ -4221,27 +4201,27 @@ genericcostestimate(PlannerInfo *root,
List *selectivityQuals;
/*
- * If the index is partial, AND the index predicate with the
- * explicitly given indexquals to produce a more accurate idea of the
- * index selectivity. This may produce redundant clauses. We get rid
- * of exact duplicates in the code below. We expect that most cases
- * of partial redundancy (such as "x < 4" from the qual and "x < 5"
- * from the predicate) will be recognized and handled correctly by
- * clauselist_selectivity(). This assumption is somewhat fragile,
- * since it depends on predicate_implied_by() and clauselist_selectivity()
+ * If the index is partial, AND the index predicate with the explicitly
+ * given indexquals to produce a more accurate idea of the index
+ * selectivity. This may produce redundant clauses. We get rid of exact
+ * duplicates in the code below. We expect that most cases of partial
+ * redundancy (such as "x < 4" from the qual and "x < 5" from the
+ * predicate) will be recognized and handled correctly by
+ * clauselist_selectivity(). This assumption is somewhat fragile, since
+ * it depends on predicate_implied_by() and clauselist_selectivity()
* having similar capabilities, and there are certainly many cases where
- * we will end up with a too-low selectivity estimate. This will bias the
+ * we will end up with a too-low selectivity estimate. This will bias the
* system in favor of using partial indexes where possible, which is not
* necessarily a bad thing. But it'd be nice to do better someday.
*
- * Note that index->indpred and indexQuals are both in implicit-AND form,
- * so ANDing them together just takes merging the lists. However,
- * eliminating duplicates is a bit trickier because indexQuals
- * contains RestrictInfo nodes and the indpred does not. It is okay
- * to pass a mixed list to clauselist_selectivity, but we have to work
- * a bit to generate a list without logical duplicates. (We could
- * just list_union indpred and strippedQuals, but then we'd not get
- * caching of per-qual selectivity estimates.)
+ * Note that index->indpred and indexQuals are both in implicit-AND form, so
+ * ANDing them together just takes merging the lists. However,
+ * eliminating duplicates is a bit trickier because indexQuals contains
+ * RestrictInfo nodes and the indpred does not. It is okay to pass a
+ * mixed list to clauselist_selectivity, but we have to work a bit to
+ * generate a list without logical duplicates. (We could just list_union
+ * indpred and strippedQuals, but then we'd not get caching of per-qual
+ * selectivity estimates.)
*/
if (index->indpred != NIL)
{
@@ -4269,8 +4249,8 @@ genericcostestimate(PlannerInfo *root,
numIndexTuples = *indexSelectivity * index->rel->tuples;
/*
- * We can bound the number of tuples by the index size in any case.
- * Also, always estimate at least one tuple is touched, even when
+ * We can bound the number of tuples by the index size in any case. Also,
+ * always estimate at least one tuple is touched, even when
* indexSelectivity estimate is tiny.
*/
if (numIndexTuples > index->tuples)
@@ -4281,9 +4261,9 @@ genericcostestimate(PlannerInfo *root,
/*
* Estimate the number of index pages that will be retrieved.
*
- * For all currently-supported index types, the first page of the index
- * is a metadata page, and we should figure on fetching that plus a
- * pro-rated fraction of the remaining pages.
+ * For all currently-supported index types, the first page of the index is a
+ * metadata page, and we should figure on fetching that plus a pro-rated
+ * fraction of the remaining pages.
*/
if (index->pages > 1 && index->tuples > 0)
{
@@ -4304,15 +4284,15 @@ genericcostestimate(PlannerInfo *root,
/*
* CPU cost: any complex expressions in the indexquals will need to be
- * evaluated once at the start of the scan to reduce them to runtime
- * keys to pass to the index AM (see nodeIndexscan.c). We model the
- * per-tuple CPU costs as cpu_index_tuple_cost plus one
- * cpu_operator_cost per indexqual operator.
+ * evaluated once at the start of the scan to reduce them to runtime keys
+ * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
+ * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
+ * indexqual operator.
*
- * Note: this neglects the possible costs of rechecking lossy operators
- * and OR-clause expressions. Detecting that that might be needed
- * seems more expensive than it's worth, though, considering all the
- * other inaccuracies here ...
+ * Note: this neglects the possible costs of rechecking lossy operators and
+ * OR-clause expressions. Detecting that that might be needed seems more
+ * expensive than it's worth, though, considering all the other
+ * inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
qual_op_cost = cpu_operator_cost * list_length(indexQuals);
@@ -4351,15 +4331,14 @@ btcostestimate(PG_FUNCTION_ARGS)
ListCell *l;
/*
- * For a btree scan, only leading '=' quals plus inequality quals
- * for the immediately next attribute contribute to index selectivity
- * (these are the "boundary quals" that determine the starting and
- * stopping points of the index scan). Additional quals can suppress
- * visits to the heap, so it's OK to count them in indexSelectivity,
- * but they should not count for estimating numIndexTuples. So we must
- * examine the given indexQuals to find out which ones count as boundary
- * quals. We rely on the knowledge that they are given in index column
- * order.
+ * For a btree scan, only leading '=' quals plus inequality quals for the
+ * immediately next attribute contribute to index selectivity (these are
+ * the "boundary quals" that determine the starting and stopping points of
+ * the index scan). Additional quals can suppress visits to the heap, so
+ * it's OK to count them in indexSelectivity, but they should not count
+ * for estimating numIndexTuples. So we must examine the given indexQuals
+ * to find out which ones count as boundary quals. We rely on the
+ * knowledge that they are given in index column order.
*/
indexBoundQuals = NIL;
indexcol = 0;
@@ -4367,9 +4346,9 @@ btcostestimate(PG_FUNCTION_ARGS)
foreach(l, indexQuals)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Expr *clause;
- Oid clause_op;
- int op_strategy;
+ Expr *clause;
+ Oid clause_op;
+ int op_strategy;
Assert(IsA(rinfo, RestrictInfo));
clause = rinfo->clause;
@@ -4409,15 +4388,15 @@ btcostestimate(PG_FUNCTION_ARGS)
}
op_strategy = get_op_opclass_strategy(clause_op,
index->classlist[indexcol]);
- Assert(op_strategy != 0); /* not a member of opclass?? */
+ Assert(op_strategy != 0); /* not a member of opclass?? */
if (op_strategy == BTEqualStrategyNumber)
eqQualHere = true;
indexBoundQuals = lappend(indexBoundQuals, rinfo);
}
/*
- * If index is unique and we found an '=' clause for each column,
- * we can just assume numIndexTuples = 1 and skip the expensive
+ * If index is unique and we found an '=' clause for each column, we can
+ * just assume numIndexTuples = 1 and skip the expensive
* clauselist_selectivity calculations.
*/
if (index->unique && indexcol == index->ncolumns - 1 && eqQualHere)
@@ -4437,13 +4416,12 @@ btcostestimate(PG_FUNCTION_ARGS)
indexSelectivity, indexCorrelation);
/*
- * If we can get an estimate of the first column's ordering
- * correlation C from pg_statistic, estimate the index correlation as
- * C for a single-column index, or C * 0.75 for multiple columns.
- * (The idea here is that multiple columns dilute the importance of
- * the first column's ordering, but don't negate it entirely. Before
- * 8.0 we divided the correlation by the number of columns, but that
- * seems too strong.)
+ * If we can get an estimate of the first column's ordering correlation C
+ * from pg_statistic, estimate the index correlation as C for a
+ * single-column index, or C * 0.75 for multiple columns. (The idea here
+ * is that multiple columns dilute the importance of the first column's
+ * ordering, but don't negate it entirely. Before 8.0 we divided the
+ * correlation by the number of columns, but that seems too strong.)
*/
if (index->indexkeys[0] != 0)
{
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 73e7bb8ea8a..d3090413c4e 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.154 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.155 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,7 +116,7 @@ timestamp_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
@@ -138,7 +138,7 @@ timestamp_in(PG_FUNCTION_ARGS)
Datum
timestamp_out(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
char *result;
struct pg_tm tt,
*tm = &tt;
@@ -169,11 +169,12 @@ Datum
timestamp_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 typmod = PG_GETARG_INT32(2);
- Timestamp timestamp;
+ Timestamp timestamp;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
@@ -203,7 +204,7 @@ timestamp_recv(PG_FUNCTION_ARGS)
Datum
timestamp_send(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -223,7 +224,7 @@ timestamp_send(PG_FUNCTION_ARGS)
Datum
timestamp_scale(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
int32 typmod = PG_GETARG_INT32(1);
Timestamp result;
@@ -257,7 +258,6 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double TimestampScales[MAX_TIMESTAMP_PRECISION + 1] = {
1,
@@ -276,21 +276,21 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
if (typmod < 0 || typmod > MAX_TIMESTAMP_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp(%d) precision must be between %d and %d",
- typmod, 0, MAX_TIMESTAMP_PRECISION)));
+ errmsg("timestamp(%d) precision must be between %d and %d",
+ typmod, 0, MAX_TIMESTAMP_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
{
*time = ((*time + TimestampOffsets[typmod]) / TimestampScales[typmod]) *
- TimestampScales[typmod];
+ TimestampScales[typmod];
}
else
{
@@ -298,7 +298,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* TimestampScales[typmod]);
}
#else
- *time = rint((double)*time * TimestampScales[typmod]) / TimestampScales[typmod];
+ *time = rint((double) *time * TimestampScales[typmod]) / TimestampScales[typmod];
#endif
}
}
@@ -359,7 +359,7 @@ timestamptz_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
@@ -413,6 +413,7 @@ Datum
timestamptz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -540,7 +541,7 @@ interval_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
break;
default:
@@ -583,6 +584,7 @@ Datum
interval_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -643,6 +645,7 @@ interval_scale(PG_FUNCTION_ARGS)
PG_RETURN_INTERVAL_P(result);
}
+
/*
* Adjust interval for specified precision, in both YEAR to SECOND
* range and sub-second precision.
@@ -670,7 +673,6 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double IntervalScales[MAX_INTERVAL_PRECISION + 1] = {
1,
@@ -684,8 +686,8 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#endif
/*
- * Unspecified range and precision? Then not necessary to adjust.
- * Setting typmod to -1 is the convention for all types.
+ * Unspecified range and precision? Then not necessary to adjust. Setting
+ * typmod to -1 is the convention for all types.
*/
if (typmod != -1)
{
@@ -727,9 +729,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int)(interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
else if (range == INTERVAL_MASK(MINUTE))
@@ -747,10 +749,10 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
else if (range == INTERVAL_MASK(SECOND))
@@ -768,7 +770,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
minute = interval->time / USECS_PER_MINUTE;
interval->time -= minute * USECS_PER_MINUTE;
#else
- TMODULO(interval->time, minute, (double)SECS_PER_MINUTE);
+ TMODULO(interval->time, minute, (double) SECS_PER_MINUTE);
/* return subseconds too */
#endif
}
@@ -780,9 +782,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
/* DAY TO MINUTE */
@@ -794,9 +796,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* DAY TO SECOND */
@@ -815,9 +817,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* HOUR TO SECOND */
@@ -835,7 +837,6 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 hour;
-
#else
double hour;
#endif
@@ -847,7 +848,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
#endif
}
else
@@ -859,36 +860,35 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
if (precision < 0 || precision > MAX_INTERVAL_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval(%d) precision must be between %d and %d",
- precision, 0, MAX_INTERVAL_PRECISION)));
+ errmsg("interval(%d) precision must be between %d and %d",
+ precision, 0, MAX_INTERVAL_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely
- * consistent about rounding values that are exactly halfway
- * between integral values. On most platforms, rint() will
- * implement round-to-nearest-even, but the integer code
- * always rounds up (away from zero). Is it worth trying to
- * be consistent?
+ * Note: this round-to-nearest code is not completely consistent
+ * about rounding values that are exactly halfway between integral
+ * values. On most platforms, rint() will implement
+ * round-to-nearest-even, but the integer code always rounds up
+ * (away from zero). Is it worth trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (interval->time >= INT64CONST(0))
{
interval->time = ((interval->time +
- IntervalOffsets[precision]) /
- IntervalScales[precision]) *
- IntervalScales[precision];
+ IntervalOffsets[precision]) /
+ IntervalScales[precision]) *
+ IntervalScales[precision];
}
else
{
interval->time = -(((-interval->time +
- IntervalOffsets[precision]) /
+ IntervalOffsets[precision]) /
IntervalScales[precision]) *
- IntervalScales[precision]);
+ IntervalScales[precision]);
}
#else
interval->time = rint(((double) interval->time) *
- IntervalScales[precision]) /
- IntervalScales[precision];
+ IntervalScales[precision]) /
+ IntervalScales[precision];
#endif
}
}
@@ -1016,16 +1016,16 @@ dt2time(Timestamp jd, int *hour, int *min, int *sec, fsec_t *fsec)
* timezone) will be used.
*/
int
-timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
+timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
{
- Timestamp date;
+ Timestamp date;
Timestamp time;
pg_time_t utime;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (attimezone == NULL && HasCTZSet && tzp != NULL)
{
@@ -1057,7 +1057,7 @@ timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn,
dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
time = dt;
- TMODULO(time, date, (double)SECS_PER_DAY);
+ TMODULO(time, date, (double) SECS_PER_DAY);
if (time < 0)
{
@@ -1082,7 +1082,7 @@ recalc_t:
if (*fsec >= 1.0)
{
time = ceil(time);
- if (time >= (double)SECS_PER_DAY)
+ if (time >= (double) SECS_PER_DAY)
{
time = 0;
date += 1;
@@ -1104,8 +1104,8 @@ recalc_t:
}
/*
- * We have a brute force time zone per SQL99? Then use it without
- * change since we have already rotated to the time zone.
+ * We have a brute force time zone per SQL99? Then use it without change
+ * since we have already rotated to the time zone.
*/
if (attimezone == NULL && HasCTZSet)
{
@@ -1119,14 +1119,14 @@ recalc_t:
}
/*
- * If the time falls within the range of pg_time_t, use pg_localtime()
- * to rotate to the local time zone.
+ * If the time falls within the range of pg_time_t, use pg_localtime() to
+ * rotate to the local time zone.
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss.
- * This coding avoids hardwiring any assumptions about the width of
- * pg_time_t, so it should behave sanely on machines without int64.
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * coding avoids hardwiring any assumptions about the width of pg_time_t,
+ * so it should behave sanely on machines without int64.
*/
#ifdef HAVE_INT64_TIMESTAMP
dt = (dt - *fsec) / USECS_PER_SEC +
@@ -1139,7 +1139,7 @@ recalc_t:
if ((Timestamp) utime == dt)
{
struct pg_tm *tx = pg_localtime(&utime,
- attimezone ? attimezone : global_timezone);
+ attimezone ? attimezone : global_timezone);
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
@@ -1180,13 +1180,13 @@ recalc_t:
* Returns -1 on failure (value out of range).
*/
int
-tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *result)
+tm2timestamp(struct pg_tm * tm, fsec_t fsec, int *tzp, Timestamp *result)
{
#ifdef HAVE_INT64_TIMESTAMP
- int date;
+ int date;
int64 time;
#else
- double date,
+ double date,
time;
#endif
@@ -1220,7 +1220,7 @@ tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *result)
* Convert a interval data type to a tm structure.
*/
int
-interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec)
+interval2tm(Interval span, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 time;
@@ -1242,8 +1242,8 @@ interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec)
*fsec = time - (tm->tm_sec * USECS_PER_SEC);
#else
recalc:
- TMODULO(time, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(time, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(time, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(time, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(time, tm->tm_sec, 1.0);
time = TSROUND(time);
/* roundoff may need to propagate to higher-order fields */
@@ -1259,18 +1259,18 @@ recalc:
}
int
-tm2interval(struct pg_tm *tm, fsec_t fsec, Interval *span)
+tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
span->month = tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
- span->day = tm->tm_mday;
+ span->day = tm->tm_mday;
#ifdef HAVE_INT64_TIMESTAMP
span->time = (((((tm->tm_hour * INT64CONST(60)) +
- tm->tm_min) * INT64CONST(60)) +
- tm->tm_sec) * USECS_PER_SEC) + fsec;
+ tm->tm_min) * INT64CONST(60)) +
+ tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
- span->time = (((tm->tm_hour * (double)MINS_PER_HOUR) +
- tm->tm_min) * (double)SECS_PER_MINUTE) +
- tm->tm_sec + fsec;
+ span->time = (((tm->tm_hour * (double) MINS_PER_HOUR) +
+ tm->tm_min) * (double) SECS_PER_MINUTE) +
+ tm->tm_sec + fsec;
#endif
return 0;
@@ -1282,7 +1282,6 @@ time2t(const int hour, const int min, const int sec, const fsec_t fsec)
{
return (((((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec) * USECS_PER_SEC) + fsec;
} /* time2t() */
-
#else
static double
time2t(const int hour, const int min, const int sec, const fsec_t fsec)
@@ -1311,7 +1310,7 @@ dt2local(Timestamp dt, int tz)
Datum
timestamp_finite(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_BOOL(!TIMESTAMP_NOT_FINITE(timestamp));
}
@@ -1328,7 +1327,7 @@ interval_finite(PG_FUNCTION_ARGS)
*---------------------------------------------------------*/
void
-GetEpochTime(struct pg_tm *tm)
+GetEpochTime(struct pg_tm * tm)
{
struct pg_tm *t0;
pg_time_t epoch = 0;
@@ -1379,8 +1378,8 @@ timestamp_cmp_internal(Timestamp dt1, Timestamp dt2)
* When using float representation, we have to be wary of NaNs.
*
* We consider all NANs to be equal and larger than any non-NAN. This is
- * somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(dt1))
{
@@ -1667,10 +1666,10 @@ interval_cmp_internal(Interval *interval1, Interval *interval2)
span2 += interval2->month * INT64CONST(30) * USECS_PER_DAY;
span2 += interval2->day * INT64CONST(24) * USECS_PER_HOUR;
#else
- span1 += interval1->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span1 += interval1->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
- span2 += interval2->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span2 += interval2->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
+ span1 += interval1->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span1 += interval1->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
+ span2 += interval2->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span2 += interval2->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
#endif
return ((span1 < span2) ? -1 : (span1 > span2) ? 1 : 0);
@@ -1749,11 +1748,11 @@ interval_hash(PG_FUNCTION_ARGS)
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(Interval), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(Interval), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
- return hash_any((unsigned char *) key,
- sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
+ return hash_any((unsigned char *) key,
+ sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
}
/* overlaps_timestamp() --- implements the SQL92 OVERLAPS operator.
@@ -1766,9 +1765,9 @@ Datum
overlaps_timestamp(PG_FUNCTION_ARGS)
{
/*
- * The arguments are Timestamps, but we leave them as generic Datums
- * to avoid unnecessary conversions between value and reference forms
- * --- not to mention possible dereferences of null pointers.
+ * The arguments are Timestamps, but we leave them as generic Datums to
+ * avoid unnecessary conversions between value and reference forms --- not
+ * to mention possible dereferences of null pointers.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -1785,9 +1784,9 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timestamp_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -1835,8 +1834,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
if (TIMESTAMP_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -1846,8 +1845,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -1862,8 +1861,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -1871,8 +1870,7 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -1938,13 +1936,13 @@ timestamp_mi(PG_FUNCTION_ARGS)
result->day = 0;
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
/* interval_justify_hours()
* Adjust interval so 'time' contains less than a whole day, and
- * 'day' contains an integral number of days. This is useful for
+ * 'day' contains an integral number of days. This is useful for
* situations (such as non-TZ) where '1 day' = '24 hours' is valid,
* e.g. interval subtraction and division. The SQL standard requires
* such conversion in these cases, but not the conversion of days to months.
@@ -1952,8 +1950,8 @@ timestamp_mi(PG_FUNCTION_ARGS)
Datum
interval_justify_hours(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->month = span->month;
@@ -1963,8 +1961,8 @@ interval_justify_hours(PG_FUNCTION_ARGS)
result->time += span->day * USECS_PER_DAY;
TMODULO(result->time, result->day, USECS_PER_DAY);
#else
- result->time += span->day * (double)SECS_PER_DAY;
- TMODULO(result->time, result->day, (double)SECS_PER_DAY);
+ result->time += span->day * (double) SECS_PER_DAY;
+ TMODULO(result->time, result->day, (double) SECS_PER_DAY);
#endif
PG_RETURN_INTERVAL_P(result);
@@ -1977,8 +1975,8 @@ interval_justify_hours(PG_FUNCTION_ARGS)
Datum
interval_justify_days(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->day = span->day;
@@ -2003,7 +2001,7 @@ interval_justify_days(PG_FUNCTION_ARGS)
Datum
timestamp_pl_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Timestamp result;
@@ -2050,7 +2048,7 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
*tm = &tt;
fsec_t fsec;
int julian;
-
+
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
@@ -2076,7 +2074,7 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
Datum
timestamp_mi_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Interval tspan;
@@ -2277,7 +2275,9 @@ interval_mul(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
@@ -2303,7 +2303,7 @@ interval_mul(PG_FUNCTION_ARGS)
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
@@ -2322,7 +2322,9 @@ interval_div(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
@@ -2354,7 +2356,7 @@ interval_div(PG_FUNCTION_ARGS)
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
@@ -2386,10 +2388,10 @@ interval_accum(PG_FUNCTION_ARGS)
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
@@ -2398,8 +2400,8 @@ interval_accum(PG_FUNCTION_ARGS)
memcpy((void *) &N, DatumGetPointer(transdatums[1]), sizeof(Interval));
newsum = DatumGetIntervalP(DirectFunctionCall2(interval_pl,
- IntervalPGetDatum(&sumX),
- IntervalPGetDatum(newval)));
+ IntervalPGetDatum(&sumX),
+ IntervalPGetDatum(newval)));
N.time += 1;
transdatums[0] = IntervalPGetDatum(newsum);
@@ -2427,10 +2429,10 @@ interval_avg(PG_FUNCTION_ARGS)
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
@@ -2689,7 +2691,7 @@ Datum
timestamp_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -2728,7 +2730,7 @@ text_timestamp(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2750,7 +2752,7 @@ Datum
timestamptz_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -2788,7 +2790,7 @@ text_timestamptz(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp with time zone: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2815,7 +2817,7 @@ interval_text(PG_FUNCTION_ARGS)
int len;
str = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(interval)));
+ IntervalPGetDatum(interval)));
len = strlen(str) + VARHDRSZ;
@@ -2849,7 +2851,7 @@ text_interval(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type interval: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2870,7 +2872,7 @@ Datum
timestamp_trunc(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
Timestamp result;
int type,
val;
@@ -2898,26 +2900,27 @@ timestamp_trunc(PG_FUNCTION_ARGS)
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ break;
+ }
case DTK_MILLENNIUM:
/* see comments in timestamptz_trunc */
if (tm->tm_year > 0)
@@ -3032,34 +3035,35 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- redotz = true;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ redotz = true;
+ break;
+ }
/* one may consider DTK_THOUSAND and DTK_HUNDRED... */
case DTK_MILLENNIUM:
/*
* truncating to the millennium? what is this supposed to
- * mean? let us put the first year of the millennium...
- * i.e. -1000, 1, 1001, 2001...
+ * mean? let us put the first year of the millennium... i.e.
+ * -1000, 1, 1001, 2001...
*/
if (tm->tm_year > 0)
tm->tm_year = ((tm->tm_year + 999) / 1000) * 1000 - 999;
@@ -3076,8 +3080,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
case DTK_DECADE:
/*
- * truncating to the decade? first year of the decade.
- * must not be applied if year was truncated before!
+ * truncating to the decade? first year of the decade. must
+ * not be applied if year was truncated before!
*/
if (val != DTK_MILLENNIUM && val != DTK_CENTURY)
{
@@ -3126,8 +3130,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not "
- "supported", lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not "
+ "supported", lowunits)));
result = 0;
}
@@ -3143,8 +3147,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
@@ -3181,7 +3185,7 @@ interval_trunc(PG_FUNCTION_ARGS)
{
switch (val)
{
- /* fall through */
+ /* fall through */
case DTK_MILLENNIUM:
/* caution: C division may have negative remainder */
tm->tm_year = (tm->tm_year / 1000) * 1000;
@@ -3241,7 +3245,7 @@ interval_trunc(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
*result = *interval;
}
@@ -3263,7 +3267,7 @@ isoweek2date(int woy, int *year, int *mon, int *mday)
if (!*year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot calculate week number without year information")));
+ errmsg("cannot calculate week number without year information")));
/* fourth day of current year */
day4 = date2j(*year, 1, 4);
@@ -3298,8 +3302,8 @@ date2isoweek(int year, int mon, int mday)
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
@@ -3312,8 +3316,8 @@ date2isoweek(int year, int mon, int mday)
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
@@ -3352,8 +3356,8 @@ date2isoyear(int year, int mon, int mday)
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
@@ -3368,8 +3372,8 @@ date2isoyear(int year, int mon, int mday)
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
@@ -3393,7 +3397,7 @@ Datum
timestamp_part(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
float8 result;
int type,
val;
@@ -3484,9 +3488,9 @@ timestamp_part(PG_FUNCTION_ARGS)
case DTK_DECADE:
/*
- * what is a decade wrt dates? let us assume that decade
- * 199 is 1990 thru 1999... decade 0 starts on year 1 BC,
- * and -1 is 11 BC thru 2 BC...
+ * what is a decade wrt dates? let us assume that decade 199
+ * is 1990 thru 1999... decade 0 starts on year 1 BC, and -1
+ * is 11 BC thru 2 BC...
*/
if (tm->tm_year >= 0)
result = tm->tm_year / 10;
@@ -3521,10 +3525,10 @@ timestamp_part(PG_FUNCTION_ARGS)
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
@@ -3549,20 +3553,19 @@ timestamp_part(PG_FUNCTION_ARGS)
TimestampTz timestamptz;
/*
- * convert to timestamptz to produce consistent
- * results
+ * convert to timestamptz to produce consistent results
*/
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
tz = DetermineTimeZoneOffset(tm, global_timezone);
if (tm2timestamp(tm, fsec, &tz, &timestamptz) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
#ifdef HAVE_INT64_TIMESTAMP
result = (timestamptz - SetEpochTimestamp()) / 1000000.0;
@@ -3601,7 +3604,7 @@ timestamp_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp units \"%s\" not recognized", lowunits)));
+ errmsg("timestamp units \"%s\" not recognized", lowunits)));
result = 0;
}
@@ -3657,12 +3660,12 @@ timestamptz_part(PG_FUNCTION_ARGS)
case DTK_TZ_MINUTE:
result = -tz;
result /= MINS_PER_HOUR;
- FMODULO(result, dummy, (double)MINS_PER_HOUR);
+ FMODULO(result, dummy, (double) MINS_PER_HOUR);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
@@ -3749,18 +3752,18 @@ timestamptz_part(PG_FUNCTION_ARGS)
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
@@ -3771,7 +3774,7 @@ timestamptz_part(PG_FUNCTION_ARGS)
{
case DTK_EPOCH:
#ifdef HAVE_INT64_TIMESTAMP
- result = (timestamp - SetEpochTimestamp()) /1000000.0;
+ result = (timestamp - SetEpochTimestamp()) / 1000000.0;
#else
result = timestamp - SetEpochTimestamp();
#endif
@@ -3797,8 +3800,8 @@ timestamptz_part(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
}
@@ -3806,8 +3809,8 @@ timestamptz_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
@@ -3913,8 +3916,8 @@ interval_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("interval units \"%s\" not supported",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -3933,7 +3936,7 @@ interval_part(PG_FUNCTION_ARGS)
result = interval->time;
#endif
result += (DAYS_PER_YEAR * SECS_PER_DAY) * (interval->month / MONTHS_PER_YEAR);
- result += ((double)DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
+ result += ((double) DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
result += interval->day * SECS_PER_DAY;
}
else
@@ -3942,7 +3945,7 @@ interval_part(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
@@ -3950,9 +3953,9 @@ interval_part(PG_FUNCTION_ARGS)
}
-/* timestamp_zone()
- * Encode timestamp type with specified time zone.
- * This function is just timestamp2timestamptz() except instead of
+/* timestamp_zone()
+ * Encode timestamp type with specified time zone.
+ * This function is just timestamp2timestamptz() except instead of
* shifting to the global timezone, we shift to the specified timezone.
* This is different from the other AT TIME ZONE cases because instead
* of shifting to a _to_ a new time zone, it sets the time to _be_ the
@@ -3963,20 +3966,20 @@ timestamp_zone(PG_FUNCTION_ARGS)
{
text *zone = PG_GETARG_TEXT_P(0);
Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
- TimestampTz result;
+ TimestampTz result;
int tz;
- pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
-
+ pg_tz *tzp;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
+
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -3985,7 +3988,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
@@ -4032,7 +4035,7 @@ Datum
timestamp_izone(PG_FUNCTION_ARGS)
{
Interval *zone = PG_GETARG_INTERVAL_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
TimestampTz result;
int tz;
@@ -4042,9 +4045,9 @@ timestamp_izone(PG_FUNCTION_ARGS)
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = zone->time / USECS_PER_SEC;
@@ -4063,7 +4066,7 @@ timestamp_izone(PG_FUNCTION_ARGS)
Datum
timestamp_timestamptz(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_TIMESTAMPTZ(timestamp2timestamptz(timestamp));
}
@@ -4139,17 +4142,17 @@ timestamptz_zone(PG_FUNCTION_ARGS)
Timestamp result;
int tz;
pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -4158,7 +4161,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, &tz, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
@@ -4215,9 +4218,9 @@ timestamptz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index 370d3e81101..7dbbed16f69 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.46 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.47 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,8 +83,8 @@ bit_in(PG_FUNCTION_ARGS)
else
{
/*
- * Otherwise it's binary. This allows things like cast('1001' as
- * bit) to work transparently.
+ * Otherwise it's binary. This allows things like cast('1001' as bit)
+ * to work transparently.
*/
bit_not_hex = true;
sp = input_string;
@@ -98,16 +98,16 @@ bit_in(PG_FUNCTION_ARGS)
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
else if (bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(atttypmod);
/* set to 0 so that *r is always initialised and string is zero-padded */
@@ -204,8 +204,8 @@ bit_out(PG_FUNCTION_ARGS)
}
/*
- * Go back one step if we printed a hex number that was not part of
- * the bitstring anymore
+ * Go back one step if we printed a hex number that was not part of the
+ * bitstring anymore
*/
if (i > len)
r--;
@@ -222,6 +222,7 @@ Datum
bit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -239,14 +240,14 @@ bit_recv(PG_FUNCTION_ARGS)
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(bitlen);
result = (VarBit *) palloc(len);
@@ -301,8 +302,8 @@ bit(PG_FUNCTION_ARGS)
if (!isExplicit)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- VARBITLEN(arg), len)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ VARBITLEN(arg), len)));
rlen = VARBITTOTALLEN(len);
/* set to 0 so that string is zero-padded */
@@ -314,9 +315,9 @@ bit(PG_FUNCTION_ARGS)
Min(VARBITBYTES(result), VARBITBYTES(arg)));
/*
- * Make sure last byte is zero-padded if needed. This is useless but
- * safe if source data was shorter than target length (we assume the
- * last byte of the source data was itself correctly zero-padded).
+ * Make sure last byte is zero-padded if needed. This is useless but safe
+ * if source data was shorter than target length (we assume the last byte
+ * of the source data was itself correctly zero-padded).
*/
ipad = VARBITPAD(result);
if (ipad > 0)
@@ -378,8 +379,8 @@ varbit_in(PG_FUNCTION_ARGS)
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
@@ -500,6 +501,7 @@ Datum
varbit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -517,8 +519,8 @@ varbit_recv(PG_FUNCTION_ARGS)
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen > atttypmod)
ereport(ERROR,
@@ -874,8 +876,8 @@ bitsubstr(PG_FUNCTION_ARGS)
else
{
/*
- * OK, we've got a true substring starting at position s1-1 and
- * ending at position e1-1
+ * OK, we've got a true substring starting at position s1-1 and ending
+ * at position e1-1
*/
rbitlen = e1 - s1;
len = VARBITTOTALLEN(rbitlen);
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 84fcc97ccdb..1377e7cc6d2 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.112 2005/07/29 12:59:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.113 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,7 +81,7 @@ bpchar_input(const char *s, size_t len, int32 atttypmod)
maxlen = len;
else
{
- size_t charlen; /* number of CHARACTERS in the input */
+ size_t charlen; /* number of CHARACTERS in the input */
maxlen = atttypmod - VARHDRSZ;
charlen = pg_mbstrlen_with_len(s, len);
@@ -106,16 +106,16 @@ bpchar_input(const char *s, size_t len, int32 atttypmod)
}
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len = mbmaxlen;
}
else
{
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
@@ -141,6 +141,7 @@ Datum
bpcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -178,6 +179,7 @@ Datum
bpcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -226,8 +228,8 @@ bpchar(PG_FUNCTION_ARGS)
char *r;
char *s;
int i;
- int charlen; /* number of characters in the input string
- * + VARHDRSZ */
+ int charlen; /* number of characters in the input string +
+ * VARHDRSZ */
/* No work if typmod is invalid */
if (maxlen < (int32) VARHDRSZ)
@@ -254,24 +256,24 @@ bpchar(PG_FUNCTION_ARGS)
for (i = maxmblen - VARHDRSZ; i < len - VARHDRSZ; i++)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
- (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character(%d)",
- maxlen - VARHDRSZ)));
+ (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
+ errmsg("value too long for type character(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen;
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len;
}
else
{
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
@@ -407,8 +409,8 @@ varchar_input(const char *s, size_t len, int32 atttypmod)
if (s[j] != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- (int) maxlen)));
+ errmsg("value too long for type character varying(%d)",
+ (int) maxlen)));
}
len = mbmaxlen;
@@ -429,6 +431,7 @@ Datum
varcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -466,11 +469,12 @@ Datum
varcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 atttypmod = PG_GETARG_INT32(2);
- VarChar *result;
+ VarChar *result;
char *str;
int nbytes;
@@ -531,8 +535,8 @@ varchar(PG_FUNCTION_ARGS)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- maxlen - VARHDRSZ)));
+ errmsg("value too long for type character varying(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen + VARHDRSZ;
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index dcd2b7ff42c..07ba4dc6848 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.135 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.136 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -147,8 +147,7 @@ byteain(PG_FUNCTION_ARGS)
else
{
/*
- * We should never get here. The first pass should not allow
- * it.
+ * We should never get here. The first pass should not allow it.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -550,8 +549,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
{
S1 = Max(S, 1);
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
L1 = -1;
else
{
@@ -559,18 +558,18 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
@@ -579,9 +578,9 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
}
/*
- * If the start position is past the end of the string, SQL99 says
- * to return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will
- * do that for us. Convert to zero-based starting position
+ * If the start position is past the end of the string, SQL99 says to
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
+ * that for us. Convert to zero-based starting position
*/
return DatumGetTextPSlice(str, S1 - 1, L1);
}
@@ -589,8 +588,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
{
/*
* When encoding max length is > 1, we can't get LC without
- * detoasting, so we'll grab a conservatively large slice now and
- * go back later to do the right thing
+ * detoasting, so we'll grab a conservatively large slice now and go
+ * back later to do the right thing
*/
int32 slice_start;
int32 slice_size;
@@ -603,38 +602,38 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
text *ret;
/*
- * if S is past the end of the string, the tuple toaster will
- * return a zero-length string to us
+ * if S is past the end of the string, the tuple toaster will return a
+ * zero-length string to us
*/
S1 = Max(S, 1);
/*
- * We need to start at position zero because there is no way to
- * know in advance which byte offset corresponds to the supplied
- * start position.
+ * We need to start at position zero because there is no way to know
+ * in advance which byte offset corresponds to the supplied start
+ * position.
*/
slice_start = 0;
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
slice_size = L1 = -1;
else
{
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
@@ -646,9 +645,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
L1 = E - S1;
/*
- * Total slice size in bytes can't be any longer than the
- * start position plus substring length times the encoding max
- * length.
+ * Total slice size in bytes can't be any longer than the start
+ * position plus substring length times the encoding max length.
*/
slice_size = (S1 + L1) * eml;
}
@@ -662,16 +660,15 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
slice_strlen = pg_mbstrlen_with_len(VARDATA(slice), VARSIZE(slice) - VARHDRSZ);
/*
- * Check that the start position wasn't > slice_strlen. If so,
- * SQL99 says to return a zero-length string.
+ * Check that the start position wasn't > slice_strlen. If so, SQL99
+ * says to return a zero-length string.
*/
if (S1 > slice_strlen)
return PG_STR_GET_TEXT("");
/*
- * Adjust L1 and E1 now that we know the slice string length.
- * Again remember that S1 is one based, and slice_start is zero
- * based.
+ * Adjust L1 and E1 now that we know the slice string length. Again
+ * remember that S1 is one based, and slice_start is zero based.
*/
if (L1 > -1)
E1 = Min(S1 + L1, slice_start + 1 + slice_strlen);
@@ -679,8 +676,7 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
E1 = slice_start + 1 + slice_strlen;
/*
- * Find the start position in the slice; remember S1 is not zero
- * based
+ * Find the start position in the slice; remember S1 is not zero based
*/
p = VARDATA(slice);
for (i = 0; i < S1 - 1; i++)
@@ -834,11 +830,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
int result;
/*
- * Unfortunately, there is no strncoll(), so in the non-C locale case
- * we have to do some memory copying. This turns out to be
- * significantly slower, so we optimize the case where LC_COLLATE is
- * C. We also try to optimize relatively-short strings by avoiding
- * palloc/pfree overhead.
+ * Unfortunately, there is no strncoll(), so in the non-C locale case we
+ * have to do some memory copying. This turns out to be significantly
+ * slower, so we optimize the case where LC_COLLATE is C. We also try to
+ * optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
if (lc_collate_is_c())
{
@@ -859,11 +854,11 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
/* Win32 does not have UTF-8, so we need to map to UTF-16 */
if (GetDatabaseEncoding() == PG_UTF8)
{
- int a1len;
- int a2len;
- int r;
+ int a1len;
+ int a2len;
+ int r;
- if (len1 >= STACKBUFLEN/2)
+ if (len1 >= STACKBUFLEN / 2)
{
a1len = len1 * 2 + 2;
a1p = palloc(a1len);
@@ -873,7 +868,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
a1len = STACKBUFLEN;
a1p = a1buf;
}
- if (len2 >= STACKBUFLEN/2)
+ if (len2 >= STACKBUFLEN / 2)
{
a2len = len2 * 2 + 2;
a2p = palloc(a2len);
@@ -890,7 +885,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg1, len1,
- (LPWSTR) a1p, a1len/2);
+ (LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
@@ -903,7 +898,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg2, len2,
- (LPWSTR) a2p, a2len/2);
+ (LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
@@ -913,7 +908,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
errno = 0;
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
+ * headers */
ereport(ERROR,
(errmsg("could not compare unicode strings: %d",
errno)));
@@ -925,7 +921,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
return result;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
if (len1 >= STACKBUFLEN)
a1p = (char *) palloc(len1 + 1);
@@ -1349,9 +1345,8 @@ bytea_substr(PG_FUNCTION_ARGS)
if (fcinfo->nargs == 2)
{
/*
- * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs
- * everything to the end of the string if we pass it a negative
- * value for length.
+ * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs everything to
+ * the end of the string if we pass it a negative value for length.
*/
L1 = -1;
}
@@ -1361,8 +1356,8 @@ bytea_substr(PG_FUNCTION_ARGS)
int E = S + PG_GETARG_INT32(2);
/*
- * A negative value for L is the only way for the end position to
- * be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to be
+ * before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
@@ -1382,8 +1377,8 @@ bytea_substr(PG_FUNCTION_ARGS)
/*
* If the start position is past the end of the string, SQL99 says to
- * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
- * that for us. Convert to zero-based starting position
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do that
+ * for us. Convert to zero-based starting position
*/
PG_RETURN_BYTEA_P(PG_GETARG_BYTEA_P_SLICE(0, S1 - 1, L1));
}
@@ -1686,7 +1681,7 @@ textToQualifiedNameList(text *textval)
/* Convert to C string (handles possible detoasting). */
/* Note we rely on being able to modify rawname below. */
rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(textval)));
+ PointerGetDatum(textval)));
if (!SplitIdentifierString(rawname, '.', &namelist))
ereport(ERROR,
@@ -1788,14 +1783,13 @@ SplitIdentifierString(char *rawstring, char separator,
return false; /* empty unquoted name not allowed */
/*
- * Downcase the identifier, using same code as main lexer
- * does.
+ * Downcase the identifier, using same code as main lexer does.
*
* XXX because we want to overwrite the input in-place, we cannot
- * support a downcasing transformation that increases the
- * string length. This is not a problem given the current
- * implementation of downcase_truncate_identifier, but we'll
- * probably have to do something about this someday.
+ * support a downcasing transformation that increases the string
+ * length. This is not a problem given the current implementation
+ * of downcase_truncate_identifier, but we'll probably have to do
+ * something about this someday.
*/
len = endp - curname;
downname = downcase_truncate_identifier(curname, len, false);
@@ -2083,12 +2077,14 @@ check_replace_text_has_escape_char(const text *replace_text)
if (pg_database_encoding_max_length() == 1)
{
for (; p < p_end; p++)
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
else
{
for (; p < p_end; p += pg_mblen(p))
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
return false;
@@ -2100,7 +2096,7 @@ check_replace_text_has_escape_char(const text *replace_text)
*/
static void
appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
- regmatch_t *pmatch, text *src_text)
+ regmatch_t *pmatch, text *src_text)
{
const char *p = VARDATA(replace_text);
const char *p_end = p + (VARSIZE(replace_text) - VARHDRSZ);
@@ -2129,19 +2125,20 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
}
/*
- * Copy the text when there is a text in the left of escape char
- * or escape char is not found.
+ * Copy the text when there is a text in the left of escape char or
+ * escape char is not found.
*/
if (ch_cnt)
{
- text *append_text = text_substring(PointerGetDatum(replace_text),
- substr_start, ch_cnt, false);
+ text *append_text = text_substring(PointerGetDatum(replace_text),
+ substr_start, ch_cnt, false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
substr_start += ch_cnt + 1;
- if (p >= p_end) /* When escape char is not found. */
+ if (p >= p_end) /* When escape char is not found. */
break;
/* See the next character of escape char. */
@@ -2151,7 +2148,8 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (*p >= '1' && *p <= '9')
{
/* Use the back reference of regexp. */
- int idx = *p - '0';
+ int idx = *p - '0';
+
so = pmatch[idx].rm_so;
eo = pmatch[idx].rm_eo;
p++;
@@ -2169,8 +2167,9 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/* Copy the text that is back reference of regexp. */
- text *append_text = text_substring(PointerGetDatum(src_text),
- so + 1, (eo - so), false);
+ text *append_text = text_substring(PointerGetDatum(src_text),
+ so + 1, (eo - so), false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
@@ -2189,9 +2188,9 @@ replace_text_regexp(PG_FUNCTION_ARGS)
text *ret_text;
text *src_text = PG_GETARG_TEXT_P(0);
int src_text_len = VARSIZE(src_text) - VARHDRSZ;
- regex_t *re = (regex_t *)PG_GETARG_POINTER(1);
+ regex_t *re = (regex_t *) PG_GETARG_POINTER(1);
text *replace_text = PG_GETARG_TEXT_P(2);
- bool global = PG_GETARG_BOOL(3);
+ bool global = PG_GETARG_BOOL(3);
StringInfo str = makeStringInfo();
int regexec_result;
regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT];
@@ -2214,33 +2213,34 @@ replace_text_regexp(PG_FUNCTION_ARGS)
data,
data_len,
search_start,
- NULL, /* no details */
+ NULL, /* no details */
REGEXP_REPLACE_BACKREF_CNT,
pmatch,
0);
if (regexec_result != REG_OKAY && regexec_result != REG_NOMATCH)
{
- char errMsg[100];
+ char errMsg[100];
/* re failed??? */
pg_regerror(regexec_result, re, errMsg, sizeof(errMsg));
ereport(ERROR,
- (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
- errmsg("regular expression failed: %s", errMsg)));
+ (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
+ errmsg("regular expression failed: %s", errMsg)));
}
if (regexec_result == REG_NOMATCH)
break;
- /*
- * Copy the text when there is a text in the left of matched position.
- */
+ /*
+ * Copy the text when there is a text in the left of matched position.
+ */
if (pmatch[0].rm_so - data_pos > 0)
{
- text *left_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1,
- pmatch[0].rm_so - data_pos, false);
+ text *left_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1,
+ pmatch[0].rm_so - data_pos, false);
+
appendStringInfoText(str, left_text);
pfree(left_text);
}
@@ -2270,13 +2270,14 @@ replace_text_regexp(PG_FUNCTION_ARGS)
}
/*
- * Copy the text when there is a text at the right of last matched
- * or regexp is not matched.
+ * Copy the text when there is a text at the right of last matched or
+ * regexp is not matched.
*/
if (data_pos < data_len)
{
- text *right_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1, -1, true);
+ text *right_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1, -1, true);
+
appendStringInfoText(str, right_text);
pfree(right_text);
}
@@ -2392,7 +2393,7 @@ text_to_array(PG_FUNCTION_ARGS)
*/
if (fldsep_len < 1)
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
/* start with end position holding the initial start position */
end_posn = 0;
@@ -2409,17 +2410,17 @@ text_to_array(PG_FUNCTION_ARGS)
if (fldnum == 1)
{
/*
- * first element return one element, 1D, array using the
- * input string
+ * first element return one element, 1D, array using the input
+ * string
*/
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
}
else
{
/* otherwise create array and exit */
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
}
else if (start_posn == 0)
@@ -2439,7 +2440,7 @@ text_to_array(PG_FUNCTION_ARGS)
/* interior field requested */
result_text = text_substring(PointerGetDatum(inputstring),
start_posn + fldsep_len,
- end_posn - start_posn - fldsep_len,
+ end_posn - start_posn - fldsep_len,
false);
}
@@ -2489,14 +2490,14 @@ array_to_text(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -2504,8 +2505,7 @@ array_to_text(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
@@ -2606,7 +2606,7 @@ md5_text(PG_FUNCTION_ARGS)
{
text *in_text = PG_GETARG_TEXT_P(0);
size_t len;
- char hexsum[MD5_HASH_LEN + 1];
+ char hexsum[MD5_HASH_LEN + 1];
text *result_text;
/* Calculate the length of the buffer using varlena metadata */
@@ -2661,7 +2661,7 @@ pg_column_size(PG_FUNCTION_ARGS)
if (fcinfo->flinfo->fn_extra == NULL)
{
/* Lookup the datatype of the supplied argument */
- Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
typlen = get_typlen(argtypeid);
if (typlen == 0) /* should not happen */
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 2ffcee77695..918ab7c081a 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.124 2005/09/24 22:54:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@
#include "utils/syscache.h"
-/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
+ /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
/*
* Constants related to size of the catcache.
@@ -187,22 +187,22 @@ CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
case 4:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
- cur_skey[3].sk_argument)) << 9;
+ cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
- cur_skey[2].sk_argument)) << 6;
+ cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
- cur_skey[1].sk_argument)) << 3;
+ cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
- cur_skey[0].sk_argument));
+ cur_skey[0].sk_argument));
break;
default:
elog(FATAL, "wrong number of hash keys: %d", nkeys);
@@ -448,8 +448,8 @@ CatalogCacheIdInvalidate(int cacheId,
/*
* We don't bother to check whether the cache has finished
- * initialization yet; if not, there will be no entries in it so
- * no problem.
+ * initialization yet; if not, there will be no entries in it so no
+ * problem.
*/
/*
@@ -522,15 +522,15 @@ void
CreateCacheMemoryContext(void)
{
/*
- * Purely for paranoia, check that context doesn't exist; caller
- * probably did so already.
+ * Purely for paranoia, check that context doesn't exist; caller probably
+ * did so already.
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
@@ -768,7 +768,6 @@ do { \
cp->cc_reloid, cp->cc_indexoid, cp->id, \
cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
-
#else
#define InitCatCache_DEBUG2
#endif
@@ -786,8 +785,8 @@ InitCatCache(int id,
int i;
/*
- * first switch to the cache context so our allocations do not vanish
- * at the end of a transaction
+ * first switch to the cache context so our allocations do not vanish at
+ * the end of a transaction
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
@@ -878,7 +877,6 @@ do { \
i+1, cache->cc_nkeys, cache->cc_key[i]); \
} \
} while(0)
-
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
@@ -895,15 +893,15 @@ CatalogCacheInitializeCache(CatCache *cache)
CatalogCacheInitializeCache_DEBUG1;
/*
- * Open the relation without locking --- we only need the tupdesc,
- * which we assume will never change ...
+ * Open the relation without locking --- we only need the tupdesc, which
+ * we assume will never change ...
*/
relation = heap_open(cache->cc_reloid, NoLock);
Assert(RelationIsValid(relation));
/*
- * switch to the cache context so our allocations do not vanish at the
- * end of a transaction
+ * switch to the cache context so our allocations do not vanish at the end
+ * of a transaction
*/
Assert(CacheMemoryContext != NULL);
@@ -915,8 +913,8 @@ CatalogCacheInitializeCache(CatCache *cache)
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
/*
- * save the relation's name and relisshared flag, too (cc_relname
- * is used only for debugging purposes)
+ * save the relation's name and relisshared flag, too (cc_relname is used
+ * only for debugging purposes)
*/
cache->cc_relname = pstrdup(RelationGetRelationName(relation));
cache->cc_relisshared = RelationGetForm(relation)->relisshared;
@@ -957,8 +955,8 @@ CatalogCacheInitializeCache(CatCache *cache)
cache->cc_isname[i] = (keytype == NAMEOID);
/*
- * Do equality-function lookup (we assume this won't need a
- * catalog lookup for any supported type)
+ * Do equality-function lookup (we assume this won't need a catalog
+ * lookup for any supported type)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
@@ -1026,9 +1024,9 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
if (cache->id == INDEXRELID)
{
/*
- * Since the OIDs of indexes aren't hardwired, it's painful to
- * figure out which is which. Just force all pg_index searches to
- * be heap scans while building the relcaches.
+ * Since the OIDs of indexes aren't hardwired, it's painful to figure
+ * out which is which. Just force all pg_index searches to be heap
+ * scans while building the relcaches.
*/
if (!criticalRelcachesBuilt)
return false;
@@ -1037,10 +1035,10 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
cache->id == AMNAME)
{
/*
- * Always do heap scans in pg_am, because it's so small there's
- * not much point in an indexscan anyway. We *must* do this when
- * initially building critical relcache entries, but we might as
- * well just always do it.
+ * Always do heap scans in pg_am, because it's so small there's not
+ * much point in an indexscan anyway. We *must* do this when
+ * initially building critical relcache entries, but we might as well
+ * just always do it.
*/
return false;
}
@@ -1146,18 +1144,18 @@ SearchCatCache(CatCache *cache,
continue;
/*
- * we found a match in the cache: move it to the front of the
- * global LRU list. We also move it to the front of the list for
- * its hashbucket, in order to speed subsequent searches. (The
- * most frequently accessed elements in any hashbucket will tend
- * to be near the front of the hashbucket's list.)
+ * we found a match in the cache: move it to the front of the global
+ * LRU list. We also move it to the front of the list for its
+ * hashbucket, in order to speed subsequent searches. (The most
+ * frequently accessed elements in any hashbucket will tend to be near
+ * the front of the hashbucket's list.)
*/
DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
/*
- * If it's a positive entry, bump its refcount and return it. If
- * it's negative, we can report failure to the caller.
+ * If it's a positive entry, bump its refcount and return it. If it's
+ * negative, we can report failure to the caller.
*/
if (!ct->negative)
{
@@ -1188,19 +1186,19 @@ SearchCatCache(CatCache *cache,
}
/*
- * Tuple was not found in cache, so we have to try to retrieve it
- * directly from the relation. If found, we will add it to the cache;
- * if not found, we will add a negative cache entry instead.
+ * Tuple was not found in cache, so we have to try to retrieve it directly
+ * from the relation. If found, we will add it to the cache; if not
+ * found, we will add a negative cache entry instead.
*
- * NOTE: it is possible for recursive cache lookups to occur while
- * reading the relation --- for example, due to shared-cache-inval
- * messages being processed during heap_open(). This is OK. It's
- * even possible for one of those lookups to find and enter the very
- * same tuple we are trying to fetch here. If that happens, we will
- * enter a second copy of the tuple into the cache. The first copy
- * will never be referenced again, and will eventually age out of the
- * cache, so there's no functional problem. This case is rare enough
- * that it's not worth expending extra cycles to detect.
+ * NOTE: it is possible for recursive cache lookups to occur while reading
+ * the relation --- for example, due to shared-cache-inval messages being
+ * processed during heap_open(). This is OK. It's even possible for one
+ * of those lookups to find and enter the very same tuple we are trying to
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
+ * will eventually age out of the cache, so there's no functional problem.
+ * This case is rare enough that it's not worth expending extra cycles to
+ * detect.
*/
relation = heap_open(cache->cc_reloid, AccessShareLock);
@@ -1231,13 +1229,13 @@ SearchCatCache(CatCache *cache,
/*
* If tuple was not found, we need to build a negative cache entry
- * containing a fake tuple. The fake tuple has the correct key
- * columns, but nulls everywhere else.
+ * containing a fake tuple. The fake tuple has the correct key columns,
+ * but nulls everywhere else.
*
- * In bootstrap mode, we don't build negative entries, because the
- * cache invalidation mechanism isn't alive and can't clear them
- * if the tuple gets created later. (Bootstrap doesn't do UPDATEs,
- * so it doesn't need cache inval for that.)
+ * In bootstrap mode, we don't build negative entries, because the cache
+ * invalidation mechanism isn't alive and can't clear them if the tuple
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * cache inval for that.)
*/
if (ct == NULL)
{
@@ -1256,8 +1254,8 @@ SearchCatCache(CatCache *cache,
cache->cc_relname, hashIndex);
/*
- * We are not returning the negative entry to the caller, so leave
- * its refcount zero.
+ * We are not returning the negative entry to the caller, so leave its
+ * refcount zero.
*/
return NULL;
@@ -1331,7 +1329,7 @@ SearchCatCacheList(CatCache *cache,
Dlelem *elt;
CatCList *cl;
CatCTup *ct;
- List * volatile ctlist;
+ List *volatile ctlist;
ListCell *ctlist_item;
int nmembers;
bool ordered;
@@ -1362,8 +1360,8 @@ SearchCatCacheList(CatCache *cache,
/*
* compute a hash value of the given keys for faster search. We don't
- * presently divide the CatCList items into buckets, but this still
- * lets us skip non-matching items quickly most of the time.
+ * presently divide the CatCList items into buckets, but this still lets
+ * us skip non-matching items quickly most of the time.
*/
lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
@@ -1399,11 +1397,11 @@ SearchCatCacheList(CatCache *cache,
/*
* We found a matching list: mark it as touched since the last
- * CatalogCacheCleanup() sweep. Also move the list to the front
- * of the cache's list-of-lists, to speed subsequent searches.
- * (We do not move the members to the fronts of their hashbucket
- * lists, however, since there's no point in that unless they are
- * searched for individually.)
+ * CatalogCacheCleanup() sweep. Also move the list to the front of
+ * the cache's list-of-lists, to speed subsequent searches. (We do not
+ * move the members to the fronts of their hashbucket lists, however,
+ * since there's no point in that unless they are searched for
+ * individually.)
*/
cl->touched = true;
DLMoveToFront(&cl->cache_elem);
@@ -1428,10 +1426,10 @@ SearchCatCacheList(CatCache *cache,
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts temporarily to ensure they
- * won't get dropped from the cache while loading other members.
- * We use a PG_TRY block to ensure we can undo those refcounts if
- * we get an error before we finish constructing the CatCList.
+ * We have to bump the member refcounts temporarily to ensure they won't get
+ * dropped from the cache while loading other members. We use a PG_TRY
+ * block to ensure we can undo those refcounts if we get an error before
+ * we finish constructing the CatCList.
*/
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
@@ -1473,13 +1471,13 @@ SearchCatCacheList(CatCache *cache,
ct = (CatCTup *) DLE_VAL(elt);
if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ continue; /* ignore dead and negative entries */
if (ct->hash_value != hashValue)
- continue; /* quickly skip entry if wrong hash val */
+ continue; /* quickly skip entry if wrong hash val */
if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
- continue; /* not same tuple */
+ continue; /* not same tuple */
/*
* Found a match, but can't use it if it belongs to another
@@ -1526,9 +1524,9 @@ SearchCatCacheList(CatCache *cache,
heap_freetuple(ntp);
/*
- * We are now past the last thing that could trigger an elog before
- * we have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * We are now past the last thing that could trigger an elog before we
+ * have finished building the CatCList and remembering it in the
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1629,8 +1627,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
MemoryContext oldcxt;
/*
- * Allocate CatCTup header in cache memory, and copy the tuple there
- * too.
+ * Allocate CatCTup header in cache memory, and copy the tuple there too.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
@@ -1658,9 +1655,9 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;
/*
- * If we've exceeded the desired size of the caches, try to throw away
- * the least recently used entry(s). NB: be careful not to throw away
- * the newly-built entry...
+ * If we've exceeded the desired size of the caches, try to throw away the
+ * least recently used entry(s). NB: be careful not to throw away the
+ * newly-built entry...
*/
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
CatalogCacheCleanup(ct);
@@ -1684,22 +1681,22 @@ CatalogCacheCleanup(CatCTup *savect)
*prevelt;
/*
- * Each time we have to do this, try to cut the cache size down to
- * about 90% of the maximum.
+ * Each time we have to do this, try to cut the cache size down to about
+ * 90% of the maximum.
*/
tup_target = (CacheHdr->ch_maxtup * 9) / 10;
/*
- * Our strategy for managing CatCLists is that, each time we have to
- * throw away some cache entries, we first move-to-front all the members
- * of CatCLists that have been touched since the last cleanup sweep.
- * Then we do strict LRU elimination by individual tuples, zapping a list
- * if any of its members gets zapped. Before PostgreSQL 8.1, we moved
- * members to front each time their owning list was touched, which was
- * arguably more fair in balancing list members against standalone tuples
- * --- but the overhead for large lists was horrendous. This scheme is
- * more heavily biased towards preserving lists, but that is not
- * necessarily bad either.
+ * Our strategy for managing CatCLists is that, each time we have to throw
+ * away some cache entries, we first move-to-front all the members of
+ * CatCLists that have been touched since the last cleanup sweep. Then we
+ * do strict LRU elimination by individual tuples, zapping a list if any
+ * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
+ * front each time their owning list was touched, which was arguably more
+ * fair in balancing list members against standalone tuples --- but the
+ * overhead for large lists was horrendous. This scheme is more heavily
+ * biased towards preserving lists, but that is not necessarily bad
+ * either.
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
@@ -1710,7 +1707,7 @@ CatalogCacheCleanup(CatCTup *savect)
Assert(cl->cl_magic == CL_MAGIC);
if (cl->touched && !cl->dead)
{
- int i;
+ int i;
for (i = 0; i < cl->n_members; i++)
DLMoveToFront(&cl->members[i]->lrulist_elem);
@@ -1775,9 +1772,9 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
if (attindex > 0)
{
/*
- * Here we must be careful in case the caller passed a C
- * string where a NAME is wanted: convert the given argument
- * to a correctly padded NAME. Otherwise the memcpy() done in
+ * Here we must be careful in case the caller passed a C string
+ * where a NAME is wanted: convert the given argument to a
+ * correctly padded NAME. Otherwise the memcpy() done in
* heap_formtuple could fall off the end of memory.
*/
if (cache->cc_isname[i])
@@ -1840,7 +1837,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
- void (*function) (int, uint32, ItemPointer, Oid))
+ void (*function) (int, uint32, ItemPointer, Oid))
{
CatCache *ccp;
Oid reloid;
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index da0ffad16b2..59250feac1a 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -53,10 +53,10 @@
*
* Also, whenever we see an operation on a pg_class or pg_attribute tuple,
* we register a relcache flush operation for the relation described by that
- * tuple. pg_class updates trigger an smgr flush operation as well.
+ * tuple. pg_class updates trigger an smgr flush operation as well.
*
* We keep the relcache and smgr flush requests in lists separate from the
- * catcache tuple flush requests. This allows us to issue all the pending
+ * catcache tuple flush requests. This allows us to issue all the pending
* catcache flushes before we issue relcache flushes, which saves us from
* loading a catcache tuple during relcache load only to flush it again
* right away. Also, we avoid queuing multiple relcache flush requests for
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.72 2005/06/17 22:32:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ typedef struct TransInvalidationInfo
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
- int my_level;
+ int my_level;
/* head of current-command event list */
InvalidationListHeader CurrentCmdInvalidMsgs;
@@ -173,9 +173,9 @@ static struct CACHECALLBACK
static int cache_callback_count = 0;
/* info values for 2PC callback */
-#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
-#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
-#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
+#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
+#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
+#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
static void PersistInvalidationMessage(SharedInvalidationMessage *msg);
@@ -208,7 +208,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
+ (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = FIRSTCHUNKSIZE;
chunk->next = *listHdr;
@@ -222,7 +222,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (chunksize - 1) *sizeof(SharedInvalidationMessage));
+ (chunksize - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = chunksize;
chunk->next = *listHdr;
@@ -316,7 +316,7 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
msg->rc.relId == relId)
- return);
+ return);
/* OK, add the item */
msg.rc.id = SHAREDINVALRELCACHE_ID;
@@ -338,7 +338,7 @@ AddSmgrInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->sm.id == SHAREDINVALSMGR_ID &&
RelFileNodeEquals(msg->sm.rnode, rnode))
- return);
+ return);
/* OK, add the item */
msg.sm.id = SHAREDINVALSMGR_ID;
@@ -470,8 +470,8 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
else if (msg->id == SHAREDINVALSMGR_ID)
{
/*
- * We could have smgr entries for relations of other databases,
- * so no short-circuit test is possible here.
+ * We could have smgr entries for relations of other databases, so no
+ * short-circuit test is possible here.
*/
smgrclosenode(msg->sm.rnode);
}
@@ -523,17 +523,16 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
return;
/*
- * We only need to worry about invalidation for tuples that are in
- * system relations; user-relation tuples are never in catcaches and
- * can't affect the relcache either.
+ * We only need to worry about invalidation for tuples that are in system
+ * relations; user-relation tuples are never in catcaches and can't affect
+ * the relcache either.
*/
if (!IsSystemRelation(relation))
return;
/*
- * TOAST tuples can likewise be ignored here. Note that TOAST tables
- * are considered system relations so they are not filtered by the
- * above test.
+ * TOAST tuples can likewise be ignored here. Note that TOAST tables are
+ * considered system relations so they are not filtered by the above test.
*/
if (IsToastRelation(relation))
return;
@@ -561,16 +560,15 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
databaseId = MyDatabaseId;
/*
- * We need to send out an smgr inval as well as a relcache inval.
- * This is needed because other backends might possibly possess
- * smgr cache but not relcache entries for the target relation.
+ * We need to send out an smgr inval as well as a relcache inval. This
+ * is needed because other backends might possibly possess smgr cache
+ * but not relcache entries for the target relation.
*
- * Note: during a pg_class row update that assigns a new
- * relfilenode or reltablespace value, we will be called on both
- * the old and new tuples, and thus will broadcast invalidation
- * messages showing both the old and new RelFileNode values. This
- * ensures that other backends will close smgr references to the
- * old file.
+ * Note: during a pg_class row update that assigns a new relfilenode or
+ * reltablespace value, we will be called on both the old and new
+ * tuples, and thus will broadcast invalidation messages showing both
+ * the old and new RelFileNode values. This ensures that other
+ * backends will close smgr references to the old file.
*
* XXX possible future cleanup: it might be better to trigger smgr
* flushes explicitly, rather than indirectly from pg_class updates.
@@ -590,13 +588,12 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
relationId = atttup->attrelid;
/*
- * KLUGE ALERT: we always send the relcache event with
- * MyDatabaseId, even if the rel in question is shared (which we
- * can't easily tell). This essentially means that only backends
- * in this same database will react to the relcache flush request.
- * This is in fact appropriate, since only those backends could
- * see our pg_attribute change anyway. It looks a bit ugly
- * though.
+ * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
+ * even if the rel in question is shared (which we can't easily tell).
+ * This essentially means that only backends in this same database
+ * will react to the relcache flush request. This is in fact
+ * appropriate, since only those backends could see our pg_attribute
+ * change anyway. It looks a bit ugly though.
*/
databaseId = MyDatabaseId;
}
@@ -646,7 +643,7 @@ AtStart_Inval(void)
/*
* AtPrepare_Inval
- * Save the inval lists state at 2PC transaction prepare.
+ * Save the inval lists state at 2PC transaction prepare.
*
* In this phase we just generate 2PC records for all the pending invalidation
* work.
@@ -658,8 +655,8 @@ AtPrepare_Inval(void)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages.
*/
if (transInvalInfo->RelcacheInitFileInval)
RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_BEFORE,
@@ -678,7 +675,7 @@ AtPrepare_Inval(void)
/*
* PostPrepare_Inval
- * Clean up after successful PREPARE.
+ * Clean up after successful PREPARE.
*
* Here, we want to act as though the transaction aborted, so that we will
* undo any syscache changes it made, thereby bringing us into sync with the
@@ -714,7 +711,7 @@ AtSubStart_Inval(void)
/*
* PersistInvalidationMessage
- * Write an invalidation message to the 2PC state file.
+ * Write an invalidation message to the 2PC state file.
*/
static void
PersistInvalidationMessage(SharedInvalidationMessage *msg)
@@ -736,7 +733,7 @@ inval_twophase_postcommit(TransactionId xid, uint16 info,
switch (info)
{
case TWOPHASE_INFO_MSG:
- msg = (SharedInvalidationMessage *) recdata;
+ msg = (SharedInvalidationMessage *) recdata;
Assert(len == sizeof(SharedInvalidationMessage));
SendSharedInvalidMessage(msg);
break;
@@ -786,15 +783,15 @@ AtEOXact_Inval(bool isCommit)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages. However, we need not do
- * anything unless we committed.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages. However, we need not do anything
+ * unless we committed.
*/
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFileInvalidate(true);
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessage);
@@ -897,9 +894,9 @@ void
CommandEndInvalidationMessages(void)
{
/*
- * You might think this shouldn't be called outside any transaction,
- * but bootstrap does it, and also ABORT issued when not in a
- * transaction. So just quietly return if no state to work on.
+ * You might think this shouldn't be called outside any transaction, but
+ * bootstrap does it, and also ABORT issued when not in a transaction. So
+ * just quietly return if no state to work on.
*/
if (transInvalInfo == NULL)
return;
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index ebb884dc258..096a3cb942b 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.128 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.129 2005/10/15 02:49:31 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@@ -149,10 +149,10 @@ get_op_hash_function(Oid opno)
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "=" operator of any hash opclass. If the operator is registered in
- * multiple opclasses, assume we can use the associated hash function
- * from any one.
+ * Search pg_amop to see if the target operator is registered as the "="
+ * operator of any hash opclass. If the operator is registered in
+ * multiple opclasses, assume we can use the associated hash function from
+ * any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
@@ -1223,9 +1223,9 @@ getTypeIOParam(HeapTuple typeTuple)
Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * Array types get their typelem as parameter; everybody else gets
- * their own type OID as parameter. (This is a change from 8.0,
- * in which only composite types got their own OID as parameter.)
+ * Array types get their typelem as parameter; everybody else gets their
+ * own type OID as parameter. (This is a change from 8.0, in which only
+ * composite types got their own OID as parameter.)
*/
if (OidIsValid(typeStruct->typelem))
return typeStruct->typelem;
@@ -1414,7 +1414,7 @@ get_typdefault(Oid typid)
/* Convert C string to a value of the given type */
datum = OidFunctionCall3(type->typinput,
CStringGetDatum(strDefaultVal),
- ObjectIdGetDatum(getTypeIOParam(typeTuple)),
+ ObjectIdGetDatum(getTypeIOParam(typeTuple)),
Int32GetDatum(-1));
/* Build a Const node containing the value */
expr = (Node *) makeConst(typid,
@@ -1501,8 +1501,8 @@ get_typavgwidth(Oid typid, int32 typmod)
{
/*
* For BPCHAR, the max width is also the only width. Otherwise we
- * need to guess about the typical data width given the max. A
- * sliding scale for percentage of max width seems reasonable.
+ * need to guess about the typical data width given the max. A sliding
+ * scale for percentage of max width seems reasonable.
*/
if (typid == BPCHAROID)
return maxwidth;
@@ -1513,8 +1513,8 @@ get_typavgwidth(Oid typid, int32 typmod)
/*
* Beyond 1000, assume we're looking at something like
- * "varchar(10000)" where the limit isn't actually reached often,
- * and use a fixed estimate.
+ * "varchar(10000)" where the limit isn't actually reached often, and
+ * use a fixed estimate.
*/
return 32 + (1000 - 32) / 2;
}
@@ -1905,9 +1905,9 @@ get_attstatsslot(HeapTuple statstuple,
values, nvalues);
/*
- * If the element type is pass-by-reference, we now have a bunch
- * of Datums that are pointers into the syscache value. Copy them
- * to avoid problems if syscache decides to drop the entry.
+ * If the element type is pass-by-reference, we now have a bunch of
+ * Datums that are pointers into the syscache value. Copy them to
+ * avoid problems if syscache decides to drop the entry.
*/
if (!typeForm->typbyval)
{
@@ -1938,9 +1938,9 @@ get_attstatsslot(HeapTuple statstuple,
statarray = DatumGetArrayTypeP(val);
/*
- * We expect the array to be a 1-D float4 array; verify that. We
- * don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of float4 values.
+ * We expect the array to be a 1-D float4 array; verify that. We don't
+ * need to use deconstruct_array() since the array data is just going
+ * to look like a C array of float4 values.
*/
narrayelem = ARR_DIMS(statarray)[0];
if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
@@ -2038,7 +2038,7 @@ get_roleid(const char *rolname)
Oid
get_roleid_checked(const char *rolname)
{
- Oid roleid;
+ Oid roleid;
roleid = get_roleid(rolname);
if (!OidIsValid(roleid))
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index d74982dcb0c..e877c1f828b 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.229 2005/09/16 04:13:18 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -192,7 +192,7 @@ static bool load_relcache_init_file(void);
static void write_relcache_init_file(void);
static void formrdesc(const char *relationName, Oid relationReltype,
- bool hasoids, int natts, FormData_pg_attribute *att);
+ bool hasoids, int natts, FormData_pg_attribute *att);
static HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK);
static Relation AllocateRelationDesc(Relation relation, Form_pg_class relp);
@@ -241,9 +241,9 @@ ScanPgRelation(Oid targetRelId, bool indexOK)
/*
* Open pg_class and fetch a tuple. Force heap scan if we haven't yet
- * built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file). The caller can also
- * force a heap scan by setting indexOK == false.
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file). The caller can also force a heap
+ * scan by setting indexOK == false.
*/
pg_class_desc = heap_open(RelationRelationId, AccessShareLock);
pg_class_scan = systable_beginscan(pg_class_desc, ClassOidIndexId,
@@ -303,12 +303,11 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * relacl is NOT stored in the relcache --- there'd be little point in
- * it, since we don't copy the tuple's nullvalues bitmap and hence
- * wouldn't know if the value is valid ... bottom line is that relacl
- * *cannot* be retrieved from the relcache. Get it from the syscache
- * if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
+ * is NOT stored in the relcache --- there'd be little point in it, since
+ * we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
+ * the value is valid ... bottom line is that relacl *cannot* be retrieved
+ * from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -355,8 +354,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* Form a scan key that selects only user attributes (attnum > 0).
- * (Eliminating system attribute rows at the index level is lots
- * faster than fetching them.)
+ * (Eliminating system attribute rows at the index level is lots faster
+ * than fetching them.)
*/
ScanKeyInit(&skey[0],
Anum_pg_attribute_attrelid,
@@ -368,9 +367,9 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't
- * yet built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file).
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file).
*/
pg_attribute_desc = heap_open(AttributeRelationId, AccessShareLock);
pg_attribute_scan = systable_beginscan(pg_attribute_desc,
@@ -445,9 +444,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special
- * cases for attnum=1 that used to exist in fastgetattr() and
- * index_getattr().
+ * attribute: it must be zero. This eliminates the need for special cases
+ * for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
relation->rd_att->attrs[0]->attcacheoff = 0;
@@ -477,7 +475,7 @@ RelationBuildTupleDesc(Relation relation)
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAllocZero(CacheMemoryContext,
- constr->num_check * sizeof(ConstrCheck));
+ constr->num_check * sizeof(ConstrCheck));
CheckConstraintFetch(relation);
}
else
@@ -521,8 +519,8 @@ RelationBuildRuleLock(Relation relation)
int maxlocks;
/*
- * Make the private context. Parameters are set on the assumption
- * that it'll probably not contain much data.
+ * Make the private context. Parameters are set on the assumption that
+ * it'll probably not contain much data.
*/
rulescxt = AllocSetContextCreate(CacheMemoryContext,
RelationGetRelationName(relation),
@@ -532,8 +530,8 @@ RelationBuildRuleLock(Relation relation)
relation->rd_rulescxt = rulescxt;
/*
- * allocate an array to hold the rewrite rules (the array is extended
- * if necessary)
+ * allocate an array to hold the rewrite rules (the array is extended if
+ * necessary)
*/
maxlocks = 4;
rules = (RewriteRule **)
@@ -551,10 +549,10 @@ RelationBuildRuleLock(Relation relation)
/*
* open pg_rewrite and begin a scan
*
- * Note: since we scan the rules using RewriteRelRulenameIndexId,
- * we will be reading the rules in name order, except possibly during
- * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
- * in turn ensures that rules will be fired in name order.
+ * Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
+ * reading the rules in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
+ * turn ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
@@ -602,7 +600,7 @@ RelationBuildRuleLock(Relation relation)
&isnull);
Assert(!isnull);
rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
- rule_evqual));
+ rule_evqual));
oldcxt = MemoryContextSwitchTo(rulescxt);
rule->qual = (Node *) stringToNode(rule_evqual_str);
MemoryContextSwitchTo(oldcxt);
@@ -647,8 +645,8 @@ equalRuleLocks(RuleLock *rlock1, RuleLock *rlock2)
/*
* As of 7.3 we assume the rule ordering is repeatable, because
- * RelationBuildRuleLock should read 'em in a consistent order. So
- * just compare corresponding slots.
+ * RelationBuildRuleLock should read 'em in a consistent order. So just
+ * compare corresponding slots.
*/
if (rlock1 != NULL)
{
@@ -717,8 +715,8 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
- * allocate storage for the relation descriptor, and copy
- * pg_class_tuple to relation->rd_rel.
+ * allocate storage for the relation descriptor, and copy pg_class_tuple
+ * to relation->rd_rel.
*/
relation = AllocateRelationDesc(oldrelation, relp);
@@ -733,10 +731,9 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
RelationGetRelid(relation) = relid;
/*
- * normal relations are not nailed into the cache; nor can a
- * pre-existing relation be new. It could be temp though. (Actually,
- * it could be new too, but it's okay to forget that fact if forced to
- * flush the entry.)
+ * normal relations are not nailed into the cache; nor can a pre-existing
+ * relation be new. It could be temp though. (Actually, it could be new
+ * too, but it's okay to forget that fact if forced to flush the entry.)
*/
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
@@ -834,9 +831,8 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Make a copy of the pg_index entry for the index. Since pg_index
- * contains variable-length and possibly-null fields, we have to do
- * this honestly rather than just treating it as a Form_pg_index
- * struct.
+ * contains variable-length and possibly-null fields, we have to do this
+ * honestly rather than just treating it as a Form_pg_index struct.
*/
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(relation)),
@@ -851,9 +847,9 @@ RelationInitIndexAccessInfo(Relation relation)
ReleaseSysCache(tuple);
/*
- * indclass cannot be referenced directly through the C struct, because
- * it is after the variable-width indkey field. Therefore we extract
- * the datum the hard way and provide a direct link in the relcache.
+ * indclass cannot be referenced directly through the C struct, because it
+ * is after the variable-width indkey field. Therefore we extract the
+ * datum the hard way and provide a direct link in the relcache.
*/
indclassDatum = fastgetattr(relation->rd_indextuple,
Anum_pg_index_indclass,
@@ -884,9 +880,9 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we
- * need a context, and not just a couple of pallocs, is so that we
- * won't leak any subsidiary info attached to fmgr lookup records.
+ * Make the private context to hold index access info. The reason we need
+ * a context, and not just a couple of pallocs, is so that we won't leak
+ * any subsidiary info attached to fmgr lookup records.
*
* Context parameters are set on the assumption that it'll probably not
* contain much data.
@@ -931,7 +927,7 @@ RelationInitIndexAccessInfo(Relation relation)
relation->rd_supportinfo = supportinfo;
/*
- * Fill the operator and support procedure OID arrays. (aminfo and
+ * Fill the operator and support procedure OID arrays. (aminfo and
* supportinfo are left as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(relation->rd_indclass,
@@ -1070,17 +1066,17 @@ LookupOpclassInfo(Oid operatorClassOid,
opcentry->supportProcs = NULL;
/*
- * To avoid infinite recursion during startup, force heap scans if
- * we're looking up info for the opclasses used by the indexes we
- * would like to reference here.
+ * To avoid infinite recursion during startup, force heap scans if we're
+ * looking up info for the opclasses used by the indexes we would like to
+ * reference here.
*/
indexOK = criticalRelcachesBuilt ||
(operatorClassOid != OID_BTREE_OPS_OID &&
operatorClassOid != INT2_BTREE_OPS_OID);
/*
- * Scan pg_amop to obtain operators for the opclass. We only fetch
- * the default ones (those with subtype zero).
+ * Scan pg_amop to obtain operators for the opclass. We only fetch the
+ * default ones (those with subtype zero).
*/
if (numStrats > 0)
{
@@ -1113,8 +1109,8 @@ LookupOpclassInfo(Oid operatorClassOid,
}
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only
- * fetch the default ones (those with subtype zero).
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * the default ones (those with subtype zero).
*/
if (numSupport > 0)
{
@@ -1193,8 +1189,8 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_refcnt = 1;
/*
- * all entries built with this routine are nailed-in-cache; none are
- * for new or temp relations.
+ * all entries built with this routine are nailed-in-cache; none are for
+ * new or temp relations.
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
@@ -1203,9 +1199,9 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll serve to
- * get us launched. RelationCacheInitializePhase2() will read the
- * real data from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to get
+ * us launched. RelationCacheInitializePhase2() will read the real data
+ * from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
@@ -1214,10 +1210,9 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_rel->reltype = relationReltype;
/*
- * It's important to distinguish between shared and non-shared
- * relations, even at bootstrap time, to make sure we know where they
- * are stored. At present, all relations that formrdesc is used for
- * are not shared.
+ * It's important to distinguish between shared and non-shared relations,
+ * even at bootstrap time, to make sure we know where they are stored. At
+ * present, all relations that formrdesc is used for are not shared.
*/
relation->rd_rel->relisshared = false;
@@ -1231,8 +1226,8 @@ formrdesc(const char *relationName, Oid relationReltype,
* initialize attribute tuple form
*
* Unlike the case with the relation tuple, this data had better be right
- * because it will never be replaced. The input values must be
- * correctly defined by macros in src/include/catalog/ headers.
+ * because it will never be replaced. The input values must be correctly
+ * defined by macros in src/include/catalog/ headers.
*/
relation->rd_att = CreateTemplateTupleDesc(natts, hasoids);
relation->rd_att->tdtypeid = relationReltype;
@@ -1361,8 +1356,8 @@ RelationIdGetRelation(Oid relationId)
return rd;
/*
- * no reldesc in the cache, so have RelationBuildDesc() build one and
- * add it.
+ * no reldesc in the cache, so have RelationBuildDesc() build one and add
+ * it.
*/
rd = RelationBuildDesc(relationId, NULL);
if (RelationIsValid(rd))
@@ -1454,11 +1449,12 @@ RelationReloadClassinfo(Relation relation)
/* Should be called only for invalidated nailed indexes */
Assert(relation->rd_isnailed && !relation->rd_isvalid &&
relation->rd_rel->relkind == RELKIND_INDEX);
+
/*
* Read the pg_class row
*
- * Don't try to use an indexscan of pg_class_oid_index to reload the
- * info for pg_class_oid_index ...
+ * Don't try to use an indexscan of pg_class_oid_index to reload the info for
+ * pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
@@ -1492,25 +1488,25 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
- * weren't closed already. If the relation is not getting deleted,
- * the next smgr access should reopen the files automatically. This
- * ensures that the low-level file access state is updated after, say,
- * a vacuum truncation.
+ * weren't closed already. If the relation is not getting deleted, the
+ * next smgr access should reopen the files automatically. This ensures
+ * that the low-level file access state is updated after, say, a vacuum
+ * truncation.
*/
RelationCloseSmgr(relation);
/*
- * Never, never ever blow away a nailed-in system relation, because
- * we'd be unable to recover. However, we must reset rd_targblock, in
- * case we got called because of a relation cache flush that was
- * triggered by VACUUM.
+ * Never, never ever blow away a nailed-in system relation, because we'd
+ * be unable to recover. However, we must reset rd_targblock, in case we
+ * got called because of a relation cache flush that was triggered by
+ * VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to
- * see if its relfilenode changed. We can't necessarily do that here,
- * because we might be in a failed transaction. We assume it's okay
- * to do it if there are open references to the relcache entry (cf
- * notes for AtEOXact_RelationCache). Otherwise just mark the entry
- * as possibly invalid, and it'll be fixed when next opened.
+ * If it's a nailed index, then we need to re-read the pg_class row to see if
+ * its relfilenode changed. We can't necessarily do that here, because we
+ * might be in a failed transaction. We assume it's okay to do it if
+ * there are open references to the relcache entry (cf notes for
+ * AtEOXact_RelationCache). Otherwise just mark the entry as possibly
+ * invalid, and it'll be fixed when next opened.
*/
if (relation->rd_isnailed)
{
@@ -1542,8 +1538,8 @@ RelationClearRelation(Relation relation, bool rebuild)
* Free all the subsidiary data structures of the relcache entry. We
* cannot free rd_att if we are trying to rebuild the entry, however,
* because pointers to it may be cached in various places. The rule
- * manager might also have pointers into the rewrite rules. So to
- * begin with, we can only get rid of these fields:
+ * manager might also have pointers into the rewrite rules. So to begin
+ * with, we can only get rid of these fields:
*/
FreeTriggerDesc(relation->trigdesc);
if (relation->rd_indextuple)
@@ -1558,9 +1554,9 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* If we're really done with the relcache entry, blow it away. But if
- * someone is still using it, reconstruct the whole deal without
- * moving the physical RelationData record (so that the someone's
- * pointer is still valid).
+ * someone is still using it, reconstruct the whole deal without moving
+ * the physical RelationData record (so that the someone's pointer is
+ * still valid).
*/
if (!rebuild)
{
@@ -1574,12 +1570,12 @@ RelationClearRelation(Relation relation, bool rebuild)
else
{
/*
- * When rebuilding an open relcache entry, must preserve ref count
- * and rd_createSubid state. Also attempt to preserve the
- * tupledesc and rewrite-rule substructures in place.
+ * When rebuilding an open relcache entry, must preserve ref count and
+ * rd_createSubid state. Also attempt to preserve the tupledesc and
+ * rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner; which
- * is good because whatever ref counts the entry may have do not
+ * Note that this process does not touch CurrentResourceOwner; which is
+ * good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
@@ -1773,8 +1769,8 @@ RelationCacheInvalidate(void)
{
/*
* Add this entry to list of stuff to rebuild in second pass.
- * pg_class_oid_index goes on the front of rebuildFirstList,
- * other nailed indexes on the back, and everything else into
+ * pg_class_oid_index goes on the front of rebuildFirstList, other
+ * nailed indexes on the back, and everything else into
* rebuildList (in no particular order).
*/
if (relation->rd_isnailed &&
@@ -1793,9 +1789,9 @@ RelationCacheInvalidate(void)
rebuildList = list_concat(rebuildFirstList, rebuildList);
/*
- * Now zap any remaining smgr cache entries. This must happen before
- * we start to rebuild entries, since that may involve catalog fetches
- * which will re-open catalog files.
+ * Now zap any remaining smgr cache entries. This must happen before we
+ * start to rebuild entries, since that may involve catalog fetches which
+ * will re-open catalog files.
*/
smgrcloseall();
@@ -1832,13 +1828,13 @@ AtEOXact_RelationCache(bool isCommit)
/*
* To speed up transaction exit, we want to avoid scanning the relcache
- * unless there is actually something for this routine to do. Other
- * than the debug-only Assert checks, most transactions don't create
- * any work for us to do here, so we keep a static flag that gets set
- * if there is anything to do. (Currently, this means either a relation
- * is created in the current xact, or an index list is forced.) For
- * simplicity, the flag remains set till end of top-level transaction,
- * even though we could clear it at subtransaction end in some cases.
+ * unless there is actually something for this routine to do. Other than
+ * the debug-only Assert checks, most transactions don't create any work
+ * for us to do here, so we keep a static flag that gets set if there is
+ * anything to do. (Currently, this means either a relation is created in
+ * the current xact, or an index list is forced.) For simplicity, the
+ * flag remains set till end of top-level transaction, even though we
+ * could clear it at subtransaction end in some cases.
*/
if (!need_eoxact_work
#ifdef USE_ASSERT_CHECKING
@@ -1857,10 +1853,9 @@ AtEOXact_RelationCache(bool isCommit)
* The relcache entry's ref count should be back to its normal
* not-in-a-transaction state: 0 unless it's nailed in cache.
*
- * In bootstrap mode, this is NOT true, so don't check it ---
- * the bootstrap code expects relations to stay open across
- * start/commit transaction calls. (That seems bogus, but it's
- * not worth fixing.)
+ * In bootstrap mode, this is NOT true, so don't check it --- the
+ * bootstrap code expects relations to stay open across start/commit
+ * transaction calls. (That seems bogus, but it's not worth fixing.)
*/
#ifdef USE_ASSERT_CHECKING
if (!IsBootstrapProcessingMode())
@@ -1939,8 +1934,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
/*
* Is it a relation created in the current subtransaction?
*
- * During subcommit, mark it as belonging to the parent, instead.
- * During subabort, simply delete the relcache entry.
+ * During subcommit, mark it as belonging to the parent, instead. During
+ * subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
@@ -2041,11 +2036,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* create a new tuple descriptor from the one passed in. We do this
- * partly to copy it into the cache context, and partly because the
- * new relation can't have any defaults or constraints yet; they have
- * to be added in later steps, because they require additions to
- * multiple system catalogs. We can copy attnotnull constraints here,
- * however.
+ * partly to copy it into the cache context, and partly because the new
+ * relation can't have any defaults or constraints yet; they have to be
+ * added in later steps, because they require additions to multiple system
+ * catalogs. We can copy attnotnull constraints here, however.
*/
rel->rd_att = CreateTupleDescCopy(tupDesc);
has_not_null = false;
@@ -2079,9 +2073,9 @@ RelationBuildLocalRelation(const char *relname,
rel->rd_rel->relowner = BOOTSTRAP_SUPERUSERID;
/*
- * Insert relation physical and logical identifiers (OIDs) into the
- * right places. Note that the physical ID (relfilenode) is initially
- * the same as the logical ID (OID).
+ * Insert relation physical and logical identifiers (OIDs) into the right
+ * places. Note that the physical ID (relfilenode) is initially the same
+ * as the logical ID (OID).
*/
rel->rd_rel->relisshared = shared_relation;
@@ -2157,8 +2151,8 @@ RelationCacheInitialize(void)
/*
* Try to load the relcache cache file. If successful, we're done for
- * now. Otherwise, initialize the cache with pre-made descriptors for
- * the critical "nailed-in" system catalogs.
+ * now. Otherwise, initialize the cache with pre-made descriptors for the
+ * critical "nailed-in" system catalogs.
*/
if (IsBootstrapProcessingMode() ||
!load_relcache_init_file())
@@ -2197,24 +2191,22 @@ RelationCacheInitializePhase2(void)
return;
/*
- * If we didn't get the critical system indexes loaded into relcache,
- * do so now. These are critical because the catcache depends on them
- * for catcache fetches that are done during relcache load. Thus, we
- * have an infinite-recursion problem. We can break the recursion by
- * doing heapscans instead of indexscans at certain key spots. To
- * avoid hobbling performance, we only want to do that until we have
- * the critical indexes loaded into relcache. Thus, the flag
- * criticalRelcachesBuilt is used to decide whether to do heapscan or
- * indexscan at the key spots, and we set it true after we've loaded
- * the critical indexes.
+ * If we didn't get the critical system indexes loaded into relcache, do
+ * so now. These are critical because the catcache depends on them for
+ * catcache fetches that are done during relcache load. Thus, we have an
+ * infinite-recursion problem. We can break the recursion by doing
+ * heapscans instead of indexscans at certain key spots. To avoid hobbling
+ * performance, we only want to do that until we have the critical indexes
+ * loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
+ * decide whether to do heapscan or indexscan at the key spots, and we set
+ * it true after we've loaded the critical indexes.
*
- * The critical indexes are marked as "nailed in cache", partly to make
- * it easy for load_relcache_init_file to count them, but mainly
- * because we cannot flush and rebuild them once we've set
- * criticalRelcachesBuilt to true. (NOTE: perhaps it would be
- * possible to reload them by temporarily setting
- * criticalRelcachesBuilt to false again. For now, though, we just
- * nail 'em in.)
+ * The critical indexes are marked as "nailed in cache", partly to make it
+ * easy for load_relcache_init_file to count them, but mainly because we
+ * cannot flush and rebuild them once we've set criticalRelcachesBuilt to
+ * true. (NOTE: perhaps it would be possible to reload them by
+ * temporarily setting criticalRelcachesBuilt to false again. For now,
+ * though, we just nail 'em in.)
*/
if (!criticalRelcachesBuilt)
{
@@ -2240,12 +2232,12 @@ RelationCacheInitializePhase2(void)
}
/*
- * Now, scan all the relcache entries and update anything that might
- * be wrong in the results from formrdesc or the relcache cache file.
- * If we faked up relcache entries using formrdesc, then read the real
- * pg_class rows and replace the fake entries with them. Also, if any
- * of the relcache entries have rules or triggers, load that info the
- * hard way since it isn't recorded in the cache file.
+ * Now, scan all the relcache entries and update anything that might be
+ * wrong in the results from formrdesc or the relcache cache file. If we
+ * faked up relcache entries using formrdesc, then read the real pg_class
+ * rows and replace the fake entries with them. Also, if any of the
+ * relcache entries have rules or triggers, load that info the hard way
+ * since it isn't recorded in the cache file.
*/
hash_seq_init(&status, RelationIdCache);
@@ -2262,7 +2254,7 @@ RelationCacheInitializePhase2(void)
Form_pg_class relp;
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(htup))
elog(FATAL, "cache lookup failed for relation %u",
@@ -2311,11 +2303,10 @@ RelationCacheInitializePhase3(void)
if (needNewCacheFile)
{
/*
- * Force all the catcaches to finish initializing and thereby open
- * the catalogs and indexes they use. This will preload the
- * relcache with entries for all the most important system
- * catalogs and indexes, so that the init file will be most useful
- * for future backends.
+ * Force all the catcaches to finish initializing and thereby open the
+ * catalogs and indexes they use. This will preload the relcache with
+ * entries for all the most important system catalogs and indexes, so
+ * that the init file will be most useful for future backends.
*/
InitCatalogCachePhase2();
@@ -2349,7 +2340,7 @@ GetPgIndexDescriptor(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
pgindexdesc = CreateTemplateTupleDesc(Natts_pg_index, false);
- pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
+ pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
pgindexdesc->tdtypmod = -1;
for (i = 0; i < Natts_pg_index; i++)
@@ -2405,7 +2396,7 @@ AttrDefaultFetch(Relation relation)
continue;
if (attrdef[i].adbin != NULL)
elog(WARNING, "multiple attrdef records found for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
found++;
@@ -2415,12 +2406,12 @@ AttrDefaultFetch(Relation relation)
adrel->rd_att, &isnull);
if (isnull)
elog(WARNING, "null adbin for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
attrdef[i].adbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
break;
}
@@ -2472,7 +2463,7 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
- NameStr(conform->conname));
+ NameStr(conform->conname));
/* Grab and test conbin is actually set */
val = fastgetattr(htup,
@@ -2483,8 +2474,8 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
found++;
}
@@ -2514,7 +2505,7 @@ CheckConstraintFetch(Relation relation)
*
* Since shared cache inval causes the relcache's copy of the list to go away,
* we return a copy of the list palloc'd in the caller's context. The caller
- * may list_free() the returned list after scanning it. This is necessary
+ * may list_free() the returned list after scanning it. This is necessary
* since the caller will typically be doing syscache lookups on the relevant
* indexes, and syscache lookup could cause SI messages to be processed!
*
@@ -2539,10 +2530,10 @@ RelationGetIndexList(Relation relation)
return list_copy(relation->rd_indexlist);
/*
- * We build the list we intend to return (in the caller's context)
- * while doing the scan. After successfully completing the scan, we
- * copy that list into the relcache entry. This avoids cache-context
- * memory leakage if we get some sort of error partway through.
+ * We build the list we intend to return (in the caller's context) while
+ * doing the scan. After successfully completing the scan, we copy that
+ * list into the relcache entry. This avoids cache-context memory leakage
+ * if we get some sort of error partway through.
*/
result = NIL;
oidIndex = InvalidOid;
@@ -2662,9 +2653,9 @@ RelationGetOidIndex(Relation relation)
List *ilist;
/*
- * If relation doesn't have OIDs at all, caller is probably confused.
- * (We could just silently return InvalidOid, but it seems better to
- * throw an assertion.)
+ * If relation doesn't have OIDs at all, caller is probably confused. (We
+ * could just silently return InvalidOid, but it seems better to throw an
+ * assertion.)
*/
Assert(relation->rd_rel->relhasoids);
@@ -2707,10 +2698,9 @@ RelationGetIndexExpressions(Relation relation)
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
exprsDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indexprs,
@@ -2775,10 +2765,9 @@ RelationGetIndexPredicate(Relation relation)
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
predDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indpred,
@@ -2795,8 +2784,8 @@ RelationGetIndexPredicate(Relation relation)
* will be comparing it to similarly-processed qual clauses, and may fail
* to detect valid matches without this. This must match the processing
* done to qual clauses in preprocess_expression()! (We can skip the
- * stuff involving subqueries, however, since we don't allow any in
- * index predicates.)
+ * stuff involving subqueries, however, since we don't allow any in index
+ * predicates.)
*/
result = (List *) eval_const_expressions((Node *) result);
@@ -2897,9 +2886,9 @@ load_relcache_init_file(void)
}
/*
- * Read the index relcache entries from the file. Note we will not
- * enter any of them into the cache if the read fails partway through;
- * this helps to guard against broken init files.
+ * Read the index relcache entries from the file. Note we will not enter
+ * any of them into the cache if the read fails partway through; this
+ * helps to guard against broken init files.
*/
max_rels = 100;
rels = (Relation *) palloc(max_rels * sizeof(Relation));
@@ -3086,10 +3075,10 @@ load_relcache_init_file(void)
/*
* Rules and triggers are not saved (mainly because the internal
- * format is complex and subject to change). They must be rebuilt
- * if needed by RelationCacheInitializePhase2. This is not
- * expected to be a big performance hit since few system catalogs
- * have such. Ditto for index expressions and predicates.
+ * format is complex and subject to change). They must be rebuilt if
+ * needed by RelationCacheInitializePhase2. This is not expected to
+ * be a big performance hit since few system catalogs have such.
+ * Ditto for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
@@ -3114,17 +3103,17 @@ load_relcache_init_file(void)
/*
* Recompute lock and physical addressing info. This is needed in
- * case the pg_internal.init file was copied from some other
- * database by CREATE DATABASE.
+ * case the pg_internal.init file was copied from some other database
+ * by CREATE DATABASE.
*/
RelationInitLockInfo(rel);
RelationInitPhysicalAddr(rel);
}
/*
- * We reached the end of the init file without apparent problem. Did
- * we get the right number of nailed items? (This is a useful
- * crosscheck in case the set of critical rels or indexes changes.)
+ * We reached the end of the init file without apparent problem. Did we
+ * get the right number of nailed items? (This is a useful crosscheck in
+ * case the set of critical rels or indexes changes.)
*/
if (nailed_rels != NUM_CRITICAL_RELS ||
nailed_indexes != NUM_CRITICAL_INDEXES)
@@ -3150,9 +3139,9 @@ load_relcache_init_file(void)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying
- * to free the clutter we just allocated; it's not in the relcache so
- * it won't hurt.
+ * init file is broken, so do it the hard way. We don't bother trying to
+ * free the clutter we just allocated; it's not in the relcache so it
+ * won't hurt.
*/
read_failed:
pfree(rels);
@@ -3180,8 +3169,8 @@ write_relcache_init_file(void)
/*
* We must write a temporary file and rename it into place. Otherwise,
- * another backend starting at about the same time might crash trying
- * to read the partially-complete file.
+ * another backend starting at about the same time might crash trying to
+ * read the partially-complete file.
*/
snprintf(tempfilename, sizeof(tempfilename), "%s/%s.%d",
DatabasePath, RELCACHE_INIT_FILENAME, MyProcPid);
@@ -3201,7 +3190,7 @@ write_relcache_init_file(void)
(errcode_for_file_access(),
errmsg("could not create relation-cache initialization file \"%s\": %m",
tempfilename),
- errdetail("Continuing anyway, but there's something wrong.")));
+ errdetail("Continuing anyway, but there's something wrong.")));
return;
}
@@ -3308,11 +3297,11 @@ write_relcache_init_file(void)
/*
* Now we have to check whether the data we've so painstakingly
- * accumulated is already obsolete due to someone else's
- * just-committed catalog changes. If so, we just delete the temp
- * file and leave it to the next backend to try again. (Our own
- * relcache entries will be updated by SI message processing, but we
- * can't be sure whether what we wrote out was up-to-date.)
+ * accumulated is already obsolete due to someone else's just-committed
+ * catalog changes. If so, we just delete the temp file and leave it to
+ * the next backend to try again. (Our own relcache entries will be
+ * updated by SI message processing, but we can't be sure whether what we
+ * wrote out was up-to-date.)
*
* This mustn't run concurrently with RelationCacheInitFileInvalidate, so
* grab a serialization lock for the duration.
@@ -3323,8 +3312,8 @@ write_relcache_init_file(void)
AcceptInvalidationMessages();
/*
- * If we have received any SI relcache invals since backend start,
- * assume we may have written out-of-date data.
+ * If we have received any SI relcache invals since backend start, assume
+ * we may have written out-of-date data.
*/
if (relcacheInvalsReceived == 0L)
{
@@ -3332,10 +3321,10 @@ write_relcache_init_file(void)
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
- * Note: a failure here is possible under Cygwin, if some other
- * backend is holding open an unlinked-but-not-yet-gone init file.
- * So treat this as a noncritical failure; just remove the useless
- * temp file on failure.
+ * Note: a failure here is possible under Cygwin, if some other backend
+ * is holding open an unlinked-but-not-yet-gone init file. So treat
+ * this as a noncritical failure; just remove the useless temp file on
+ * failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
@@ -3401,11 +3390,10 @@ RelationCacheInitFileInvalidate(bool beforeSend)
/*
* We need to interlock this against write_relcache_init_file, to
* guard against possibility that someone renames a new-but-
- * already-obsolete init file into place just after we unlink.
- * With the interlock, it's certain that write_relcache_init_file
- * will notice our SI inval message before renaming into place, or
- * else that we will execute second and successfully unlink the
- * file.
+ * already-obsolete init file into place just after we unlink. With
+ * the interlock, it's certain that write_relcache_init_file will
+ * notice our SI inval message before renaming into place, or else
+ * that we will execute second and successfully unlink the file.
*/
LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);
unlink(initfilename);
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index cd24460857f..1ee237fafd9 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.100 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.101 2005/10/15 02:49:32 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
@@ -56,7 +56,7 @@
Add your entry to the cacheinfo[] array below. All cache lists are
alphabetical, so add it in the proper place. Specify the relation
- OID, index OID, number of keys, and key attribute numbers. If the
+ OID, index OID, number of keys, and key attribute numbers. If the
relation contains tuples that are associated with a particular relation
(for example, its attributes, rules, triggers, etc) then specify the
attribute number that contains the OID of the associated relation.
@@ -92,7 +92,7 @@ struct cachedesc
};
static const struct cachedesc cacheinfo[] = {
- {AggregateRelationId, /* AGGFNOID */
+ {AggregateRelationId, /* AGGFNOID */
AggregateFnoidIndexId,
0,
1,
@@ -102,7 +102,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AccessMethodRelationId, /* AMNAME */
+ {AccessMethodRelationId, /* AMNAME */
AmNameIndexId,
0,
1,
@@ -112,7 +112,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AccessMethodRelationId, /* AMOID */
+ {AccessMethodRelationId, /* AMOID */
AmOidIndexId,
0,
1,
@@ -152,7 +152,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_amproc_amprocnum,
0
}},
- {AttributeRelationId, /* ATTNAME */
+ {AttributeRelationId, /* ATTNAME */
AttributeRelidNameIndexId,
Anum_pg_attribute_attrelid,
2,
@@ -162,7 +162,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AttributeRelationId, /* ATTNUM */
+ {AttributeRelationId, /* ATTNUM */
AttributeRelidNumIndexId,
Anum_pg_attribute_attrelid,
2,
@@ -172,7 +172,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMMEMROLE */
+ {AuthMemRelationId, /* AUTHMEMMEMROLE */
AuthMemMemRoleIndexId,
0,
2,
@@ -182,7 +182,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMROLEMEM */
+ {AuthMemRelationId, /* AUTHMEMROLEMEM */
AuthMemRoleMemIndexId,
0,
2,
@@ -192,7 +192,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthIdRelationId, /* AUTHNAME */
+ {AuthIdRelationId, /* AUTHNAME */
AuthIdRolnameIndexId,
0,
1,
@@ -202,7 +202,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthIdRelationId, /* AUTHOID */
+ {AuthIdRelationId, /* AUTHOID */
AuthIdOidIndexId,
0,
1,
@@ -213,7 +213,7 @@ static const struct cachedesc cacheinfo[] = {
0
}},
{
- CastRelationId, /* CASTSOURCETARGET */
+ CastRelationId, /* CASTSOURCETARGET */
CastSourceTargetIndexId,
0,
2,
@@ -223,7 +223,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {OperatorClassRelationId, /* CLAAMNAMENSP */
+ {OperatorClassRelationId, /* CLAAMNAMENSP */
OpclassAmNameNspIndexId,
0,
3,
@@ -233,7 +233,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_opclass_opcnamespace,
0
}},
- {OperatorClassRelationId, /* CLAOID */
+ {OperatorClassRelationId, /* CLAOID */
OpclassOidIndexId,
0,
1,
@@ -243,7 +243,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ConversionRelationId, /* CONDEFAULT */
+ {ConversionRelationId, /* CONDEFAULT */
ConversionDefaultIndexId,
0,
4,
@@ -253,7 +253,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_conversion_contoencoding,
ObjectIdAttributeNumber,
}},
- {ConversionRelationId, /* CONNAMENSP */
+ {ConversionRelationId, /* CONNAMENSP */
ConversionNameNspIndexId,
0,
2,
@@ -263,7 +263,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ConversionRelationId, /* CONOID */
+ {ConversionRelationId, /* CONOID */
ConversionOidIndexId,
0,
1,
@@ -273,7 +273,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {IndexRelationId, /* INDEXRELID */
+ {IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
Anum_pg_index_indrelid,
1,
@@ -283,7 +283,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {InheritsRelationId, /* INHRELID */
+ {InheritsRelationId, /* INHRELID */
InheritsRelidSeqnoIndexId,
Anum_pg_inherits_inhrelid,
2,
@@ -293,7 +293,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {LanguageRelationId, /* LANGNAME */
+ {LanguageRelationId, /* LANGNAME */
LanguageNameIndexId,
0,
1,
@@ -303,7 +303,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {LanguageRelationId, /* LANGOID */
+ {LanguageRelationId, /* LANGOID */
LanguageOidIndexId,
0,
1,
@@ -313,7 +313,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {NamespaceRelationId, /* NAMESPACENAME */
+ {NamespaceRelationId, /* NAMESPACENAME */
NamespaceNameIndexId,
0,
1,
@@ -323,7 +323,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {NamespaceRelationId, /* NAMESPACEOID */
+ {NamespaceRelationId, /* NAMESPACEOID */
NamespaceOidIndexId,
0,
1,
@@ -333,7 +333,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {OperatorRelationId, /* OPERNAMENSP */
+ {OperatorRelationId, /* OPERNAMENSP */
OperatorNameNspIndexId,
0,
4,
@@ -343,7 +343,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_operator_oprright,
Anum_pg_operator_oprnamespace
}},
- {OperatorRelationId, /* OPEROID */
+ {OperatorRelationId, /* OPEROID */
OperatorOidIndexId,
0,
1,
@@ -353,7 +353,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ProcedureRelationId, /* PROCNAMEARGSNSP */
+ {ProcedureRelationId, /* PROCNAMEARGSNSP */
ProcedureNameArgsNspIndexId,
0,
3,
@@ -363,7 +363,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_proc_pronamespace,
0
}},
- {ProcedureRelationId, /* PROCOID */
+ {ProcedureRelationId, /* PROCOID */
ProcedureOidIndexId,
0,
1,
@@ -373,7 +373,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RelationRelationId, /* RELNAMENSP */
+ {RelationRelationId, /* RELNAMENSP */
ClassNameNspIndexId,
ObjectIdAttributeNumber,
2,
@@ -383,7 +383,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RelationRelationId, /* RELOID */
+ {RelationRelationId, /* RELOID */
ClassOidIndexId,
ObjectIdAttributeNumber,
1,
@@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RewriteRelationId, /* RULERELNAME */
+ {RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
Anum_pg_rewrite_ev_class,
2,
@@ -403,7 +403,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {StatisticRelationId, /* STATRELATT */
+ {StatisticRelationId, /* STATRELATT */
StatisticRelidAttnumIndexId,
Anum_pg_statistic_starelid,
2,
@@ -413,7 +413,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {TypeRelationId, /* TYPENAMENSP */
+ {TypeRelationId, /* TYPENAMENSP */
TypeNameNspIndexId,
Anum_pg_type_typrelid,
2,
@@ -423,7 +423,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {TypeRelationId, /* TYPEOID */
+ {TypeRelationId, /* TYPEOID */
TypeOidIndexId,
Anum_pg_type_typrelid,
1,
@@ -435,7 +435,8 @@ static const struct cachedesc cacheinfo[] = {
}}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
@@ -697,10 +698,10 @@ SysCacheGetAttr(int cacheId, HeapTuple tup,
bool *isNull)
{
/*
- * We just need to get the TupleDesc out of the cache entry, and then
- * we can apply heap_getattr(). We expect that the cache control data
- * is currently valid --- if the caller recently fetched the tuple,
- * then it should be.
+ * We just need to get the TupleDesc out of the cache entry, and then we
+ * can apply heap_getattr(). We expect that the cache control data is
+ * currently valid --- if the caller recently fetched the tuple, then it
+ * should be.
*/
if (cacheId < 0 || cacheId >= SysCacheSize)
elog(ERROR, "invalid cache id: %d", cacheId);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index b0b890516df..ff9cc975437 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -135,9 +135,9 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry == NULL)
{
/*
- * If we didn't find one, we want to make one. But first look up
- * the pg_type row, just to make sure we don't make a cache entry
- * for an invalid type OID.
+ * If we didn't find one, we want to make one. But first look up the
+ * pg_type row, just to make sure we don't make a cache entry for an
+ * invalid type OID.
*/
HeapTuple tp;
Form_pg_type typtup;
@@ -190,8 +190,8 @@ lookup_type_cache(Oid type_id, int flags)
{
/*
* If we find a btree opclass where previously we only found a
- * hash opclass, forget the hash equality operator so we can
- * use the btree operator instead.
+ * hash opclass, forget the hash equality operator so we can use
+ * the btree operator instead.
*/
typentry->eq_opr = InvalidOid;
typentry->eq_opr_finfo.fn_oid = InvalidOid;
@@ -224,7 +224,7 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry->btree_opc != InvalidOid)
typentry->gt_opr = get_opclass_member(typentry->btree_opc,
InvalidOid,
- BTGreaterStrategyNumber);
+ BTGreaterStrategyNumber);
}
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
@@ -238,9 +238,9 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
- * which is not quite right (they're really in DynaHashContext) but
- * this will do for our purposes.
+ * Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
+ * is not quite right (they're really in DynaHashContext) but this will do
+ * for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@@ -277,9 +277,9 @@ lookup_type_cache(Oid type_id, int flags)
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
- * Notice that we simply store a link to the relcache's tupdesc.
- * Since we are relying on relcache to detect cache flush events,
- * there's not a lot of point to maintaining an independent copy.
+ * Notice that we simply store a link to the relcache's tupdesc. Since
+ * we are relying on relcache to detect cache flush events, there's
+ * not a lot of point to maintaining an independent copy.
*/
typentry->tupDesc = RelationGetDescr(rel);
@@ -316,12 +316,11 @@ lookup_default_opclass(Oid type_id, Oid am_id)
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
+ * require the user to specify which one he wants. If we find more than
+ * one exact match, then someone put bogus entries in pg_opclass.
*
- * This is the same logic as GetDefaultOpClass() in indexcmds.c, except
- * that we consider all opclasses, regardless of the current search
- * path.
+ * This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
+ * we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
@@ -361,8 +360,8 @@ lookup_default_opclass(Oid type_id, Oid am_id)
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple default operator classes for data type %s",
- format_type_be(type_id))));
+ errmsg("there are multiple default operator classes for data type %s",
+ format_type_be(type_id))));
if (ncompatible == 1)
return compatibleOid;
@@ -506,7 +505,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
int32 newlen = RecordCacheArrayLen * 2;
RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
RecordCacheArrayLen = newlen;
}
diff --git a/src/backend/utils/error/assert.c b/src/backend/utils/error/assert.c
index 43205d07fda..d55c9d4f630 100644
--- a/src/backend/utils/error/assert.c
+++ b/src/backend/utils/error/assert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.30 2004/12/31 22:01:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.31 2005/10/15 02:49:32 momjian Exp $
*
* NOTE
* This should eventually work with elog()
@@ -42,8 +42,8 @@ ExceptionalCondition(char *conditionName,
#ifdef SLEEP_ON_ASSERT
/*
- * It would be nice to use pg_usleep() here, but only does 2000 sec or
- * 33 minutes, which seems too short.
+ * It would be nice to use pg_usleep() here, but only does 2000 sec or 33
+ * minutes, which seems too short.
*/
sleep(1000000);
#endif
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index d24242e8409..b4f1000be86 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -25,7 +25,7 @@
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
@@ -42,7 +42,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.164 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.165 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -188,8 +188,8 @@ errstart(int elevel, const char *filename, int lineno,
/*
* Now decide whether we need to process this report at all; if it's
- * warning or less and not enabled for logging, just return FALSE
- * without starting up any error logging machinery.
+ * warning or less and not enabled for logging, just return FALSE without
+ * starting up any error logging machinery.
*/
/* Determine whether message is enabled for server log output */
@@ -256,8 +256,8 @@ errstart(int elevel, const char *filename, int lineno,
MemoryContextReset(ErrorContext);
/*
- * If we recurse more than once, the problem might be something
- * broken in a context traceback routine. Abandon them too.
+ * If we recurse more than once, the problem might be something broken
+ * in a context traceback routine. Abandon them too.
*/
if (recursion_depth > 2)
error_context_stack = NULL;
@@ -316,15 +316,15 @@ errfinish(int dummy,...)
CHECK_STACK_DEPTH();
/*
- * Do processing in ErrorContext, which we hope has enough reserved
- * space to report an error.
+ * Do processing in ErrorContext, which we hope has enough reserved space
+ * to report an error.
*/
oldcontext = MemoryContextSwitchTo(ErrorContext);
/*
* Call any context callback functions. Errors occurring in callback
- * functions will be treated as recursive errors --- this ensures we
- * will avoid infinite recursion (see errstart).
+ * functions will be treated as recursive errors --- this ensures we will
+ * avoid infinite recursion (see errstart).
*/
for (econtext = error_context_stack;
econtext != NULL;
@@ -333,34 +333,32 @@ errfinish(int dummy,...)
/*
* If ERROR (not more nor less) we pass it off to the current handler.
- * Printing it and popping the stack is the responsibility of
- * the handler.
+ * Printing it and popping the stack is the responsibility of the handler.
*/
if (elevel == ERROR)
{
/*
- * We do some minimal cleanup before longjmp'ing so that handlers
- * can execute in a reasonably sane state.
+ * We do some minimal cleanup before longjmp'ing so that handlers can
+ * execute in a reasonably sane state.
*/
/* This is just in case the error came while waiting for input */
ImmediateInterruptOK = false;
/*
- * Reset InterruptHoldoffCount in case we ereport'd from
- * inside an interrupt holdoff section. (We assume here that
- * no handler will itself be inside a holdoff section. If
- * necessary, such a handler could save and restore
- * InterruptHoldoffCount for itself, but this should make life
- * easier for most.)
+ * Reset InterruptHoldoffCount in case we ereport'd from inside an
+ * interrupt holdoff section. (We assume here that no handler will
+ * itself be inside a holdoff section. If necessary, such a handler
+ * could save and restore InterruptHoldoffCount for itself, but this
+ * should make life easier for most.)
*/
InterruptHoldoffCount = 0;
- CritSectionCount = 0; /* should be unnecessary, but... */
+ CritSectionCount = 0; /* should be unnecessary, but... */
/*
- * Note that we leave CurrentMemoryContext set to ErrorContext.
- * The handler should reset it to something else soon.
+ * Note that we leave CurrentMemoryContext set to ErrorContext. The
+ * handler should reset it to something else soon.
*/
recursion_depth--;
@@ -370,12 +368,11 @@ errfinish(int dummy,...)
/*
* If we are doing FATAL or PANIC, abort any old-style COPY OUT in
* progress, so that we can report the message before dying. (Without
- * this, pq_putmessage will refuse to send the message at all, which
- * is what we want for NOTICE messages, but not for fatal exits.) This
- * hack is necessary because of poor design of old-style copy
- * protocol. Note we must do this even if client is fool enough to
- * have set client_min_messages above FATAL, so don't look at
- * output_to_client.
+ * this, pq_putmessage will refuse to send the message at all, which is
+ * what we want for NOTICE messages, but not for fatal exits.) This hack
+ * is necessary because of poor design of old-style copy protocol. Note
+ * we must do this even if client is fool enough to have set
+ * client_min_messages above FATAL, so don't look at output_to_client.
*/
if (elevel >= FATAL && whereToSendOutput == Remote)
pq_endcopyout(true);
@@ -412,28 +409,27 @@ errfinish(int dummy,...)
ImmediateInterruptOK = false;
/*
- * If we just reported a startup failure, the client will
- * disconnect on receiving it, so don't send any more to the
- * client.
+ * If we just reported a startup failure, the client will disconnect
+ * on receiving it, so don't send any more to the client.
*/
if (PG_exception_stack == NULL && whereToSendOutput == Remote)
whereToSendOutput = None;
/*
* fflush here is just to improve the odds that we get to see the
- * error message, in case things are so hosed that proc_exit
- * crashes. Any other code you might be tempted to add here
- * should probably be in an on_proc_exit callback instead.
+ * error message, in case things are so hosed that proc_exit crashes.
+ * Any other code you might be tempted to add here should probably be
+ * in an on_proc_exit callback instead.
*/
fflush(stdout);
fflush(stderr);
/*
- * If proc_exit is already running, we exit with nonzero exit code
- * to indicate that something's pretty wrong. We also want to
- * exit with nonzero exit code if not running under the postmaster
- * (for example, if we are being run from the initdb script, we'd
- * better return an error status).
+ * If proc_exit is already running, we exit with nonzero exit code to
+ * indicate that something's pretty wrong. We also want to exit with
+ * nonzero exit code if not running under the postmaster (for example,
+ * if we are being run from the initdb script, we'd better return an
+ * error status).
*/
proc_exit(proc_exit_inprogress || !IsUnderPostmaster);
}
@@ -441,8 +437,8 @@ errfinish(int dummy,...)
if (elevel >= PANIC)
{
/*
- * Serious crash time. Postmaster will observe nonzero process
- * exit status and kill the other backends too.
+ * Serious crash time. Postmaster will observe nonzero process exit
+ * status and kill the other backends too.
*
* XXX: what if we are *in* the postmaster? abort() won't kill our
* children...
@@ -977,8 +973,8 @@ CopyErrorData(void)
ErrorData *newedata;
/*
- * we don't increment recursion_depth because out-of-memory here does
- * not indicate a problem within the error subsystem.
+ * we don't increment recursion_depth because out-of-memory here does not
+ * indicate a problem within the error subsystem.
*/
CHECK_STACK_DEPTH();
@@ -1037,9 +1033,9 @@ void
FlushErrorState(void)
{
/*
- * Reset stack to empty. The only case where it would be more than
- * one deep is if we serviced an error that interrupted construction
- * of another message. We assume control escaped out of that message
+ * Reset stack to empty. The only case where it would be more than one
+ * deep is if we serviced an error that interrupted construction of
+ * another message. We assume control escaped out of that message
* construction and won't ever go back.
*/
errordata_stack_depth = -1;
@@ -1117,7 +1113,7 @@ DebugFileOpen(void)
0666)) < 0)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", OutputFileName)));
+ errmsg("could not open file \"%s\": %m", OutputFileName)));
istty = isatty(fd);
close(fd);
@@ -1131,17 +1127,17 @@ DebugFileOpen(void)
OutputFileName)));
/*
- * If the file is a tty and we're running under the postmaster,
- * try to send stdout there as well (if it isn't a tty then stderr
- * will block out stdout, so we may as well let stdout go wherever
- * it was going before).
+ * If the file is a tty and we're running under the postmaster, try to
+ * send stdout there as well (if it isn't a tty then stderr will block
+ * out stdout, so we may as well let stdout go wherever it was going
+ * before).
*/
if (istty && IsUnderPostmaster)
if (!freopen(OutputFileName, "a", stdout))
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not reopen file \"%s\" as stdout: %m",
- OutputFileName)));
+ errmsg("could not reopen file \"%s\" as stdout: %m",
+ OutputFileName)));
}
}
@@ -1156,13 +1152,13 @@ void
set_syslog_parameters(const char *ident, int facility)
{
/*
- * guc.c is likely to call us repeatedly with same parameters, so
- * don't thrash the syslog connection unnecessarily. Also, we do not
- * re-open the connection until needed, since this routine will get called
- * whether or not Log_destination actually mentions syslog.
+ * guc.c is likely to call us repeatedly with same parameters, so don't
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * the connection until needed, since this routine will get called whether
+ * or not Log_destination actually mentions syslog.
*
- * Note that we make our own copy of the ident string rather than relying
- * on guc.c's. This may be overly paranoid, but it ensures that we cannot
+ * Note that we make our own copy of the ident string rather than relying on
+ * guc.c's. This may be overly paranoid, but it ensures that we cannot
* accidentally free a string that syslog is still using.
*/
if (syslog_ident == NULL || strcmp(syslog_ident, ident) != 0 ||
@@ -1212,13 +1208,12 @@ write_syslog(int level, const char *line)
seq++;
/*
- * Our problem here is that many syslog implementations don't handle
- * long messages in an acceptable manner. While this function doesn't
- * help that fact, it does work around by splitting up messages into
- * smaller pieces.
+ * Our problem here is that many syslog implementations don't handle long
+ * messages in an acceptable manner. While this function doesn't help that
+ * fact, it does work around by splitting up messages into smaller pieces.
*
- * We divide into multiple syslog() calls if message is too long
- * or if the message contains embedded NewLine(s) '\n'.
+ * We divide into multiple syslog() calls if message is too long or if the
+ * message contains embedded NewLine(s) '\n'.
*/
len = strlen(line);
if (len > PG_SYSLOG_LIMIT || strchr(line, '\n') != NULL)
@@ -1290,7 +1285,7 @@ write_syslog(int level, const char *line)
static void
write_eventlog(int level, const char *line)
{
- int eventlevel = EVENTLOG_ERROR_TYPE;
+ int eventlevel = EVENTLOG_ERROR_TYPE;
static HANDLE evtHandle = INVALID_HANDLE_VALUE;
if (evtHandle == INVALID_HANDLE_VALUE)
@@ -1356,9 +1351,9 @@ log_line_prefix(StringInfo buf)
int i;
/*
- * This is one of the few places where we'd rather not inherit a
- * static variable's value from the postmaster. But since we will,
- * reset it when MyProcPid changes.
+ * This is one of the few places where we'd rather not inherit a static
+ * variable's value from the postmaster. But since we will, reset it when
+ * MyProcPid changes.
*/
if (log_my_pid != MyProcPid)
{
@@ -1412,8 +1407,8 @@ log_line_prefix(StringInfo buf)
if (MyProcPort)
{
appendStringInfo(buf, "%lx.%x",
- (long) (MyProcPort->session_start.tv_sec),
- MyProcPid);
+ (long) (MyProcPort->session_start.tv_sec),
+ MyProcPid);
}
break;
case 'p':
@@ -1425,21 +1420,22 @@ log_line_prefix(StringInfo buf)
case 'm':
{
/*
- * Note: for %m, %t, and %s we deliberately use the
- * C library's strftime/localtime, and not the
- * equivalent functions from src/timezone. This
- * ensures that all backends will report log entries
- * in the same timezone, namely whatever C-library
- * setting they inherit from the postmaster. If we
- * used src/timezone then local settings of the
- * TimeZone GUC variable would confuse the log.
+ * Note: for %m, %t, and %s we deliberately use the C
+ * library's strftime/localtime, and not the equivalent
+ * functions from src/timezone. This ensures that all
+ * backends will report log entries in the same timezone,
+ * namely whatever C-library setting they inherit from the
+ * postmaster. If we used src/timezone then local
+ * settings of the TimeZone GUC variable would confuse the
+ * log.
*/
- time_t stamp_time;
- char strfbuf[128], msbuf[8];
+ time_t stamp_time;
+ char strfbuf[128],
+ msbuf[8];
struct timeval tv;
gettimeofday(&tv, NULL);
- stamp_time = tv.tv_sec;
+ stamp_time = tv.tv_sec;
strftime(strfbuf, sizeof(strfbuf),
/* leave room for milliseconds... */
@@ -1452,8 +1448,8 @@ log_line_prefix(StringInfo buf)
localtime(&stamp_time));
/* 'paste' milliseconds into place... */
- sprintf(msbuf, ".%03d", (int) (tv.tv_usec/1000));
- strncpy(strfbuf+19, msbuf, 4);
+ sprintf(msbuf, ".%03d", (int) (tv.tv_usec / 1000));
+ strncpy(strfbuf + 19, msbuf, 4);
appendStringInfoString(buf, strfbuf);
}
@@ -1535,7 +1531,7 @@ log_line_prefix(StringInfo buf)
char *
unpack_sql_state(int sql_state)
{
- static char buf[12];
+ static char buf[12];
int i;
for (i = 0; i < 5; i++)
@@ -1629,8 +1625,7 @@ send_message_to_server_log(ErrorData *edata)
}
/*
- * If the user wants the query that generated this error logged, do
- * it.
+ * If the user wants the query that generated this error logged, do it.
*/
if (edata->elevel >= log_min_error_statement && debug_query_string != NULL)
{
@@ -1692,12 +1687,13 @@ send_message_to_server_log(ErrorData *edata)
if ((Log_destination & LOG_DESTINATION_STDERR) || whereToSendOutput == Debug)
{
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
*
- * If stderr redirection is active, it's ok to write to stderr
- * because that's really a pipe to the syslogger process.
+ * If stderr redirection is active, it's ok to write to stderr because
+ * that's really a pipe to the syslogger process.
*/
if ((!Redirect_stderr || am_syslogger) && pgwin32_is_service())
write_eventlog(edata->elevel, buf.data);
@@ -1847,12 +1843,12 @@ send_message_to_frontend(ErrorData *edata)
pq_endmessage(&msgbuf);
/*
- * This flush is normally not necessary, since postgres.c will flush
- * out waiting data when control returns to the main loop. But it
- * seems best to leave it here, so that the client has some clue what
- * happened if the backend dies before getting back to the main loop
- * ... error/notice messages should not be a performance-critical path
- * anyway, so an extra flush won't hurt much ...
+ * This flush is normally not necessary, since postgres.c will flush out
+ * waiting data when control returns to the main loop. But it seems best
+ * to leave it here, so that the client has some clue what happened if the
+ * backend dies before getting back to the main loop ... error/notice
+ * messages should not be a performance-critical path anyway, so an extra
+ * flush won't hurt much ...
*/
pq_flush();
}
@@ -1887,9 +1883,9 @@ expand_fmt_string(const char *fmt, ErrorData *edata)
if (*cp == 'm')
{
/*
- * Replace %m by system error string. If there are any
- * %'s in the string, we'd better double them so that
- * vsnprintf won't misinterpret.
+ * Replace %m by system error string. If there are any %'s in
+ * the string, we'd better double them so that vsnprintf won't
+ * misinterpret.
*/
const char *cp2;
@@ -1934,8 +1930,8 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno.
- * This is ANSI C spec compliant, but not exactly useful.
+ * Some strerror()s return an empty string for out-of-range errno. This is
+ * ANSI C spec compliant, but not exactly useful.
*/
if (str == NULL || *str == '\0')
{
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 3c33fbfa6f0..2212f49fc46 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.80 2005/05/11 01:26:02 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.81 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@ typedef struct df_files
{
struct df_files *next; /* List link */
dev_t device; /* Device file is on */
-#ifndef WIN32 /* ensures we never again depend on this
- * under win32 */
+#ifndef WIN32 /* ensures we never again depend on this under
+ * win32 */
ino_t inode; /* Inode number of file */
#endif
void *handle; /* a handle for pg_dl* functions */
@@ -200,8 +200,8 @@ load_file(char *filename)
/*
* We need to do stat() in order to determine whether this is the same
- * file as a previously loaded file; it's also handy so as to give a
- * good error message if bogus file name given.
+ * file as a previously loaded file; it's also handy so as to give a good
+ * error message if bogus file name given.
*/
if (stat(fullname, &stat_buf) == -1)
ereport(ERROR,
@@ -209,8 +209,8 @@ load_file(char *filename)
errmsg("could not access file \"%s\": %m", fullname)));
/*
- * We have to zap all entries in the list that match on either
- * filename or inode, else load_external_function() won't do anything.
+ * We have to zap all entries in the list that match on either filename or
+ * inode, else load_external_function() won't do anything.
*/
prv = NULL;
for (file_scanner = file_list; file_scanner != NULL; file_scanner = nxt)
@@ -351,7 +351,7 @@ substitute_libpath_macro(const char *name)
strncmp(name, "$libdir", strlen("$libdir")) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid macro name in dynamic library path: %s", name)));
+ errmsg("invalid macro name in dynamic library path: %s", name)));
ret = palloc(strlen(pkglib_path) + strlen(sep_ptr) + 1);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index dd6134ccfd0..4e5dcc3002b 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.96 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,9 +41,9 @@
* some warnings about int->pointer conversions...
*/
#if (defined(__mc68000__) || (defined(__m68k__))) && defined(__ELF__)
-typedef int32 (*func_ptr) ();
+typedef int32 (*func_ptr) ();
#else
-typedef char * (*func_ptr) ();
+typedef char *(*func_ptr) ();
#endif
/*
@@ -52,8 +52,8 @@ typedef char * (*func_ptr) ();
typedef struct
{
func_ptr func; /* Address of the oldstyle function */
- bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a
- * toastable datatype? */
+ bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a toastable
+ * datatype? */
} Oldstyle_fnextra;
/*
@@ -95,8 +95,8 @@ fmgr_isbuiltin(Oid id)
int high = fmgr_nbuiltins - 1;
/*
- * Loop invariant: low is the first index that could contain target
- * entry, and high is the last index that could contain it.
+ * Loop invariant: low is the first index that could contain target entry,
+ * and high is the last index that could contain it.
*/
while (low <= high)
{
@@ -177,9 +177,9 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
char *prosrc;
/*
- * fn_oid *must* be filled in last. Some code assumes that if fn_oid
- * is valid, the whole struct is valid. Some FmgrInfo struct's do
- * survive elogs.
+ * fn_oid *must* be filled in last. Some code assumes that if fn_oid is
+ * valid, the whole struct is valid. Some FmgrInfo struct's do survive
+ * elogs.
*/
finfo->fn_oid = InvalidOid;
finfo->fn_extra = NULL;
@@ -189,8 +189,7 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
if ((fbp = fmgr_isbuiltin(functionId)) != NULL)
{
/*
- * Fast path for builtin functions: don't bother consulting
- * pg_proc
+ * Fast path for builtin functions: don't bother consulting pg_proc
*/
finfo->fn_nargs = fbp->nargs;
finfo->fn_strict = fbp->strict;
@@ -227,11 +226,11 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
/*
* For an ordinary builtin function, we should never get here
* because the isbuiltin() search above will have succeeded.
- * However, if the user has done a CREATE FUNCTION to create
- * an alias for a builtin function, we can end up here. In
- * that case we have to look up the function by name. The
- * name of the internal function is stored in prosrc (it
- * doesn't have to be the same as the name of the alias!)
+ * However, if the user has done a CREATE FUNCTION to create an
+ * alias for a builtin function, we can end up here. In that case
+ * we have to look up the function by name. The name of the
+ * internal function is stored in prosrc (it doesn't have to be
+ * the same as the name of the alias!)
*/
prosrcdatum = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
@@ -300,8 +299,7 @@ fmgr_info_C_lang(Oid functionId, FmgrInfo *finfo, HeapTuple procedureTuple)
void *libraryhandle;
/*
- * Get prosrc and probin strings (link symbol and library
- * filename)
+ * Get prosrc and probin strings (link symbol and library filename)
*/
prosrcattr = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
@@ -605,14 +603,13 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
fnextra = (Oldstyle_fnextra *) fcinfo->flinfo->fn_extra;
/*
- * Result is NULL if any argument is NULL, but we still call the
- * function (peculiar, but that's the way it worked before, and after
- * all this is a backwards-compatibility wrapper). Note, however,
- * that we'll never get here with NULL arguments if the function is
- * marked strict.
+ * Result is NULL if any argument is NULL, but we still call the function
+ * (peculiar, but that's the way it worked before, and after all this is a
+ * backwards-compatibility wrapper). Note, however, that we'll never get
+ * here with NULL arguments if the function is marked strict.
*
- * We also need to detoast any TOAST-ed inputs, since it's unlikely that
- * an old-style function knows about TOASTing.
+ * We also need to detoast any TOAST-ed inputs, since it's unlikely that an
+ * old-style function knows about TOASTing.
*/
isnull = false;
for (i = 0; i < n_arguments; i++)
@@ -634,9 +631,9 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
case 1:
/*
- * nullvalue() used to use isNull to check if arg is NULL;
- * perhaps there are other functions still out there that also
- * rely on this undocumented hack?
+ * nullvalue() used to use isNull to check if arg is NULL; perhaps
+ * there are other functions still out there that also rely on
+ * this undocumented hack?
*/
returnValue = (*user_fn) (fcinfo->arg[0], &fcinfo->isnull);
break;
@@ -744,16 +741,16 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
default:
/*
- * Increasing FUNC_MAX_ARGS doesn't automatically add cases to
- * the above code, so mention the actual value in this error
- * not FUNC_MAX_ARGS. You could add cases to the above if you
- * needed to support old-style functions with many arguments,
- * but making 'em be new-style is probably a better idea.
+ * Increasing FUNC_MAX_ARGS doesn't automatically add cases to the
+ * above code, so mention the actual value in this error not
+ * FUNC_MAX_ARGS. You could add cases to the above if you needed
+ * to support old-style functions with many arguments, but making
+ * 'em be new-style is probably a better idea.
*/
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- fcinfo->flinfo->fn_oid, n_arguments, 16)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ fcinfo->flinfo->fn_oid, n_arguments, 16)));
returnValue = NULL; /* keep compiler quiet */
break;
}
@@ -769,7 +766,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
struct fmgr_security_definer_cache
{
FmgrInfo flinfo;
- Oid userid;
+ Oid userid;
};
/*
@@ -785,8 +782,8 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
{
Datum result;
FmgrInfo *save_flinfo;
- struct fmgr_security_definer_cache * volatile fcache;
- Oid save_userid;
+ struct fmgr_security_definer_cache *volatile fcache;
+ Oid save_userid;
HeapTuple tuple;
if (!fcinfo->flinfo->fn_extra)
@@ -1719,8 +1716,8 @@ fmgr(Oid procedureId,...)
if (n_arguments > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
va_start(pvar, procedureId);
for (i = 0; i < n_arguments; i++)
fcinfo.arg[i] = (Datum) va_arg(pvar, char *);
@@ -1760,10 +1757,10 @@ Int64GetDatum(int64 X)
#else /* INT64_IS_BUSTED */
/*
- * On a machine with no 64-bit-int C datatype, sizeof(int64) will not
- * be 8, but we want Int64GetDatum to return an 8-byte object anyway,
- * with zeroes in the unused bits. This is needed so that, for
- * example, hash join of int8 will behave properly.
+ * On a machine with no 64-bit-int C datatype, sizeof(int64) will not be
+ * 8, but we want Int64GetDatum to return an 8-byte object anyway, with
+ * zeroes in the unused bits. This is needed so that, for example, hash
+ * join of int8 will behave properly.
*/
int64 *retval = (int64 *) palloc0(Max(sizeof(int64), 8));
@@ -1846,8 +1843,8 @@ get_fn_expr_rettype(FmgrInfo *flinfo)
Node *expr;
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
@@ -1866,8 +1863,8 @@ Oid
get_fn_expr_argtype(FmgrInfo *flinfo, int argnum)
{
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
@@ -1909,8 +1906,8 @@ get_call_expr_argtype(Node *expr, int argnum)
argtype = exprType((Node *) list_nth(args, argnum));
/*
- * special hack for ScalarArrayOpExpr: what the underlying function
- * will actually get passed is the element type of the array.
+ * special hack for ScalarArrayOpExpr: what the underlying function will
+ * actually get passed is the element type of the array.
*/
if (IsA(expr, ScalarArrayOpExpr) &&
argnum == 1)
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 598168a70a0..0a51f7ae0f2 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -7,7 +7,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.25 2005/10/06 19:51:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.26 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,13 +29,13 @@
static void shutdown_MultiFuncCall(Datum arg);
static TypeFuncClass internal_get_result_type(Oid funcid,
- Node *call_expr,
- ReturnSetInfo *rsinfo,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Node *call_expr,
+ ReturnSetInfo *rsinfo,
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
static bool resolve_polymorphic_tupdesc(TupleDesc tupdesc,
- oidvector *declared_args,
- Node *call_expr);
+ oidvector *declared_args,
+ Node *call_expr);
static TypeFuncClass get_type_func_class(Oid typid);
@@ -89,8 +89,8 @@ init_MultiFuncCall(PG_FUNCTION_ARGS)
fcinfo->flinfo->fn_extra = retval;
/*
- * Ensure we will get shut down cleanly if the exprcontext is not
- * run to completion.
+ * Ensure we will get shut down cleanly if the exprcontext is not run
+ * to completion.
*/
RegisterExprContextCallback(rsi->econtext,
shutdown_MultiFuncCall,
@@ -119,16 +119,16 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
FuncCallContext *retval = (FuncCallContext *) fcinfo->flinfo->fn_extra;
/*
- * Clear the TupleTableSlot, if present. This is for safety's sake:
- * the Slot will be in a long-lived context (it better be, if the
+ * Clear the TupleTableSlot, if present. This is for safety's sake: the
+ * Slot will be in a long-lived context (it better be, if the
* FuncCallContext is pointing to it), but in most usage patterns the
- * tuples stored in it will be in the function's per-tuple context. So
- * at the beginning of each call, the Slot will hold a dangling
- * pointer to an already-recycled tuple. We clear it out here.
+ * tuples stored in it will be in the function's per-tuple context. So at
+ * the beginning of each call, the Slot will hold a dangling pointer to an
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
- * will always be NULL. This is just here for backwards compatibility
- * in case someone creates a slot anyway.
+ * will always be NULL. This is just here for backwards compatibility in
+ * case someone creates a slot anyway.
*/
if (retval->slot != NULL)
ExecClearTuple(retval->slot);
@@ -168,8 +168,8 @@ shutdown_MultiFuncCall(Datum arg)
flinfo->fn_extra = NULL;
/*
- * Caller is responsible to free up memory for individual struct
- * elements other than att_in_funcinfo and elements.
+ * Caller is responsible to free up memory for individual struct elements
+ * other than att_in_funcinfo and elements.
*/
if (funcctx->attinmeta != NULL)
pfree(funcctx->attinmeta);
@@ -183,14 +183,14 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result. NB: the tupledesc should
* be copied if it is to be accessed over a long period.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -238,7 +238,7 @@ get_expr_result_type(Node *expr,
else
{
/* handle as a generic expression; no chance to resolve RECORD */
- Oid typid = exprType(expr);
+ Oid typid = exprType(expr);
if (resultTypeId)
*resultTypeId = typid;
@@ -273,7 +273,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -306,9 +306,9 @@ internal_get_result_type(Oid funcid,
if (tupdesc)
{
/*
- * It has OUT parameters, so it's basically like a regular
- * composite type, except we have to be able to resolve any
- * polymorphic OUT parameters.
+ * It has OUT parameters, so it's basically like a regular composite
+ * type, except we have to be able to resolve any polymorphic OUT
+ * parameters.
*/
if (resultTypeId)
*resultTypeId = rettype;
@@ -341,7 +341,7 @@ internal_get_result_type(Oid funcid,
*/
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
- Oid newrettype = exprType(call_expr);
+ Oid newrettype = exprType(call_expr);
if (newrettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
@@ -355,7 +355,7 @@ internal_get_result_type(Oid funcid,
if (resultTypeId)
*resultTypeId = rettype;
if (resultTupleDesc)
- *resultTupleDesc = NULL; /* default result */
+ *resultTupleDesc = NULL; /* default result */
/* Classify the result type */
result = get_type_func_class(rettype);
@@ -391,7 +391,7 @@ internal_get_result_type(Oid funcid,
/*
* Given the result tuple descriptor for a function with OUT parameters,
* replace any polymorphic columns (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not.
*/
static bool
@@ -425,7 +425,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
@@ -468,14 +468,14 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
switch (tupdesc->attrs[i]->atttypid)
{
case ANYELEMENTOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyelement_type,
-1,
0);
break;
case ANYARRAYOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyarray_type,
-1,
@@ -492,7 +492,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
/*
* Given the declared argument types and modes for a function,
* replace any polymorphic types (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not. This is the same logic as
* resolve_polymorphic_tupdesc, but with a different argument representation.
*
@@ -513,7 +513,7 @@ resolve_polymorphic_argtypes(int numargs, Oid *argtypes, char *argmodes,
inargno = 0;
for (i = 0; i < numargs; i++)
{
- char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
+ char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
switch (argtypes[i])
{
@@ -612,10 +612,11 @@ get_type_func_class(Oid typid)
case 'p':
if (typid == RECORDOID)
return TYPEFUNC_RECORD;
+
/*
* We treat VOID and CSTRING as legitimate scalar datatypes,
- * mostly for the convenience of the JDBC driver (which wants
- * to be able to do "SELECT * FROM foo()" for all legitimately
+ * mostly for the convenience of the JDBC driver (which wants to
+ * be able to do "SELECT * FROM foo()" for all legitimately
* user-callable functions).
*/
if (typid == VOIDOID || typid == CSTRINGOID)
@@ -681,14 +682,14 @@ get_func_result_name(Oid functionId)
* since the array data is just going to look like a C array of
* values.
*/
- arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
numargs = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numargs < 0 ||
ARR_ELEMTYPE(arr) != CHAROID)
elog(ERROR, "proargmodes is not a 1-D char array");
argmodes = (char *) ARR_DATA_PTR(arr);
- arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
if (ARR_NDIM(arr) != 1 ||
ARR_DIMS(arr)[0] != numargs ||
ARR_ELEMTYPE(arr) != TEXTOID)
@@ -769,7 +770,7 @@ build_function_result_tupdesc_t(HeapTuple procTuple)
Anum_pg_proc_proargnames,
&isnull);
if (isnull)
- proargnames = PointerGetDatum(NULL); /* just to be sure */
+ proargnames = PointerGetDatum(NULL); /* just to be sure */
return build_function_result_tupdesc_d(proallargtypes,
proargmodes,
@@ -848,7 +849,7 @@ build_function_result_tupdesc_d(Datum proallargtypes,
numoutargs = 0;
for (i = 0; i < numargs; i++)
{
- char *pname;
+ char *pname;
if (argmodes[i] == PROARGMODE_IN)
continue;
@@ -879,7 +880,7 @@ build_function_result_tupdesc_d(Datum proallargtypes,
desc = CreateTemplateTupleDesc(numoutargs, false);
for (i = 0; i < numoutargs; i++)
{
- TupleDescInitEntry(desc, i+1,
+ TupleDescInitEntry(desc, i + 1,
outargnames[i],
outargtypes[i],
-1,
@@ -986,7 +987,7 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
if (list_length(colaliases) != 1)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("number of aliases does not match number of columns")));
+ errmsg("number of aliases does not match number of columns")));
/* OK, get the column alias */
attname = strVal(linitial(colaliases));
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 66be64a4e56..292673ac26a 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.64 2005/08/20 23:26:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,14 +115,14 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
HASHHDR *hctl;
/*
- * For shared hash tables, we have a local hash header (HTAB struct)
- * that we allocate in TopMemoryContext; all else is in shared memory.
+ * For shared hash tables, we have a local hash header (HTAB struct) that
+ * we allocate in TopMemoryContext; all else is in shared memory.
*
- * For non-shared hash tables, everything including the hash header
- * is in a memory context created specially for the hash table ---
- * this makes hash_destroy very simple. The memory context is made
- * a child of either a context specified by the caller, or
- * TopMemoryContext if nothing is specified.
+ * For non-shared hash tables, everything including the hash header is in a
+ * memory context created specially for the hash table --- this makes
+ * hash_destroy very simple. The memory context is made a child of either
+ * a context specified by the caller, or TopMemoryContext if nothing is
+ * specified.
*/
if (flags & HASH_SHARED_MEM)
{
@@ -144,7 +144,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
}
/* Initialize the hash header, plus a copy of the table name */
- hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
+ hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
MemSet(hashp, 0, sizeof(HTAB));
hashp->tabname = (char *) (hashp + 1);
@@ -156,10 +156,9 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
hashp->hash = string_hash; /* default hash function */
/*
- * If you don't specify a match function, it defaults to strncmp() if
- * you used string_hash (either explicitly or by default) and to
- * memcmp() otherwise. (Prior to PostgreSQL 7.4, memcmp() was always
- * used.)
+ * If you don't specify a match function, it defaults to strncmp() if you
+ * used string_hash (either explicitly or by default) and to memcmp()
+ * otherwise. (Prior to PostgreSQL 7.4, memcmp() was always used.)
*/
if (flags & HASH_COMPARE)
hashp->match = info->match;
@@ -186,8 +185,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
if (flags & HASH_SHARED_MEM)
{
/*
- * ctl structure is preallocated for shared memory tables. Note
- * that HASH_DIRSIZE and HASH_ALLOC had better be set as well.
+ * ctl structure is preallocated for shared memory tables. Note that
+ * HASH_DIRSIZE and HASH_ALLOC had better be set as well.
*/
hashp->hctl = info->hctl;
hashp->dir = info->dir;
@@ -243,8 +242,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
}
/*
- * hash table now allocates space for key and data but you have to say
- * how much space to allocate
+ * hash table now allocates space for key and data but you have to say how
+ * much space to allocate
*/
if (flags & HASH_ELEM)
{
@@ -318,8 +317,8 @@ init_htab(HTAB *hashp, long nelem)
/*
* Divide number of elements by the fill factor to determine a desired
- * number of buckets. Allocate space for the next greater power of
- * two number of buckets
+ * number of buckets. Allocate space for the next greater power of two
+ * number of buckets
*/
lnbuckets = (nelem - 1) / hctl->ffactor + 1;
@@ -329,15 +328,14 @@ init_htab(HTAB *hashp, long nelem)
hctl->high_mask = (nbuckets << 1) - 1;
/*
- * Figure number of directory segments needed, round up to a power of
- * 2
+ * Figure number of directory segments needed, round up to a power of 2
*/
nsegs = (nbuckets - 1) / hctl->ssize + 1;
nsegs = 1 << my_log2(nsegs);
/*
- * Make sure directory is big enough. If pre-allocated directory is
- * too small, choke (caller screwed up).
+ * Make sure directory is big enough. If pre-allocated directory is too
+ * small, choke (caller screwed up).
*/
if (nsegs > hctl->dsize)
{
@@ -418,7 +416,7 @@ hash_estimate_size(long num_entries, Size entrysize)
size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
/* segments */
size = add_size(size, mul_size(nSegments,
- MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
+ MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
/* elements --- allocated in groups of up to HASHELEMENT_ALLOC_MAX */
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
elementAllocCnt = Min(num_entries, HASHELEMENT_ALLOC_MAX);
@@ -528,7 +526,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -623,8 +621,8 @@ hash_search(HTAB *hashp,
/*
* better hope the caller is synchronizing access to this
- * element, because someone else is going to reuse it the
- * next time something is added to the table
+ * element, because someone else is going to reuse it the next
+ * time something is added to the table
*/
return (void *) ELEMENTKEY(currBucket);
}
@@ -680,9 +678,8 @@ hash_search(HTAB *hashp,
if (++hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
{
/*
- * NOTE: failure to expand table is not a fatal error, it
- * just means we have to run at higher fill factor than we
- * wanted.
+ * NOTE: failure to expand table is not a fatal error, it just
+ * means we have to run at higher fill factor than we wanted.
*/
expand_table(hashp);
}
@@ -731,7 +728,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
{
/* Continuing scan of curBucket... */
status->curEntry = curElem->link;
- if (status->curEntry == NULL) /* end of this bucket */
+ if (status->curEntry == NULL) /* end of this bucket */
++status->curBucket;
return (void *) ELEMENTKEY(curElem);
}
@@ -746,7 +743,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
max_bucket = hctl->max_bucket;
if (curBucket > max_bucket)
- return NULL; /* search is done */
+ return NULL; /* search is done */
/*
* first find the right segment in the table directory.
@@ -768,7 +765,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
if (++curBucket > max_bucket)
{
status->curBucket = curBucket;
- return NULL; /* search is done */
+ return NULL; /* search is done */
}
if (++segment_ndx >= ssize)
{
@@ -833,10 +830,9 @@ expand_table(HTAB *hashp)
/*
* *Before* changing masks, find old bucket corresponding to same hash
- * values; values in that bucket may need to be relocated to new
- * bucket. Note that new_bucket is certainly larger than low_mask at
- * this point, so we can skip the first step of the regular hash mask
- * calc.
+ * values; values in that bucket may need to be relocated to new bucket.
+ * Note that new_bucket is certainly larger than low_mask at this point,
+ * so we can skip the first step of the regular hash mask calc.
*/
old_bucket = (new_bucket & hctl->low_mask);
@@ -850,10 +846,10 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the
- * hash masking is done in calc_bucket, only one old bucket can need
- * to be split at this point. With a different way of reducing the
- * hash value, that might not be true!
+ * Relocate records to the new bucket. NOTE: because of the way the hash
+ * masking is done in calc_bucket, only one old bucket can need to be
+ * split at this point. With a different way of reducing the hash value,
+ * that might not be true!
*/
old_segnum = old_bucket >> hctl->sshift;
old_segndx = MOD(old_bucket, hctl->ssize);
diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c
index c5968658161..43dac9daad1 100644
--- a/src/backend/utils/hash/hashfn.c
+++ b/src/backend/utils/hash/hashfn.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.24 2005/06/08 23:02:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.25 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ uint32
bitmap_hash(const void *key, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return bms_hash_value(*((const Bitmapset * const *) key));
+ return bms_hash_value(*((const Bitmapset *const *) key));
}
/*
@@ -74,6 +74,6 @@ int
bitmap_match(const void *key1, const void *key2, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return !bms_equal(*((const Bitmapset * const *) key1),
- *((const Bitmapset * const *) key2));
+ return !bms_equal(*((const Bitmapset *const *) key1),
+ *((const Bitmapset *const *) key2));
}
diff --git a/src/backend/utils/hash/pg_crc.c b/src/backend/utils/hash/pg_crc.c
index 211da1aa729..2cfdff44e22 100644
--- a/src/backend/utils/hash/pg_crc.c
+++ b/src/backend/utils/hash/pg_crc.c
@@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.13 2005/06/02 05:55:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.14 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -378,7 +378,6 @@ const uint32 pg_crc64_table1[256] = {
0x5DEDC41A, 0x1F1D25F1,
0xD80C07CD, 0x9AFCE626
};
-
#else /* int64 works */
const uint64 pg_crc64_table[256] = {
@@ -511,7 +510,6 @@ const uint64 pg_crc64_table[256] = {
UINT64CONST(0x5DEDC41A34BBEEB2), UINT64CONST(0x1F1D25F19D51D821),
UINT64CONST(0xD80C07CD676F8394), UINT64CONST(0x9AFCE626CE85B507)
};
-
#endif /* INT64_IS_BUSTED */
-#endif /* PROVIDE_64BIT_CRC */
+#endif /* PROVIDE_64BIT_CRC */
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index 7d9d2e6cb25..9906682c320 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -4,9 +4,9 @@
* Routines for maintaining "flat file" images of the shared catalogs.
*
* We use flat files so that the postmaster and not-yet-fully-started
- * backends can look at the contents of pg_database, pg_authid, and
- * pg_auth_members for authentication purposes. This module is
- * responsible for keeping the flat-file images as nearly in sync with
+ * backends can look at the contents of pg_database, pg_authid, and
+ * pg_auth_members for authentication purposes. This module is
+ * responsible for keeping the flat-file images as nearly in sync with
* database reality as possible.
*
* The tricky part of the write_xxx_file() routines in this module is that
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.14 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@
#define AUTH_FLAT_FILE "global/pg_auth"
/* Info bits in a flatfiles 2PC record */
-#define FF_BIT_DATABASE 1
+#define FF_BIT_DATABASE 1
#define FF_BIT_AUTH 2
@@ -181,8 +181,8 @@ write_database_file(Relation drel)
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster
- * might be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = database_getflatfilename();
bufsize = strlen(filename) + 12;
@@ -209,7 +209,7 @@ write_database_file(Relation drel)
Oid datoid;
Oid dattablespace;
TransactionId datfrozenxid,
- datvacuumxid;
+ datvacuumxid;
datname = NameStr(dbform->datname);
datoid = HeapTupleGetOid(tuple);
@@ -219,7 +219,7 @@ write_database_file(Relation drel)
/*
* Identify the oldest datfrozenxid, ignoring databases that are not
- * connectable (we assume they are safely frozen). This must match
+ * connectable (we assume they are safely frozen). This must match
* the logic in vac_truncate_clog() in vacuum.c.
*/
if (dbform->datallowconn &&
@@ -262,8 +262,8 @@ write_database_file(Relation drel)
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
@@ -295,16 +295,18 @@ write_database_file(Relation drel)
* and build data structures in-memory before writing the file.
*/
-typedef struct {
+typedef struct
+{
Oid roleid;
bool rolcanlogin;
- char* rolname;
- char* rolpassword;
- char* rolvaliduntil;
- List* member_of;
+ char *rolname;
+ char *rolpassword;
+ char *rolvaliduntil;
+ List *member_of;
} auth_entry;
-typedef struct {
+typedef struct
+{
Oid roleid;
Oid memberid;
} authmem_entry;
@@ -314,11 +316,13 @@ typedef struct {
static int
oid_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- if (a_auth->roleid < b_auth->roleid) return -1;
- if (a_auth->roleid > b_auth->roleid) return 1;
+ if (a_auth->roleid < b_auth->roleid)
+ return -1;
+ if (a_auth->roleid > b_auth->roleid)
+ return 1;
return 0;
}
@@ -326,21 +330,23 @@ oid_compar(const void *a, const void *b)
static int
name_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- return strcmp(a_auth->rolname,b_auth->rolname);
+ return strcmp(a_auth->rolname, b_auth->rolname);
}
/* qsort comparator for sorting authmem_entry array by memberid */
static int
mem_compar(const void *a, const void *b)
{
- const authmem_entry *a_auth = (const authmem_entry*) a;
- const authmem_entry *b_auth = (const authmem_entry*) b;
+ const authmem_entry *a_auth = (const authmem_entry *) a;
+ const authmem_entry *b_auth = (const authmem_entry *) b;
- if (a_auth->memberid < b_auth->memberid) return -1;
- if (a_auth->memberid > b_auth->memberid) return 1;
+ if (a_auth->memberid < b_auth->memberid)
+ return -1;
+ if (a_auth->memberid > b_auth->memberid)
+ return 1;
return 0;
}
@@ -354,7 +360,7 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
char *filename,
*tempname;
int bufsize;
- BlockNumber totalblocks;
+ BlockNumber totalblocks;
FILE *fp;
mode_t oumask;
HeapScanDesc scan;
@@ -364,13 +370,13 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
int curr_mem = 0;
int total_mem = 0;
int est_rows;
- auth_entry *auth_info;
+ auth_entry *auth_info;
authmem_entry *authmem_info;
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster might
- * be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = auth_getflatfilename();
bufsize = strlen(filename) + 12;
@@ -387,29 +393,29 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
tempname)));
/*
- * Read pg_authid and fill temporary data structures. Note we must
- * read all roles, even those without rolcanlogin.
+ * Read pg_authid and fill temporary data structures. Note we must read
+ * all roles, even those without rolcanlogin.
*/
totalblocks = RelationGetNumberOfBlocks(rel_authid);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_authid)));
- auth_info = (auth_entry*) palloc(est_rows*sizeof(auth_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_authid)));
+ auth_info = (auth_entry *) palloc(est_rows * sizeof(auth_entry));
scan = heap_beginscan(rel_authid, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_authid aform = (Form_pg_authid) GETSTRUCT(tuple);
HeapTupleHeader tup = tuple->t_data;
- char *tp; /* ptr to tuple data */
- long off; /* offset in tuple data */
+ char *tp; /* ptr to tuple data */
+ long off; /* offset in tuple data */
bits8 *bp = tup->t_bits; /* ptr to null bitmask in tuple */
Datum datum;
if (curr_role >= est_rows)
{
est_rows *= 2;
- auth_info = (auth_entry*)
- repalloc(auth_info, est_rows*sizeof(auth_entry));
+ auth_info = (auth_entry *)
+ repalloc(auth_info, est_rows * sizeof(auth_entry));
}
auth_info[curr_role].roleid = HeapTupleGetOid(tuple);
@@ -418,10 +424,10 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
auth_info[curr_role].member_of = NIL;
/*
- * We can't use heap_getattr() here because during startup we will
- * not have any tupdesc for pg_authid. Fortunately it's not too
- * hard to work around this. rolpassword is the first possibly-null
- * field so we can compute its offset directly.
+ * We can't use heap_getattr() here because during startup we will not
+ * have any tupdesc for pg_authid. Fortunately it's not too hard to
+ * work around this. rolpassword is the first possibly-null field so
+ * we can compute its offset directly.
*/
tp = (char *) tup + tup->t_hoff;
off = offsetof(FormData_pg_authid, rolpassword);
@@ -438,8 +444,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
datum = PointerGetDatum(tp + off);
/*
- * The password probably shouldn't ever be out-of-line toasted;
- * if it is, ignore it, since we can't handle that in startup mode.
+ * The password probably shouldn't ever be out-of-line toasted; if
+ * it is, ignore it, since we can't handle that in startup mode.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(datum)))
auth_info[curr_role].rolpassword = pstrdup("");
@@ -495,8 +501,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
*/
totalblocks = RelationGetNumberOfBlocks(rel_authmem);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_auth_members)));
- authmem_info = (authmem_entry*) palloc(est_rows*sizeof(authmem_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_auth_members)));
+ authmem_info = (authmem_entry *) palloc(est_rows * sizeof(authmem_entry));
scan = heap_beginscan(rel_authmem, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -506,8 +512,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
if (curr_mem >= est_rows)
{
est_rows *= 2;
- authmem_info = (authmem_entry*)
- repalloc(authmem_info, est_rows*sizeof(authmem_entry));
+ authmem_info = (authmem_entry *)
+ repalloc(authmem_info, est_rows * sizeof(authmem_entry));
}
authmem_info[curr_mem].roleid = memform->roleid;
@@ -518,8 +524,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
heap_endscan(scan);
/*
- * Search for memberships. We can skip all this if pg_auth_members
- * is empty.
+ * Search for memberships. We can skip all this if pg_auth_members is
+ * empty.
*/
if (total_mem > 0)
{
@@ -528,22 +534,23 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
*/
qsort(auth_info, total_roles, sizeof(auth_entry), oid_compar);
qsort(authmem_info, total_mem, sizeof(authmem_entry), mem_compar);
+
/*
* For each role, find what it belongs to.
*/
for (curr_role = 0; curr_role < total_roles; curr_role++)
{
- List *roles_list;
- List *roles_names_list = NIL;
- ListCell *mem;
+ List *roles_list;
+ List *roles_names_list = NIL;
+ ListCell *mem;
/* We can skip this for non-login roles */
if (!auth_info[curr_role].rolcanlogin)
continue;
/*
- * This search algorithm is the same as in is_member_of_role;
- * we are just working with a different input data structure.
+ * This search algorithm is the same as in is_member_of_role; we
+ * are just working with a different input data structure.
*/
roles_list = list_make1_oid(auth_info[curr_role].roleid);
@@ -551,17 +558,20 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
{
authmem_entry key;
authmem_entry *found_mem;
- int first_found, last_found, i;
+ int first_found,
+ last_found,
+ i;
key.memberid = lfirst_oid(mem);
found_mem = bsearch(&key, authmem_info, total_mem,
sizeof(authmem_entry), mem_compar);
if (!found_mem)
continue;
+
/*
- * bsearch found a match for us; but if there were
- * multiple matches it could have found any one of them.
- * Locate first and last match.
+ * bsearch found a match for us; but if there were multiple
+ * matches it could have found any one of them. Locate first
+ * and last match.
*/
first_found = last_found = (found_mem - authmem_info);
while (first_found > 0 &&
@@ -570,30 +580,31 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
while (last_found + 1 < total_mem &&
mem_compar(&key, &authmem_info[last_found + 1]) == 0)
last_found++;
+
/*
* Now add all the new roles to roles_list.
*/
for (i = first_found; i <= last_found; i++)
roles_list = list_append_unique_oid(roles_list,
- authmem_info[i].roleid);
+ authmem_info[i].roleid);
}
/*
- * Convert list of role Oids to list of role names.
- * We must do this before re-sorting auth_info.
+ * Convert list of role Oids to list of role names. We must do
+ * this before re-sorting auth_info.
*
- * We skip the first list element (curr_role itself) since there
- * is no point in writing that a role is a member of itself.
+ * We skip the first list element (curr_role itself) since there is
+ * no point in writing that a role is a member of itself.
*/
for_each_cell(mem, lnext(list_head(roles_list)))
{
- auth_entry key_auth;
+ auth_entry key_auth;
auth_entry *found_role;
key_auth.roleid = lfirst_oid(mem);
found_role = bsearch(&key_auth, auth_info, total_roles,
sizeof(auth_entry), oid_compar);
- if (found_role) /* paranoia */
+ if (found_role) /* paranoia */
roles_names_list = lappend(roles_names_list,
found_role->rolname);
}
@@ -613,7 +624,7 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
if (arole->rolcanlogin)
{
- ListCell *mem;
+ ListCell *mem;
fputs_quote(arole->rolname, fp);
fputs(" ", fp);
@@ -638,8 +649,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
@@ -671,11 +682,13 @@ BuildFlatFiles(bool database_only)
{
ResourceOwner owner;
RelFileNode rnode;
- Relation rel_db, rel_authid, rel_authmem;
+ Relation rel_db,
+ rel_authid,
+ rel_authmem;
/*
- * We don't have any hope of running a real relcache, but we can use
- * the same fake-relcache facility that WAL replay uses.
+ * We don't have any hope of running a real relcache, but we can use the
+ * same fake-relcache facility that WAL replay uses.
*/
XLogInitRelationCache();
@@ -749,21 +762,21 @@ AtEOXact_UpdateFlatFiles(bool isCommit)
}
/*
- * Advance command counter to be certain we see all effects of the
- * current transaction.
+ * Advance command counter to be certain we see all effects of the current
+ * transaction.
*/
CommandCounterIncrement();
/*
- * We use ExclusiveLock to ensure that only one backend writes the
- * flat file(s) at a time. That's sufficient because it's okay to
- * allow plain reads of the tables in parallel. There is some chance
- * of a deadlock here (if we were triggered by a user update of one
- * of the tables, which likely won't have gotten a strong enough lock),
- * so get the locks we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the flat
+ * file(s) at a time. That's sufficient because it's okay to allow plain
+ * reads of the tables in parallel. There is some chance of a deadlock
+ * here (if we were triggered by a user update of one of the tables, which
+ * likely won't have gotten a strong enough lock), so get the locks we
+ * need before writing anything.
*
- * For writing the auth file, it's sufficient to ExclusiveLock pg_authid;
- * we take just regular AccessShareLock on pg_auth_members.
+ * For writing the auth file, it's sufficient to ExclusiveLock pg_authid; we
+ * take just regular AccessShareLock on pg_auth_members.
*/
if (database_file_update_subid != InvalidSubTransactionId)
drel = heap_open(DatabaseRelationId, ExclusiveLock);
@@ -863,7 +876,7 @@ AtEOSubXact_UpdateFlatFiles(bool isCommit,
* or pg_auth_members via general-purpose INSERT/UPDATE/DELETE commands.
*
* It is sufficient for this to be a STATEMENT trigger since we don't
- * care which individual rows changed. It doesn't much matter whether
+ * care which individual rows changed. It doesn't much matter whether
* it's a BEFORE or AFTER trigger.
*/
Datum
@@ -906,11 +919,11 @@ flatfile_twophase_postcommit(TransactionId xid, uint16 info,
void *recdata, uint32 len)
{
/*
- * Set flags to do the needed file updates at the end of my own
- * current transaction. (XXX this has some issues if my own
- * transaction later rolls back, or if there is any significant
- * delay before I commit. OK for now because we disallow
- * COMMIT PREPARED inside a transaction block.)
+ * Set flags to do the needed file updates at the end of my own current
+ * transaction. (XXX this has some issues if my own transaction later
+ * rolls back, or if there is any significant delay before I commit. OK
+ * for now because we disallow COMMIT PREPARED inside a transaction
+ * block.)
*/
if (info & FF_BIT_DATABASE)
database_file_update_needed();
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 148e2609734..5c6f2f95d5f 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.149 2005/08/17 22:14:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
#include "storage/ipc.h"
#include "storage/pg_shmem.h"
#include "storage/proc.h"
-#include "storage/procarray.h"
+#include "storage/procarray.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
@@ -295,10 +295,10 @@ make_absolute_path(const char *path)
* DEFINER functions, as well as locally in some specialized commands.
* ----------------------------------------------------------------
*/
-static Oid AuthenticatedUserId = InvalidOid;
-static Oid SessionUserId = InvalidOid;
-static Oid OuterUserId = InvalidOid;
-static Oid CurrentUserId = InvalidOid;
+static Oid AuthenticatedUserId = InvalidOid;
+static Oid SessionUserId = InvalidOid;
+static Oid OuterUserId = InvalidOid;
+static Oid CurrentUserId = InvalidOid;
/* We also have to remember the superuser state of some of these levels */
static bool AuthenticatedUserIsSuperuser = false;
@@ -418,8 +418,8 @@ InitializeSessionUserId(const char *rolename)
/*
* These next checks are not enforced when in standalone mode, so that
- * there is a way to recover from sillinesses like
- * "UPDATE pg_authid SET rolcanlogin = false;".
+ * there is a way to recover from sillinesses like "UPDATE pg_authid SET
+ * rolcanlogin = false;".
*
* We do not enforce them for the autovacuum process either.
*/
@@ -433,15 +433,16 @@ InitializeSessionUserId(const char *rolename)
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("role \"%s\" is not permitted to log in",
rolename)));
+
/*
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (rform->rolconnlimit >= 0 &&
!AuthenticatedUserIsSuperuser &&
@@ -451,7 +452,7 @@ InitializeSessionUserId(const char *rolename)
errmsg("too many connections for role \"%s\"",
rolename)));
}
-
+
/* Record username and superuser status as GUC settings too */
SetConfigOption("session_authorization", rolename,
PGC_BACKEND, PGC_S_OVERRIDE);
@@ -460,9 +461,8 @@ InitializeSessionUserId(const char *rolename)
PGC_INTERNAL, PGC_S_OVERRIDE);
/*
- * Set up user-specific configuration variables. This is a good place
- * to do it so we don't have to read pg_authid twice during session
- * startup.
+ * Set up user-specific configuration variables. This is a good place to
+ * do it so we don't have to read pg_authid twice during session startup.
*/
datum = SysCacheGetAttr(AUTHNAME, roleTup,
Anum_pg_authid_rolconfig, &isnull);
@@ -534,7 +534,7 @@ SetSessionAuthorization(Oid userid, bool is_superuser)
!AuthenticatedUserIsSuperuser)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set session authorization")));
+ errmsg("permission denied to set session authorization")));
SetSessionUserId(userid, is_superuser);
@@ -562,7 +562,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -686,17 +686,17 @@ CreateLockFile(const char *filename, bool amPostmaster,
pid_t my_pid = getpid();
/*
- * We need a loop here because of race conditions. But don't loop
- * forever (for example, a non-writable $PGDATA directory might cause
- * a failure that won't go away). 100 tries seems like plenty.
+ * We need a loop here because of race conditions. But don't loop forever
+ * (for example, a non-writable $PGDATA directory might cause a failure
+ * that won't go away). 100 tries seems like plenty.
*/
for (ntries = 0;; ntries++)
{
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
- * comments below.
+ * Think not to make the file protection weaker than 0600. See comments
+ * below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
@@ -745,38 +745,38 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Check to see if the other process still exists
*
- * If the PID in the lockfile is our own PID or our parent's PID,
- * then the file must be stale (probably left over from a previous
- * system boot cycle). We need this test because of the likelihood
- * that a reboot will assign exactly the same PID as we had in the
- * previous reboot. Also, if there is just one more process launch
- * in this reboot than in the previous one, the lockfile might mention
- * our parent's PID. We can reject that since we'd never be launched
- * directly by a competing postmaster. We can't detect grandparent
- * processes unfortunately, but if the init script is written carefully
- * then all but the immediate parent shell will be root-owned processes
- * and so the kill test will fail with EPERM.
+ * If the PID in the lockfile is our own PID or our parent's PID, then
+ * the file must be stale (probably left over from a previous system
+ * boot cycle). We need this test because of the likelihood that a
+ * reboot will assign exactly the same PID as we had in the previous
+ * reboot. Also, if there is just one more process launch in this
+ * reboot than in the previous one, the lockfile might mention our
+ * parent's PID. We can reject that since we'd never be launched
+ * directly by a competing postmaster. We can't detect grandparent
+ * processes unfortunately, but if the init script is written
+ * carefully then all but the immediate parent shell will be
+ * root-owned processes and so the kill test will fail with EPERM.
*
* We can treat the EPERM-error case as okay because that error implies
* that the existing process has a different userid than we do, which
* means it cannot be a competing postmaster. A postmaster cannot
* successfully attach to a data directory owned by a userid other
- * than its own. (This is now checked directly in checkDataDir(),
- * but has been true for a long time because of the restriction that
- * the data directory isn't group- or world-accessible.) Also,
- * since we create the lockfiles mode 600, we'd have failed above
- * if the lockfile belonged to another userid --- which means that
- * whatever process kill() is reporting about isn't the one that
- * made the lockfile. (NOTE: this last consideration is the only
- * one that keeps us from blowing away a Unix socket file belonging
- * to an instance of Postgres being run by someone else, at least
- * on machines where /tmp hasn't got a stickybit.)
+ * than its own. (This is now checked directly in checkDataDir(), but
+ * has been true for a long time because of the restriction that the
+ * data directory isn't group- or world-accessible.) Also, since we
+ * create the lockfiles mode 600, we'd have failed above if the
+ * lockfile belonged to another userid --- which means that whatever
+ * process kill() is reporting about isn't the one that made the
+ * lockfile. (NOTE: this last consideration is the only one that
+ * keeps us from blowing away a Unix socket file belonging to an
+ * instance of Postgres being run by someone else, at least on
+ * machines where /tmp hasn't got a stickybit.)
*
- * Windows hasn't got getppid(), but doesn't need it since it's not
- * using real kill() either...
+ * Windows hasn't got getppid(), but doesn't need it since it's not using
+ * real kill() either...
*
- * Normally kill() will fail with ESRCH if the given PID doesn't
- * exist. BeOS returns EINVAL for some silly reason, however.
+ * Normally kill() will fail with ESRCH if the given PID doesn't exist.
+ * BeOS returns EINVAL for some silly reason, however.
*/
if (other_pid != my_pid
#ifndef WIN32
@@ -811,11 +811,11 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be
- * that the postmaster crashed (or more likely was kill -9'd by a
- * clueless admin) but has left orphan backends behind. Check for
- * this by looking to see if there is an associated shmem segment
- * that is still in use.
+ * No, the creating process did not exist. However, it could be that
+ * the postmaster crashed (or more likely was kill -9'd by a clueless
+ * admin) but has left orphan backends behind. Check for this by
+ * looking to see if there is an associated shmem segment that is
+ * still in use.
*/
if (isDDLock)
{
@@ -833,23 +833,23 @@ CreateLockFile(const char *filename, bool amPostmaster,
if (PGSharedMemoryIsInUse(id1, id2))
ereport(FATAL,
(errcode(ERRCODE_LOCK_FILE_EXISTS),
- errmsg("pre-existing shared memory block "
- "(key %lu, ID %lu) is still in use",
- id1, id2),
- errhint("If you're sure there are no old "
- "server processes still running, remove "
- "the shared memory block with "
- "the command \"ipcclean\", \"ipcrm\", "
- "or just delete the file \"%s\".",
- filename)));
+ errmsg("pre-existing shared memory block "
+ "(key %lu, ID %lu) is still in use",
+ id1, id2),
+ errhint("If you're sure there are no old "
+ "server processes still running, remove "
+ "the shared memory block with "
+ "the command \"ipcclean\", \"ipcrm\", "
+ "or just delete the file \"%s\".",
+ filename)));
}
}
}
/*
- * Looks like nobody's home. Unlink the file and try again to
- * create it. Need a loop because of possible race condition
- * against other would-be creators.
+ * Looks like nobody's home. Unlink the file and try again to create
+ * it. Need a loop because of possible race condition against other
+ * would-be creators.
*/
if (unlink(filename) < 0)
ereport(FATAL,
@@ -857,7 +857,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errmsg("could not remove old lock file \"%s\": %m",
filename),
errhint("The file seems accidentally left over, but "
- "it could not be removed. Please remove the file "
+ "it could not be removed. Please remove the file "
"by hand and try again.")));
}
@@ -878,7 +878,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errno = save_errno ? save_errno : ENOSPC;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
if (close(fd))
{
@@ -888,7 +888,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errno = save_errno;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
/*
@@ -939,10 +939,10 @@ TouchSocketLockFile(void)
if (socketLockFile[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative; if
- * we have neither, fall back to actually reading the file (which
- * only sets the access time not mod time, but that should be
- * enough in most cases). In all paths, we ignore errors.
+ * utime() is POSIX standard, utimes() is a common alternative; if we
+ * have neither, fall back to actually reading the file (which only
+ * sets the access time not mod time, but that should be enough in
+ * most cases). In all paths, we ignore errors.
*/
#ifdef HAVE_UTIME
utime(socketLockFile, NULL);
@@ -1093,7 +1093,7 @@ ValidatePgVersion(const char *path)
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", full_path)));
+ errmsg("could not open file \"%s\": %m", full_path)));
}
ret = fscanf(file, "%ld.%ld", &file_major, &file_minor);
@@ -1113,7 +1113,7 @@ ValidatePgVersion(const char *path)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("database files are incompatible with server"),
errdetail("The data directory was initialized by PostgreSQL version %ld.%ld, "
- "which is not compatible with this version %s.",
+ "which is not compatible with this version %s.",
file_major, file_minor, version_string)));
}
@@ -1149,7 +1149,7 @@ process_preload_libraries(char *preload_libraries_string)
list_free(elemlist);
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid list syntax for parameter \"preload_libraries\"")));
+ errmsg("invalid list syntax for parameter \"preload_libraries\"")));
return;
}
@@ -1164,9 +1164,8 @@ process_preload_libraries(char *preload_libraries_string)
if (sep)
{
/*
- * a colon separator implies there is an initialization
- * function that we need to run in addition to loading the
- * library
+ * a colon separator implies there is an initialization function
+ * that we need to run in addition to loading the library
*/
size_t filename_len = sep - tok;
size_t funcname_len = strlen(tok) - filename_len - 1;
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 73fedbdd477..3c763e39292 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.157 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158 2005/10/15 02:49:33 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -112,7 +112,7 @@ FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace)
*
* Since FindMyDatabase cannot lock pg_database, the information it read
* could be stale; for example we might have attached to a database that's in
- * process of being destroyed by dropdb(). This routine is called after
+ * process of being destroyed by dropdb(). This routine is called after
* we have all the locking and other infrastructure running --- now we can
* check that we are really attached to a valid database.
*
@@ -134,14 +134,14 @@ static void
ReverifyMyDatabase(const char *name)
{
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
Form_pg_database dbform;
/*
- * Because we grab RowShareLock here, we can be sure that dropdb()
- * is not running in parallel with us (any more).
+ * Because we grab RowShareLock here, we can be sure that dropdb() is not
+ * running in parallel with us (any more).
*/
pgdbrel = heap_open(DatabaseRelationId, RowShareLock);
@@ -161,17 +161,17 @@ ReverifyMyDatabase(const char *name)
heap_close(pgdbrel, RowShareLock);
/*
- * The only real problem I could have created is to load dirty
- * buffers for the dead database into shared buffer cache; if I
- * did, some other backend will eventually try to write them and
- * die in mdblindwrt. Flush any such pages to forestall trouble.
+ * The only real problem I could have created is to load dirty buffers
+ * for the dead database into shared buffer cache; if I did, some
+ * other backend will eventually try to write them and die in
+ * mdblindwrt. Flush any such pages to forestall trouble.
*/
DropBuffers(MyDatabaseId);
/* Now I can commit hara-kiri with a clear conscience... */
ereport(FATAL,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\", OID %u, has disappeared from pg_database",
- name, MyDatabaseId)));
+ errmsg("database \"%s\", OID %u, has disappeared from pg_database",
+ name, MyDatabaseId)));
}
dbform = (Form_pg_database) GETSTRUCT(tup);
@@ -191,17 +191,18 @@ ReverifyMyDatabase(const char *name)
if (!dbform->datallowconn)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("database \"%s\" is not currently accepting connections",
- name)));
+ errmsg("database \"%s\" is not currently accepting connections",
+ name)));
+
/*
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (dbform->datconnlimit >= 0 &&
!superuser() &&
@@ -213,8 +214,8 @@ ReverifyMyDatabase(const char *name)
}
/*
- * OK, we're golden. Next to-do item is to save the encoding
- * info out of the pg_database tuple.
+ * OK, we're golden. Next to-do item is to save the encoding info out of
+ * the pg_database tuple.
*/
SetDatabaseEncoding(dbform->encoding);
/* Record it as a GUC internal option, too */
@@ -264,8 +265,8 @@ InitCommunication(void)
if (!IsUnderPostmaster) /* postmaster already did this */
{
/*
- * We're running a postgres bootstrap process or a standalone
- * backend. Create private "shmem" and semaphores.
+ * We're running a postgres bootstrap process or a standalone backend.
+ * Create private "shmem" and semaphores.
*/
CreateSharedMemoryAndSemaphores(true, 0);
}
@@ -309,7 +310,7 @@ BaseInit(void)
* The return value indicates whether the userID is a superuser. (That
* can only be tested inside a transaction, so we want to do it during
* the startup transaction rather than doing a separate one in postgres.c.)
- *
+ *
* Note:
* Be very careful with the order of calls in the InitPostgres function.
* --------------------------------
@@ -324,8 +325,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Set up the global variables holding database id and path.
*
- * We take a shortcut in the bootstrap case, otherwise we have to look up
- * the db name in pg_database.
+ * We take a shortcut in the bootstrap case, otherwise we have to look up the
+ * db name in pg_database.
*/
if (bootstrap)
{
@@ -338,13 +339,12 @@ InitPostgres(const char *dbname, const char *username)
char *fullpath;
/*
- * Formerly we validated DataDir here, but now that's done
- * earlier.
+ * Formerly we validated DataDir here, but now that's done earlier.
*/
/*
- * Find oid and tablespace of the database we're about to open.
- * Since we're not yet up and running we have to use the hackish
+ * Find oid and tablespace of the database we're about to open. Since
+ * we're not yet up and running we have to use the hackish
* FindMyDatabase.
*/
if (!FindMyDatabase(dbname, &MyDatabaseId, &MyDatabaseTableSpace))
@@ -364,8 +364,8 @@ InitPostgres(const char *dbname, const char *username)
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("The database subdirectory \"%s\" is missing.",
- fullpath)));
+ errdetail("The database subdirectory \"%s\" is missing.",
+ fullpath)));
else
ereport(FATAL,
(errcode_for_file_access(),
@@ -383,17 +383,17 @@ InitPostgres(const char *dbname, const char *username)
*/
/*
- * Set up my per-backend PGPROC struct in shared memory. (We need
- * to know MyDatabaseId before we can do this, since it's entered into
- * the PGPROC struct.)
+ * Set up my per-backend PGPROC struct in shared memory. (We need to
+ * know MyDatabaseId before we can do this, since it's entered into the
+ * PGPROC struct.)
*/
InitProcess();
/*
* Initialize my entry in the shared-invalidation manager's array of
- * per-backend data. (Formerly this came before InitProcess, but now
- * it must happen after, because it uses MyProc.) Once I have done
- * this, I am visible to other backends!
+ * per-backend data. (Formerly this came before InitProcess, but now it
+ * must happen after, because it uses MyProc.) Once I have done this, I
+ * am visible to other backends!
*
* Sets up MyBackendId, a unique backend identifier.
*/
@@ -410,22 +410,22 @@ InitPostgres(const char *dbname, const char *username)
InitBufferPoolBackend();
/*
- * Initialize local process's access to XLOG. In bootstrap case we
- * may skip this since StartupXLOG() was run instead.
+ * Initialize local process's access to XLOG. In bootstrap case we may
+ * skip this since StartupXLOG() was run instead.
*/
if (!bootstrap)
InitXLOGAccess();
/*
- * Initialize the relation descriptor cache. This must create at
- * least the minimum set of "nailed-in" cache entries. No catalog
- * access happens here.
+ * Initialize the relation descriptor cache. This must create at least
+ * the minimum set of "nailed-in" cache entries. No catalog access
+ * happens here.
*/
RelationCacheInitialize();
/*
- * Initialize all the system catalog caches. Note that no catalog
- * access happens here; we only set up the cache structure.
+ * Initialize all the system catalog caches. Note that no catalog access
+ * happens here; we only set up the cache structure.
*/
InitCatalogCache();
@@ -433,14 +433,13 @@ InitPostgres(const char *dbname, const char *username)
EnablePortalManager();
/*
- * Set up process-exit callback to do pre-shutdown cleanup. This
- * has to be after we've initialized all the low-level modules
- * like the buffer manager, because during shutdown this has to
- * run before the low-level modules start to close down. On the
- * other hand, we want it in place before we begin our first
- * transaction --- if we fail during the initialization transaction,
- * as is entirely possible, we need the AbortTransaction call to
- * clean up.
+ * Set up process-exit callback to do pre-shutdown cleanup. This has to
+ * be after we've initialized all the low-level modules like the buffer
+ * manager, because during shutdown this has to run before the low-level
+ * modules start to close down. On the other hand, we want it in place
+ * before we begin our first transaction --- if we fail during the
+ * initialization transaction, as is entirely possible, we need the
+ * AbortTransaction call to clean up.
*/
on_shmem_exit(ShutdownPostgres, 0);
@@ -479,18 +478,18 @@ InitPostgres(const char *dbname, const char *username)
}
/*
- * Unless we are bootstrapping, double-check that InitMyDatabaseInfo()
- * got a correct result. We can't do this until all the
- * database-access infrastructure is up. (Also, it wants to know if
- * the user is a superuser, so the above stuff has to happen first.)
+ * Unless we are bootstrapping, double-check that InitMyDatabaseInfo() got
+ * a correct result. We can't do this until all the database-access
+ * infrastructure is up. (Also, it wants to know if the user is a
+ * superuser, so the above stuff has to happen first.)
*/
if (!bootstrap)
ReverifyMyDatabase(dbname);
/*
* Final phase of relation cache startup: write a new cache file if
- * necessary. This is done after ReverifyMyDatabase to avoid writing
- * a cache file into a dead database.
+ * necessary. This is done after ReverifyMyDatabase to avoid writing a
+ * cache file into a dead database.
*/
RelationCacheInitializePhase3();
@@ -555,8 +554,8 @@ ShutdownPostgres(int code, Datum arg)
AbortOutOfAnyTransaction();
/*
- * User locks are not released by transaction end, so be sure to
- * release them explicitly.
+ * User locks are not released by transaction end, so be sure to release
+ * them explicitly.
*/
#ifdef USER_LOCKS
LockReleaseAll(USER_LOCKMETHOD, true);
diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c
index 02082db5ef7..a395384c931 100644
--- a/src/backend/utils/mb/conv.c
+++ b/src/backend/utils/mb/conv.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.54 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.55 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,8 +223,8 @@ pg_mic2ascii(unsigned char *mic, unsigned char *p, int len)
void
latin2mic_with_table(
unsigned char *l, /* local charset string (source) */
- unsigned char *p, /* pointer to store mule internal
- * code (destination) */
+ unsigned char *p, /* pointer to store mule internal code
+ * (destination) */
int len, /* length of l */
int lc, /* leading character of p */
unsigned char *tab /* code conversion table */
@@ -265,8 +265,7 @@ latin2mic_with_table(
*/
void
mic2latin_with_table(
- unsigned char *mic, /* mule internal code
- * (source) */
+ unsigned char *mic, /* mule internal code (source) */
unsigned char *p, /* local code (destination) */
int len, /* length of p */
int lc, /* leading character */
@@ -380,8 +379,8 @@ UtfToLocal(unsigned char *utf, unsigned char *iso,
{
ereport(WARNING,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("ignoring unconvertible UTF8 character 0x%04x",
- iutf)));
+ errmsg("ignoring unconvertible UTF8 character 0x%04x",
+ iutf)));
continue;
}
if (p->code & 0xff000000)
diff --git a/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c b/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
index 277224103ac..3b215b2c4fb 100644
--- a/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
+++ b/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.12 2005/09/24 17:53:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.13 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -462,7 +462,7 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
while (euc_end >= euc && (c1 = *euc++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
@@ -487,7 +487,8 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
}
else
{
- int i, k2;
+ int i,
+ k2;
/* IBM kanji */
for (i = 0;; i++)
@@ -508,9 +509,9 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
}
}
}
- }
+ }
else
- {
+ {
/* JIS X0208 kanji? */
c2 = *euc++;
k = (c1 << 8) | (c2 & 0xff);
@@ -543,7 +544,7 @@ sjis2euc_jp(unsigned char *sjis, unsigned char *p, int len)
while (sjis_end >= sjis && (c1 = *sjis++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
@@ -643,4 +644,3 @@ sjis2euc_jp(unsigned char *sjis, unsigned char *p, int len)
}
*p = '\0';
}
-
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 4bdbfe95635..0447c2a9e7d 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -7,7 +7,7 @@
*
* 1999/1/15 Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.5 2004/08/30 02:54:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6 2005/10/15 02:49:34 momjian Exp $
*/
/* can be used in either frontend or backend */
@@ -19,7 +19,7 @@ typedef struct
{
unsigned short code,
peer;
-} codes_t;
+} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
static codes_t big5Level1ToCnsPlane1[25] = { /* range */
@@ -205,7 +205,7 @@ static unsigned short b2c3[][2] = {
};
static unsigned short BinarySearchRange
- (codes_t *array, int high, unsigned short code)
+ (codes_t * array, int high, unsigned short code)
{
int low,
mid,
@@ -230,24 +230,24 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
- * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix
- * is 0x9d. [region_low, region_high]
- * We should remember big5 has two different regions
- * (above). There is a bias for the distance between these
- * regions. 0xa1 - 0x7e + bias = 1 (Distance between 0xa1
- * and 0x7e is 1.) bias = - 0x22.
+ * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
+ * 0x9d. [region_low, region_high] We
+ * should remember big5 has two different regions (above).
+ * There is a bias for the distance between these regions.
+ * 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
+ * 1.) bias = - 0x22.
*/
distance = tmp * 0x9d + high - low +
(high >= 0xa1 ? (low >= 0xa1 ? 0 : -0x22)
: (low >= 0xa1 ? +0x22 : 0));
/*
- * NOTE: we have to convert the distance into a code
- * point. The code point's low_byte is 0x21 plus mod_0x5e.
- * In the first, we extract the mod_0x5e of the starting
- * code point, subtracting 0x21, and add distance to it.
- * Then we calculate again mod_0x5e of them, and restore
- * the final codepoint, adding 0x21.
+ * NOTE: we have to convert the distance into a code point.
+ * The code point's low_byte is 0x21 plus mod_0x5e. In the
+ * first, we extract the mod_0x5e of the starting code point,
+ * subtracting 0x21, and add distance to it. Then we calculate
+ * again mod_0x5e of them, and restore the final codepoint,
+ * adding 0x21.
*/
tmp = (array[mid].peer & 0x00ff) + distance - 0x21;
tmp = (array[mid].peer & 0xff00) + ((tmp / 0x5e) << 8)
@@ -260,9 +260,8 @@ static unsigned short BinarySearchRange
tmp = ((code & 0xff00) - (array[mid].code & 0xff00)) >> 8;
/*
- * NOTE: ISO charsets ranges between 0x21-0xfe
- * (94charset). Its radix is 0x5e. But there is no
- * distance bias like big5.
+ * NOTE: ISO charsets ranges between 0x21-0xfe (94charset).
+ * Its radix is 0x5e. But there is no distance bias like big5.
*/
distance = tmp * 0x5e
+ ((int) (code & 0x00ff) - (int) (array[mid].code & 0x00ff));
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
index 3330f89d5df..efab622c94f 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.11 2005/09/24 17:53:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,7 +75,7 @@ koi8r_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapKOI8R,
- sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
+ sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
PG_RETURN_VOID();
}
@@ -109,7 +109,7 @@ win1251_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1251,
- sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
+ sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
index 29196cb4e8d..9dcd87355a8 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_cn_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_CN,
- sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
+ sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
index bbe849de904..4231bc08dff 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_jp_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_JP,
- sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
+ sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
index db1505ab626..b197b064eeb 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_kr_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_KR,
- sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
+ sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
index cc05c64cc13..b2f7d465a77 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_tw_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_TW,
- sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
+ sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
index 37f207ec153..6cc235e7327 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.11 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ gb18030_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapGB18030,
- sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
+ sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index b6c56324ec3..0038db58e62 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.14 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ typedef struct
pg_utf_to_local *map2; /* from UTF8 map name */
int size1; /* size of map1 */
int size2; /* size of map2 */
-} pg_conv_map;
+} pg_conv_map;
static pg_conv_map maps[] = {
{PG_SQL_ASCII}, /* SQL/ASCII */
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
index 9e93be9c496..3de91947af8 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.11 2005/09/24 17:53:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.12 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ johab_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapJOHAB,
- sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
+ sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
index cd79893ae8c..6789ca7aaa5 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.12 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1250_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1250,
- sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
+ sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
index b7592b6c30c..b4d2b2375a9 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.4 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.5 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1252_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1252,
- sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
+ sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
index a0036d80e63..d6b83d8f837 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1256_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1256,
- sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
+ sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
index 3b74f81ba9c..7cdcfd3c120 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.2 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.3 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ win1258_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1258,
- sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
+ sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c b/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
index 48f30b2cad9..7eda096a9be 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win874_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN874,
- sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
+ sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/encnames.c b/src/backend/utils/mb/encnames.c
index 5c0b15fd745..145343a881d 100644
--- a/src/backend/utils/mb/encnames.c
+++ b/src/backend/utils/mb/encnames.c
@@ -2,7 +2,7 @@
* Encoding names and routines for work with it. All
* in this file is shared bedween FE and BE.
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.25 2005/03/14 18:31:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.26 2005/10/15 02:49:33 momjian Exp $
*/
#ifdef FRONTEND
#include "postgres_fe.h"
@@ -45,16 +45,16 @@ pg_encname pg_encname_tbl[] =
}, /* Big5; Chinese for Taiwan multibyte set */
{
"euccn", PG_EUC_CN
- }, /* EUC-CN; Extended Unix Code for
- * simplified Chinese */
+ }, /* EUC-CN; Extended Unix Code for simplified
+ * Chinese */
{
"eucjp", PG_EUC_JP
- }, /* EUC-JP; Extended UNIX Code fixed Width
- * for Japanese, standard OSF */
+ }, /* EUC-JP; Extended UNIX Code fixed Width for
+ * Japanese, standard OSF */
{
"euckr", PG_EUC_KR
- }, /* EUC-KR; Extended Unix Code for Korean ,
- * KS X 1001 standard */
+ }, /* EUC-KR; Extended Unix Code for Korean , KS
+ * X 1001 standard */
{
"euctw", PG_EUC_TW
}, /* EUC-TW; Extended Unix Code for
@@ -111,8 +111,8 @@ pg_encname pg_encname_tbl[] =
}, /* ISO-8859-9; RFC1345,KXS2 */
{
"johab", PG_JOHAB
- }, /* JOHAB; Extended Unix Code for
- * simplified Chinese */
+ }, /* JOHAB; Extended Unix Code for simplified
+ * Chinese */
{
"koi8", PG_KOI8R
}, /* _dirty_ alias for KOI8-R (backward
@@ -185,8 +185,8 @@ pg_encname pg_encname_tbl[] =
}, /* alias for WIN1258 */
{
"win", PG_WIN1251
- }, /* _dirty_ alias for windows-1251
- * (backward compatibility) */
+ }, /* _dirty_ alias for windows-1251 (backward
+ * compatibility) */
{
"win1250", PG_WIN1250
}, /* alias for Windows-1250 */
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 8058fc8d27a..f8dc7a31922 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -4,7 +4,7 @@
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.51 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.52 2005/10/15 02:49:33 momjian Exp $
*/
#include "postgres.h"
@@ -107,12 +107,11 @@ SetClientEncoding(int encoding, bool doit)
}
/*
- * If we're not inside a transaction then we can't do catalog lookups,
- * so fail. After backend startup, this could only happen if we are
+ * If we're not inside a transaction then we can't do catalog lookups, so
+ * fail. After backend startup, this could only happen if we are
* re-reading postgresql.conf due to SIGHUP --- so basically this just
* constrains the ability to change client_encoding on the fly from
- * postgresql.conf. Which would probably be a stupid thing to do
- * anyway.
+ * postgresql.conf. Which would probably be a stupid thing to do anyway.
*/
if (!IsTransactionState())
return -1;
@@ -136,8 +135,8 @@ SetClientEncoding(int encoding, bool doit)
return 0;
/*
- * load the fmgr info into TopMemoryContext so that it survives
- * outside transaction.
+ * load the fmgr info into TopMemoryContext so that it survives outside
+ * transaction.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
to_server = palloc(sizeof(FmgrInfo));
@@ -180,8 +179,8 @@ InitializeClientEncoding(void)
if (SetClientEncoding(pending_client_encoding, true) < 0)
{
/*
- * Oops, the requested conversion is not available. We couldn't
- * fail before, but we can now.
+ * Oops, the requested conversion is not available. We couldn't fail
+ * before, but we can now.
*/
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -256,8 +255,8 @@ pg_do_encoding_conversion(unsigned char *src, int len,
}
/*
- * XXX we should avoid throwing errors in OidFunctionCall. Otherwise
- * we are going into infinite loop! So we have to make sure that the
+ * XXX we should avoid throwing errors in OidFunctionCall. Otherwise we
+ * are going into infinite loop! So we have to make sure that the
* function exists before calling OidFunctionCall.
*/
if (!SearchSysCacheExists(PROCOID,
@@ -290,11 +289,11 @@ pg_convert(PG_FUNCTION_ARGS)
Datum string = PG_GETARG_DATUM(0);
Datum dest_encoding_name = PG_GETARG_DATUM(1);
Datum src_encoding_name = DirectFunctionCall1(
- namein, CStringGetDatum(DatabaseEncoding->name));
+ namein, CStringGetDatum(DatabaseEncoding->name));
Datum result;
result = DirectFunctionCall3(
- pg_convert2, string, src_encoding_name, dest_encoding_name);
+ pg_convert2, string, src_encoding_name, dest_encoding_name);
/* free memory allocated by namein */
pfree((void *) src_encoding_name);
@@ -343,8 +342,7 @@ pg_convert2(PG_FUNCTION_ARGS)
/*
* build text data type structure. we cannot use textin() here, since
- * textin assumes that input string encoding is same as database
- * encoding.
+ * textin assumes that input string encoding is same as database encoding.
*/
len = strlen((char *) result) + VARHDRSZ;
retval = palloc(len);
@@ -502,7 +500,7 @@ pg_mbstrlen_with_len(const char *mbstr, int limit)
while (limit > 0 && *mbstr)
{
- int l = pg_mblen(mbstr);
+ int l = pg_mblen(mbstr);
limit -= l;
mbstr += l;
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 59116e2e818..e8866ba35ca 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1,7 +1,7 @@
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.45 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.46 2005/10/15 02:49:33 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
@@ -406,14 +406,14 @@ pg_utf_mblen(const unsigned char *s)
len = 1;
else if ((*s & 0xe0) == 0xc0)
len = 2;
- else if ((*s & 0xf0) == 0xe0)
- len = 3;
- else if ((*s & 0xf8) == 0xf0)
- len = 4;
- else if ((*s & 0xfc) == 0xf8)
- len = 5;
- else if ((*s & 0xfe) == 0xfc)
- len = 6;
+ else if ((*s & 0xf0) == 0xe0)
+ len = 3;
+ else if ((*s & 0xf8) == 0xf0)
+ len = 4;
+ else if ((*s & 0xfc) == 0xf8)
+ len = 5;
+ else if ((*s & 0xfe) == 0xfc)
+ len = 6;
return (len);
}
@@ -727,8 +727,8 @@ pg_wchar_tbl pg_wchar_table[] = {
{pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, 3}, /* 3; PG_EUC_KR */
{pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, 3}, /* 4; PG_EUC_TW */
{pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, 3}, /* 5; PG_JOHAB */
- {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
- {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
+ {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
+ {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 8; PG_LATIN1 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 9; PG_LATIN2 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 10; PG_LATIN3 */
@@ -775,8 +775,8 @@ pg_encoding_mblen(int encoding, const char *mbstr)
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
}
/*
@@ -789,8 +789,8 @@ pg_encoding_dsplen(int encoding, const char *mbstr)
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
}
/*
@@ -806,28 +806,57 @@ pg_encoding_max_length(int encoding)
#ifndef FRONTEND
-bool pg_utf8_islegal(const unsigned char *source, int length) {
- unsigned char a;
- const unsigned char *srcptr = source+length;
- switch (length) {
- default: return false;
- /* Everything else falls through when "true"... */
- case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 2: if ((a = (*--srcptr)) > 0xBF) return false;
- switch (*source) {
- /* no fall-through in this inner switch */
- case 0xE0: if (a < 0xA0) return false; break;
- case 0xED: if (a > 0x9F) return false; break;
- case 0xF0: if (a < 0x90) return false; break;
- case 0xF4: if (a > 0x8F) return false; break;
- default: if (a < 0x80) return false;
- }
-
- case 1: if (*source >= 0x80 && *source < 0xC2) return false;
- }
- if (*source > 0xF4) return false;
- return true;
+bool
+pg_utf8_islegal(const unsigned char *source, int length)
+{
+ unsigned char a;
+ const unsigned char *srcptr = source + length;
+
+ switch (length)
+ {
+ default:
+ return false;
+ /* Everything else falls through when "true"... */
+ case 4:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 3:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 2:
+ if ((a = (*--srcptr)) > 0xBF)
+ return false;
+ switch (*source)
+ {
+ /* no fall-through in this inner switch */
+ case 0xE0:
+ if (a < 0xA0)
+ return false;
+ break;
+ case 0xED:
+ if (a > 0x9F)
+ return false;
+ break;
+ case 0xF0:
+ if (a < 0x90)
+ return false;
+ break;
+ case 0xF4:
+ if (a > 0x8F)
+ return false;
+ break;
+ default:
+ if (a < 0x80)
+ return false;
+ }
+
+ case 1:
+ if (*source >= 0x80 && *source < 0xC2)
+ return false;
+ }
+ if (*source > 0xF4)
+ return false;
+ return true;
}
@@ -855,11 +884,11 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
while (len > 0 && *mbstr)
{
l = pg_mblen(mbstr);
-
+
/* special UTF-8 check */
if (encoding == PG_UTF8)
{
- if(!pg_utf8_islegal((const unsigned char *) mbstr, l))
+ if (!pg_utf8_islegal((const unsigned char *) mbstr, l))
{
if (noError)
return false;
@@ -868,7 +897,9 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
errmsg("invalid UNICODE byte sequence detected near byte 0x%02x",
(unsigned char) *mbstr)));
}
- } else {
+ }
+ else
+ {
for (i = 1; i < l; i++)
{
/*
@@ -878,23 +909,23 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
if (i >= len || (mbstr[i] & 0x80) == 0)
{
char buf[8 * 2 + 1];
- char *p = buf;
- int j,
- jlimit;
+ char *p = buf;
+ int j,
+ jlimit;
if (noError)
return false;
jlimit = Min(l, len);
- jlimit = Min(jlimit, 8); /* prevent buffer overrun */
+ jlimit = Min(jlimit, 8); /* prevent buffer overrun */
for (j = 0; j < jlimit; j++)
p += sprintf(p, "%02x", (unsigned char) mbstr[j]);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
- GetDatabaseEncodingName(), buf)));
+ errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
+ GetDatabaseEncodingName(), buf)));
}
}
}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 1ba1ac31d3f..3394fd77e31 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.292 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.293 2005/10/15 02:49:36 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@
#define PG_KRB_SRVNAM ""
#endif
-#define CONFIG_FILENAME "postgresql.conf"
+#define CONFIG_FILENAME "postgresql.conf"
#define HBA_FILENAME "pg_hba.conf"
#define IDENT_FILENAME "pg_ident.conf"
@@ -94,9 +94,10 @@ extern DLLIMPORT bool check_function_bodies;
extern int CommitDelay;
extern int CommitSiblings;
extern char *default_tablespace;
-extern bool fullPageWrites;
+extern bool fullPageWrites;
+
#ifdef TRACE_SORT
-extern bool trace_sort;
+extern bool trace_sort;
#endif
static const char *assign_log_destination(const char *value,
@@ -106,9 +107,9 @@ static const char *assign_log_destination(const char *value,
static int syslog_facility = LOG_LOCAL0;
static const char *assign_syslog_facility(const char *facility,
- bool doit, GucSource source);
+ bool doit, GucSource source);
static const char *assign_syslog_ident(const char *ident,
- bool doit, GucSource source);
+ bool doit, GucSource source);
#endif
static const char *assign_defaultxactisolevel(const char *newval, bool doit,
@@ -157,8 +158,8 @@ bool Explain_pretty_print = true;
bool log_parser_stats = false;
bool log_planner_stats = false;
bool log_executor_stats = false;
-bool log_statement_stats = false; /* this is sort of all
- * three above together */
+bool log_statement_stats = false; /* this is sort of all three
+ * above together */
bool log_btree_build_stats = false;
bool SQL_inheritance = true;
@@ -181,9 +182,9 @@ char *HbaFileName;
char *IdentFileName;
char *external_pid_file;
-int tcp_keepalives_idle;
-int tcp_keepalives_interval;
-int tcp_keepalives_count;
+int tcp_keepalives_idle;
+int tcp_keepalives_interval;
+int tcp_keepalives_count;
/*
* These variables are all dummies that don't do anything, except in some
@@ -217,8 +218,8 @@ static int max_function_args;
static int max_index_keys;
static int max_identifier_length;
static int block_size;
-static bool integer_datetimes;
-static bool standard_conforming_strings;
+static bool integer_datetimes;
+static bool standard_conforming_strings;
/* should be static, but commands/variable.c needs to get at these */
char *role_string;
@@ -501,7 +502,7 @@ static struct config_bool ConfigureNamesBool[] =
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This insures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
@@ -512,7 +513,7 @@ static struct config_bool ConfigureNamesBool[] =
{"zero_damaged_pages", PGC_SUSET, DEVELOPER_OPTIONS,
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
- "report an error, aborting the current transaction. Setting "
+ "report an error, aborting the current transaction. Setting "
"zero_damaged_pages to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
@@ -526,7 +527,7 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
gettext_noop("A page write in process during an operating system crash might be "
"only partially written to disk. During recovery, the row changes"
- "stored in WAL are not enough to recover. This option writes "
+ "stored in WAL are not enough to recover. This option writes "
"pages when first modified after a checkpoint to WAL so full recovery "
"is possible.")
},
@@ -537,7 +538,7 @@ static struct config_bool ConfigureNamesBool[] =
{"silent_mode", PGC_POSTMASTER, LOGGING_WHEN,
gettext_noop("Runs the server silently."),
gettext_noop("If this parameter is set, the server will automatically run in the "
- "background and any controlling terminals are dissociated.")
+ "background and any controlling terminals are dissociated.")
},
&SilentMode,
false, NULL, NULL
@@ -693,7 +694,7 @@ static struct config_bool ConfigureNamesBool[] =
{"stats_command_string", PGC_SUSET, STATS_COLLECTOR,
gettext_noop("Collects statistics about executing commands."),
gettext_noop("Enables the collection of statistics on the currently "
- "executing command of each session, along with the time "
+ "executing command of each session, along with the time "
"at which that command began execution.")
},
&pgstat_collect_querystring,
@@ -722,7 +723,7 @@ static struct config_bool ConfigureNamesBool[] =
NULL
},
&autovacuum_start_daemon,
- false, NULL, NULL
+ false, NULL, NULL
},
{
@@ -779,8 +780,8 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Logs the host name in the connection logs."),
gettext_noop("By default, connection logs only show the IP address "
"of the connecting host. If you want them to show the host name you "
- "can turn this on, but depending on your host name resolution "
- "setup it might impose a non-negligible performance penalty.")
+ "can turn this on, but depending on your host name resolution "
+ "setup it might impose a non-negligible performance penalty.")
},
&log_hostname,
false, NULL, NULL
@@ -806,7 +807,7 @@ static struct config_bool ConfigureNamesBool[] =
{"password_encryption", PGC_USERSET, CONN_AUTH_SECURITY,
gettext_noop("Encrypt passwords."),
gettext_noop("When a password is specified in CREATE USER or "
- "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
+ "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
"this parameter determines whether the password is to be encrypted.")
},
&Password_encryption,
@@ -816,9 +817,9 @@ static struct config_bool ConfigureNamesBool[] =
{"transform_null_equals", PGC_USERSET, COMPAT_OPTIONS_CLIENT,
gettext_noop("Treats \"expr=NULL\" as \"expr IS NULL\"."),
gettext_noop("When turned on, expressions of the form expr = NULL "
- "(or NULL = expr) are treated as expr IS NULL, that is, they "
- "return true if expr evaluates to the null value, and false "
- "otherwise. The correct behavior of expr = NULL is to always "
+ "(or NULL = expr) are treated as expr IS NULL, that is, they "
+ "return true if expr evaluates to the null value, and false "
+ "otherwise. The correct behavior of expr = NULL is to always "
"return null (unknown).")
},
&Transform_null_equals,
@@ -979,7 +980,7 @@ static struct config_int ConfigureNamesInt[] =
{"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Sets the default statistics target."),
gettext_noop("This applies to table columns that have not had a "
- "column-specific target set via ALTER TABLE SET STATISTICS.")
+ "column-specific target set via ALTER TABLE SET STATISTICS.")
},
&default_statistics_target,
10, 1, 1000, NULL, NULL
@@ -989,7 +990,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Sets the FROM-list size beyond which subqueries are not "
"collapsed."),
gettext_noop("The planner will merge subqueries into upper "
- "queries if the resulting FROM list would have no more than "
+ "queries if the resulting FROM list would have no more than "
"this many items.")
},
&from_collapse_limit,
@@ -1000,7 +1001,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Sets the FROM-list size beyond which JOIN constructs are not "
"flattened."),
gettext_noop("The planner will flatten explicit inner JOIN "
- "constructs into lists of FROM items whenever a list of no more "
+ "constructs into lists of FROM items whenever a list of no more "
"than this many items would result.")
},
&join_collapse_limit,
@@ -1052,12 +1053,12 @@ static struct config_int ConfigureNamesInt[] =
* Note: There is some postprocessing done in PostmasterMain() to make
* sure the buffers are at least twice the number of backends, so the
* constraints here are partially unused. Similarly, the superuser
- * reserved number is checked to ensure it is less than the max
- * backends number.
+ * reserved number is checked to ensure it is less than the max backends
+ * number.
*
* MaxBackends is limited to INT_MAX/4 because some places compute
- * 4*MaxBackends without any overflow check. Likewise we have to
- * limit NBuffers to INT_MAX/2.
+ * 4*MaxBackends without any overflow check. Likewise we have to limit
+ * NBuffers to INT_MAX/2.
*/
{
{"max_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
@@ -1121,7 +1122,7 @@ static struct config_int ConfigureNamesInt[] =
{"work_mem", PGC_USERSET, RESOURCES_MEM,
gettext_noop("Sets the maximum memory to be used for query workspaces."),
gettext_noop("This much memory may be used by each internal "
- "sort operation and hash table before switching to "
+ "sort operation and hash table before switching to "
"temporary disk files.")
},
&work_mem,
@@ -1278,7 +1279,7 @@ static struct config_int ConfigureNamesInt[] =
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that "
- "at most max_locks_per_transaction * max_connections distinct "
+ "at most max_locks_per_transaction * max_connections distinct "
"objects will need to be locked at any one time.")
},
&max_locks_per_xact,
@@ -1328,7 +1329,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Logs if filling of checkpoint segments happens more "
"frequently than this (in seconds)."),
gettext_noop("Write a message to the server log if checkpoints "
- "caused by the filling of checkpoint segment files happens more "
+ "caused by the filling of checkpoint segment files happens more "
"frequently than this number of seconds. Zero turns off the warning.")
},
&CheckPointWarning,
@@ -1368,7 +1369,7 @@ static struct config_int ConfigureNamesInt[] =
{"extra_float_digits", PGC_USERSET, CLIENT_CONN_LOCALE,
gettext_noop("Sets the number of digits displayed for floating-point values."),
gettext_noop("This affects real, double precision, and geometric data types. "
- "The parameter value is added to the standard number of digits "
+ "The parameter value is added to the standard number of digits "
"(FLT_DIG or DBL_DIG as appropriate).")
},
&extra_float_digits,
@@ -1497,29 +1498,29 @@ static struct config_int ConfigureNamesInt[] =
{
{"tcp_keepalives_idle", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between issuing TCP keepalives."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between issuing TCP keepalives."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_idle,
0, 0, INT_MAX, assign_tcp_keepalives_idle, show_tcp_keepalives_idle
},
{
{"tcp_keepalives_interval", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between TCP keepalive retransmits."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between TCP keepalive retransmits."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_interval,
0, 0, INT_MAX, assign_tcp_keepalives_interval, show_tcp_keepalives_interval
},
{
{"tcp_keepalives_count", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Maximum number of TCP keepalive retransmits."),
- gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
- "lost before a connection is considered dead. A value of 0 uses the "
- "system default."),
- },
+ gettext_noop("Maximum number of TCP keepalive retransmits."),
+ gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
+ "lost before a connection is considered dead. A value of 0 uses the "
+ "system default."),
+ },
&tcp_keepalives_count,
0, 0, INT_MAX, assign_tcp_keepalives_count, show_tcp_keepalives_count
},
@@ -1548,7 +1549,7 @@ static struct config_real ConfigureNamesReal[] =
gettext_noop("Sets the planner's estimate of the cost of a nonsequentially "
"fetched disk page."),
gettext_noop("This is measured as a multiple of the cost of a "
- "sequential page fetch. A higher value makes it more likely a "
+ "sequential page fetch. A higher value makes it more likely a "
"sequential scan will be used, a lower value makes it more likely an "
"index scan will be used.")
},
@@ -1683,7 +1684,7 @@ static struct config_string ConfigureNamesString[] =
{"log_min_messages", PGC_SUSET, LOGGING_WHEN,
gettext_noop("Sets the message levels that are logged."),
gettext_noop("Valid values are DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, "
- "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
+ "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
"includes all the levels that follow it.")
},
&log_min_messages_str,
@@ -2060,9 +2061,9 @@ static struct config_string ConfigureNamesString[] =
{
{"data_directory", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's data directory."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's data directory."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&data_directory,
NULL, NULL, NULL
@@ -2070,9 +2071,9 @@ static struct config_string ConfigureNamesString[] =
{
{"config_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's main configuration file."),
- NULL,
- GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's main configuration file."),
+ NULL,
+ GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
},
&ConfigFileName,
NULL, NULL, NULL
@@ -2080,9 +2081,9 @@ static struct config_string ConfigureNamesString[] =
{
{"hba_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"hba\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"hba\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&HbaFileName,
NULL, NULL, NULL
@@ -2090,9 +2091,9 @@ static struct config_string ConfigureNamesString[] =
{
{"ident_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"ident\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"ident\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&IdentFileName,
NULL, NULL, NULL
@@ -2100,9 +2101,9 @@ static struct config_string ConfigureNamesString[] =
{
{"external_pid_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Writes the postmaster PID to the specified file."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Writes the postmaster PID to the specified file."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&external_pid_file,
NULL, assign_canonical_path, NULL
@@ -2341,8 +2342,8 @@ static bool
is_custom_class(const char *name, int dotPos)
{
/*
- * assign_custom_variable_classes() has made sure no empty
- * identifiers or whitespace exists in the variable
+ * assign_custom_variable_classes() has made sure no empty identifiers or
+ * whitespace exists in the variable
*/
bool result = false;
const char *ccs = GetConfigOption("custom_variable_classes");
@@ -2472,21 +2473,21 @@ find_option(const char *name, int elevel)
Assert(name);
/*
- * By equating const char ** with struct config_generic *, we are
- * assuming the name field is first in config_generic.
+ * By equating const char ** with struct config_generic *, we are assuming
+ * the name field is first in config_generic.
*/
res = (struct config_generic **) bsearch((void *) &key,
(void *) guc_variables,
num_guc_variables,
- sizeof(struct config_generic *),
+ sizeof(struct config_generic *),
guc_var_compare);
if (res)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that
- * the set of supported old names is short enough that a brute-force
- * search is the best way.
+ * See if the name is an obsolete name for a variable. We assume that the
+ * set of supported old names is short enough that a brute-force search is
+ * the best way.
*/
for (i = 0; map_old_guc_names[i] != NULL; i += 2)
{
@@ -2495,8 +2496,8 @@ find_option(const char *name, int elevel)
}
/*
- * Check if the name is qualified, and if so, check if the qualifier
- * maps to a custom variable class.
+ * Check if the name is qualified, and if so, check if the qualifier maps
+ * to a custom variable class.
*/
dot = strchr(name, GUC_QUALIFIER_SEPARATOR);
if (dot != NULL && is_custom_class(name, dot - name))
@@ -2525,9 +2526,9 @@ static int
guc_name_compare(const char *namea, const char *nameb)
{
/*
- * The temptation to use strcasecmp() here must be resisted, because
- * the array ordering has to remain stable across setlocale() calls.
- * So, build our own with a simple ASCII-only downcasing.
+ * The temptation to use strcasecmp() here must be resisted, because the
+ * array ordering has to remain stable across setlocale() calls. So, build
+ * our own with a simple ASCII-only downcasing.
*/
while (*namea && *nameb)
{
@@ -2656,8 +2657,7 @@ InitializeGUCOptions(void)
free(str);
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
conf->reset_val = str;
@@ -2683,8 +2683,8 @@ InitializeGUCOptions(void)
PGC_POSTMASTER, PGC_S_OVERRIDE);
/*
- * For historical reasons, some GUC parameters can receive defaults
- * from environment variables. Process those settings.
+ * For historical reasons, some GUC parameters can receive defaults from
+ * environment variables. Process those settings.
*/
env = getenv("PGPORT");
@@ -2727,9 +2727,9 @@ SelectConfigFiles(const char *userDoption, const char *progname)
/*
* Find the configuration file: if config_file was specified on the
- * command line, use it, else use configdir/postgresql.conf. In any
- * case ensure the result is an absolute path, so that it will be
- * interpreted the same way by future backends.
+ * command line, use it, else use configdir/postgresql.conf. In any case
+ * ensure the result is an absolute path, so that it will be interpreted
+ * the same way by future backends.
*/
if (ConfigFileName)
fname = make_absolute_path(ConfigFileName);
@@ -2749,8 +2749,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
}
/*
- * Set the ConfigFileName GUC variable to its final value, ensuring
- * that it can't be overridden later.
+ * Set the ConfigFileName GUC variable to its final value, ensuring that
+ * it can't be overridden later.
*/
SetConfigOption("config_file", fname, PGC_POSTMASTER, PGC_S_OVERRIDE);
free(fname);
@@ -2771,8 +2771,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* If the data_directory GUC variable has been set, use that as DataDir;
* otherwise use configdir if set; else punt.
*
- * Note: SetDataDir will copy and absolute-ize its argument,
- * so we don't have to.
+ * Note: SetDataDir will copy and absolute-ize its argument, so we don't have
+ * to.
*/
if (data_directory)
SetDataDir(data_directory);
@@ -2792,9 +2792,9 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* Reflect the final DataDir value back into the data_directory GUC var.
* (If you are wondering why we don't just make them a single variable,
* it's because the EXEC_BACKEND case needs DataDir to be transmitted to
- * child backends specially. XXX is that still true? Given that we
- * now chdir to DataDir, EXEC_BACKEND can read the config file without
- * knowing DataDir in advance.)
+ * child backends specially. XXX is that still true? Given that we now
+ * chdir to DataDir, EXEC_BACKEND can read the config file without knowing
+ * DataDir in advance.)
*/
SetConfigOption("data_directory", DataDir, PGC_POSTMASTER, PGC_S_OVERRIDE);
@@ -2954,8 +2954,7 @@ ResetAllOptions(void)
else if (newstr != str)
{
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
}
@@ -3005,8 +3004,8 @@ push_old_value(struct config_generic * gconf)
/*
* We keep all the stack entries in TopTransactionContext so as to
- * avoid allocation problems when a subtransaction back-fills
- * stack entries for upper transaction levels.
+ * avoid allocation problems when a subtransaction back-fills stack
+ * entries for upper transaction levels.
*/
stack = (GucStack *) MemoryContextAlloc(TopTransactionContext,
sizeof(GucStack));
@@ -3098,27 +3097,26 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
Assert(stack->nest_level == my_level);
/*
- * We will pop the stack entry. Start by restoring outer xact
- * status (since we may want to modify it below). Be careful to
- * use my_status to reference the inner xact status below this
- * point...
+ * We will pop the stack entry. Start by restoring outer xact status
+ * (since we may want to modify it below). Be careful to use
+ * my_status to reference the inner xact status below this point...
*/
gconf->status = stack->status;
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative (this
- * is to override a SET LOCAL if one occurred later than SET). We
- * keep the tentative value and propagate HAVE_TENTATIVE to the
- * parent status, allowing the SET's effect to percolate up. (But
- * if we're exiting the outermost transaction, we'll drop the
- * HAVE_TENTATIVE bit below.)
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this is
+ * to override a SET LOCAL if one occurred later than SET). We keep
+ * the tentative value and propagate HAVE_TENTATIVE to the parent
+ * status, allowing the SET's effect to percolate up. (But if we're
+ * exiting the outermost transaction, we'll drop the HAVE_TENTATIVE
+ * bit below.)
*
* Otherwise, we have a transaction that aborted or executed only SET
- * LOCAL (or no SET at all). In either case it should have no
- * further effect, so restore both tentative and actual values
- * from the stack entry.
+ * LOCAL (or no SET at all). In either case it should have no further
+ * effect, so restore both tentative and actual values from the stack
+ * entry.
*/
useTentative = isCommit && (my_status & GUC_HAVE_TENTATIVE) != 0;
@@ -3150,7 +3148,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3183,7 +3181,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3216,7 +3214,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3253,7 +3251,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
const char *newstr;
newstr = (*conf->assign_hook) (newval, true,
- PGC_S_OVERRIDE);
+ PGC_S_OVERRIDE);
if (newstr == NULL)
elog(LOG, "failed to commit %s",
conf->gen.name);
@@ -3263,8 +3261,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
* If newval should now be freed, it'll be
* taken care of below.
*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
newval = (char *) newstr;
}
@@ -3291,8 +3288,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
pfree(stack);
/*
- * If we're now out of all xact levels, forget TENTATIVE status
- * bit; there's nothing tentative about the value anymore.
+ * If we're now out of all xact levels, forget TENTATIVE status bit;
+ * there's nothing tentative about the value anymore.
*/
if (!isSubXact)
{
@@ -3306,10 +3303,10 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
}
/*
- * If we're now out of all xact levels, we can clear guc_dirty. (Note:
- * we cannot reset guc_dirty when exiting a subtransaction, because we
- * know that all outer transaction levels will have stacked values to
- * deal with.)
+ * If we're now out of all xact levels, we can clear guc_dirty. (Note: we
+ * cannot reset guc_dirty when exiting a subtransaction, because we know
+ * that all outer transaction levels will have stacked values to deal
+ * with.)
*/
if (!isSubXact)
guc_dirty = false;
@@ -3326,8 +3323,8 @@ BeginReportingGUCOptions(void)
int i;
/*
- * Don't do anything unless talking to an interactive frontend of
- * protocol 3.0 or later.
+ * Don't do anything unless talking to an interactive frontend of protocol
+ * 3.0 or later.
*/
if (whereToSendOutput != Remote ||
PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
@@ -3566,15 +3563,14 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
return false;
}
/*
- * Check if the option can be set at this time. See guc.h for the
- * precise rules. Note that we don't want to throw errors if we're in
- * the SIGHUP context. In that case we just ignore the attempt and
- * return true.
+ * Check if the option can be set at this time. See guc.h for the precise
+ * rules. Note that we don't want to throw errors if we're in the SIGHUP
+ * context. In that case we just ignore the attempt and return true.
*/
switch (record->context)
{
@@ -3613,22 +3609,22 @@ set_config_option(const char *name, const char *value,
}
/*
- * Hmm, the idea of the SIGHUP context is "ought to be global,
- * but can be changed after postmaster start". But there's
- * nothing that prevents a crafty administrator from sending
- * SIGHUP signals to individual backends only.
+ * Hmm, the idea of the SIGHUP context is "ought to be global, but
+ * can be changed after postmaster start". But there's nothing
+ * that prevents a crafty administrator from sending SIGHUP
+ * signals to individual backends only.
*/
break;
case PGC_BACKEND:
if (context == PGC_SIGHUP)
{
/*
- * If a PGC_BACKEND parameter is changed in the config
- * file, we want to accept the new value in the postmaster
- * (whence it will propagate to subsequently-started
- * backends), but ignore it in existing backends. This is
- * a tad klugy, but necessary because we don't re-read the
- * config file during backend start.
+ * If a PGC_BACKEND parameter is changed in the config file,
+ * we want to accept the new value in the postmaster (whence
+ * it will propagate to subsequently-started backends), but
+ * ignore it in existing backends. This is a tad klugy, but
+ * necessary because we don't re-read the config file during
+ * backend start.
*/
if (IsUnderPostmaster)
return true;
@@ -3647,8 +3643,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set parameter \"%s\"",
- name)));
+ errmsg("permission denied to set parameter \"%s\"",
+ name)));
return false;
}
break;
@@ -3666,10 +3662,9 @@ set_config_option(const char *name, const char *value,
/*
* Ignore attempted set if overridden by previously processed setting.
* However, if changeVal is false then plow ahead anyway since we are
- * trying to find out if the value is potentially good, not actually
- * use it. Also keep going if makeDefault is true, since we may want
- * to set the reset/stacked values even if we can't set the variable
- * itself.
+ * trying to find out if the value is potentially good, not actually use
+ * it. Also keep going if makeDefault is true, since we may want to set
+ * the reset/stacked values even if we can't set the variable itself.
*/
if (record->source > source)
{
@@ -3698,8 +3693,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a Boolean value",
- name)));
+ errmsg("parameter \"%s\" requires a Boolean value",
+ name)));
return false;
}
}
@@ -3714,8 +3709,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, (int) newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, (int) newval)));
return false;
}
@@ -3774,8 +3769,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires an integer value",
- name)));
+ errmsg("parameter \"%s\" requires an integer value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
@@ -3783,7 +3778,7 @@ set_config_option(const char *name, const char *value,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
@@ -3798,8 +3793,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, newval)));
return false;
}
@@ -3858,8 +3853,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a numeric value",
- name)));
+ errmsg("parameter \"%s\" requires a numeric value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
@@ -3867,7 +3862,7 @@ set_config_option(const char *name, const char *value,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%g is outside the valid range for parameter \"%s\" (%g .. %g)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
@@ -3882,8 +3877,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %g",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %g",
+ name, newval)));
return false;
}
@@ -3945,9 +3940,8 @@ set_config_option(const char *name, const char *value,
else if (conf->reset_val)
{
/*
- * We could possibly avoid strdup here, but easier to
- * make this case work the same as the normal
- * assignment case.
+ * We could possibly avoid strdup here, but easier to make
+ * this case work the same as the normal assignment case.
*/
newval = guc_strdup(elevel, conf->reset_val);
if (newval == NULL)
@@ -3977,8 +3971,8 @@ set_config_option(const char *name, const char *value,
free(newval);
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value ? value : "")));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value ? value : "")));
return false;
}
else if (hookresult != newval)
@@ -3986,13 +3980,12 @@ set_config_option(const char *name, const char *value,
free(newval);
/*
- * Having to cast away const here is annoying, but
- * the alternative is to declare assign_hooks as
- * returning char*, which would mean they'd have
- * to cast away const, or as both taking and
- * returning char*, which doesn't seem attractive
- * either --- we don't want them to scribble on
- * the passed str.
+ * Having to cast away const here is annoying, but the
+ * alternative is to declare assign_hooks as returning
+ * char*, which would mean they'd have to cast away
+ * const, or as both taking and returning char*, which
+ * doesn't seem attractive either --- we don't want
+ * them to scribble on the passed str.
*/
newval = (char *) hookresult;
}
@@ -4087,7 +4080,7 @@ GetConfigOption(const char *name)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4127,7 +4120,7 @@ GetConfigOptionResetString(const char *name)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4191,8 +4184,8 @@ flatten_set_variable_args(const char *name, List *args)
ListCell *l;
/*
- * Fast path if just DEFAULT. We do not check the variable name in
- * this case --- necessary for RESET ALL to work correctly.
+ * Fast path if just DEFAULT. We do not check the variable name in this
+ * case --- necessary for RESET ALL to work correctly.
*/
if (args == NIL)
return NULL;
@@ -4202,7 +4195,7 @@ flatten_set_variable_args(const char *name, List *args)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
flags = record->flags;
@@ -4240,18 +4233,18 @@ flatten_set_variable_args(const char *name, List *args)
if (arg->typename != NULL)
{
/*
- * Must be a ConstInterval argument for TIME ZONE.
- * Coerce to interval and back to normalize the value
- * and account for any typmod.
+ * Must be a ConstInterval argument for TIME ZONE. Coerce
+ * to interval and back to normalize the value and account
+ * for any typmod.
*/
- Datum interval;
+ Datum interval;
char *intervalout;
interval =
- DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(arg->typename->typmod));
+ DirectFunctionCall3(interval_in,
+ CStringGetDatum(val),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(arg->typename->typmod));
intervalout =
DatumGetCString(DirectFunctionCall1(interval_out,
@@ -4261,8 +4254,8 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote
- * mode, quote it if it's not a vanilla identifier.
+ * Plain string literal or identifier. For quote mode,
+ * quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
appendStringInfoString(&buf, quote_identifier(val));
@@ -4325,8 +4318,8 @@ set_config_by_name(PG_FUNCTION_ARGS)
value = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(1)));
/*
- * Get the desired state of is_local. Default to false if provided
- * value is NULL
+ * Get the desired state of is_local. Default to false if provided value
+ * is NULL
*/
if (PG_ARGISNULL(2))
is_local = false;
@@ -4359,11 +4352,11 @@ define_custom_variable(struct config_generic * variable)
const char *value;
struct config_string *pHolder;
struct config_generic **res = (struct config_generic **) bsearch(
- (void *) &nameAddr,
- (void *) guc_variables,
- num_guc_variables,
- sizeof(struct config_generic *),
- guc_var_compare);
+ (void *) &nameAddr,
+ (void *) guc_variables,
+ num_guc_variables,
+ sizeof(struct config_generic *),
+ guc_var_compare);
if (res == NULL)
{
@@ -4388,8 +4381,7 @@ define_custom_variable(struct config_generic * variable)
value = *pHolder->variable;
/*
- * Assign the string value stored in the placeholder to the real
- * variable.
+ * Assign the string value stored in the placeholder to the real variable.
*
* XXX this is not really good enough --- it should be a nontransactional
* assignment, since we don't want it to roll back if the current xact
@@ -4656,7 +4648,7 @@ ShowAllGUCConfig(DestReceiver *dest)
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "description",
TEXTOID, -1, 0);
-
+
/* prepare for projection of tuples */
tstate = begin_tup_output_tupdesc(dest, tupdesc);
@@ -4698,7 +4690,7 @@ GetConfigOptionByName(const char *name, const char **varname)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4814,8 +4806,7 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow)
default:
{
/*
- * should never get here, but in case we do, set 'em to
- * NULL
+ * should never get here, but in case we do, set 'em to NULL
*/
/* min_val */
@@ -4884,14 +4875,13 @@ show_all_settings(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/*
- * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS
- * columns of the appropriate types
+ * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS columns
+ * of the appropriate types
*/
tupdesc = CreateTemplateTupleDesc(NUM_PG_SETTINGS_ATTS, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -4916,8 +4906,8 @@ show_all_settings(PG_FUNCTION_ARGS)
TEXTOID, -1, 0);
/*
- * Generate attribute metadata needed later to produce tuples from
- * raw C strings
+ * Generate attribute metadata needed later to produce tuples from raw
+ * C strings
*/
attinmeta = TupleDescGetAttInMetadata(tupdesc);
funcctx->attinmeta = attinmeta;
@@ -5144,8 +5134,8 @@ write_nondefault_variables(GucContext context)
}
/*
- * Put new file in place. This could delay on Win32, but we don't
- * hold any exclusive locks.
+ * Put new file in place. This could delay on Win32, but we don't hold
+ * any exclusive locks.
*/
rename(CONFIG_EXEC_PARAMS_NEW, CONFIG_EXEC_PARAMS);
}
@@ -5233,8 +5223,7 @@ read_nondefault_variables(void)
FreeFile(fp);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
@@ -5317,15 +5306,15 @@ ProcessGUCArray(ArrayType *array, GucSource source)
{
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("could not parse setting for parameter \"%s\"", name)));
+ errmsg("could not parse setting for parameter \"%s\"", name)));
free(name);
continue;
}
/*
- * We process all these options at SUSET level. We assume that
- * the right to insert an option into pg_database or pg_authid was
- * checked when it was inserted.
+ * We process all these options at SUSET level. We assume that the
+ * right to insert an option into pg_database or pg_authid was checked
+ * when it was inserted.
*/
SetConfigOption(name, value, PGC_SUSET, source);
@@ -5515,7 +5504,7 @@ assign_log_destination(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"log_destination\"")));
+ errmsg("invalid list syntax for parameter \"log_destination\"")));
return NULL;
}
@@ -5538,8 +5527,8 @@ assign_log_destination(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"log_destination\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"log_destination\" key word: \"%s\"",
+ tok)));
pfree(rawstring);
list_free(elemlist);
return NULL;
@@ -5560,7 +5549,7 @@ assign_log_destination(const char *value, bool doit, GucSource source)
static const char *
assign_syslog_facility(const char *facility, bool doit, GucSource source)
{
- int syslog_fac;
+ int syslog_fac;
if (pg_strcasecmp(facility, "LOCAL0") == 0)
syslog_fac = LOG_LOCAL0;
@@ -5599,8 +5588,7 @@ assign_syslog_ident(const char *ident, bool doit, GucSource source)
return ident;
}
-
-#endif /* HAVE_SYSLOG */
+#endif /* HAVE_SYSLOG */
static const char *
@@ -5690,8 +5678,8 @@ assign_msglvl(int *var, const char *newval, bool doit, GucSource source)
}
/*
- * Client_min_messages always prints 'info', but we allow it as a
- * value anyway.
+ * Client_min_messages always prints 'info', but we allow it as a value
+ * anyway.
*/
else if (pg_strcasecmp(newval, "info") == 0)
{
@@ -5784,8 +5772,8 @@ static const char *
show_num_temp_buffers(void)
{
/*
- * We show the GUC var until local buffers have been initialized,
- * and NLocBuffer afterwards.
+ * We show the GUC var until local buffers have been initialized, and
+ * NLocBuffer afterwards.
*/
static char nbuf[32];
@@ -5801,7 +5789,7 @@ assign_phony_autocommit(bool newval, bool doit, GucSource source)
if (doit && source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
+ errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
return false;
}
return true;
@@ -5844,8 +5832,8 @@ assign_custom_variable_classes(const char *newval, bool doit, GucSource source)
if (hasSpaceAfterToken || !isalnum(c))
{
/*
- * Syntax error due to token following space after token or
- * non alpha numeric character
+ * Syntax error due to token following space after token or non
+ * alpha numeric character
*/
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
diff --git a/src/backend/utils/misc/pg_rusage.c b/src/backend/utils/misc/pg_rusage.c
index a4a6d9e586d..cf7bbb427cb 100644
--- a/src/backend/utils/misc/pg_rusage.c
+++ b/src/backend/utils/misc/pg_rusage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.1 2005/10/03 22:52:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.2 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,9 +65,9 @@ pg_rusage_show(const PGRUsage *ru0)
snprintf(result, sizeof(result),
"CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec",
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
- (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
- (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
(int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 1627cf49f73..af1421cd2f6 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.24 2005/05/24 07:16:27 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.25 2005/10/15 02:49:36 momjian Exp $
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* various details abducted from various places
@@ -85,7 +85,6 @@ extern char **environ;
#define PS_BUFFER_SIZE 256
static char ps_buffer[PS_BUFFER_SIZE];
static const size_t ps_buffer_size = PS_BUFFER_SIZE;
-
#else /* PS_USE_CLOBBER_ARGV */
static char *ps_buffer; /* will point to argv area */
static size_t ps_buffer_size; /* space determined at run time */
@@ -98,20 +97,22 @@ static int save_argc;
static char **save_argv;
#ifdef WIN32
- /*
- * Win32 does not support showing any changed arguments. To make it
- * at all possible to track which backend is doing what, we create
- * a named object that can be viewed with for example Process Explorer
- */
+
+ /*
+ * Win32 does not support showing any changed arguments. To make it at all
+ * possible to track which backend is doing what, we create a named object
+ * that can be viewed with for example Process Explorer
+ */
static HANDLE ident_handle = INVALID_HANDLE_VALUE;
-static void pgwin32_update_ident(char *ident)
+static void
+pgwin32_update_ident(char *ident)
{
- char name[PS_BUFFER_SIZE+32];
+ char name[PS_BUFFER_SIZE + 32];
if (ident_handle != INVALID_HANDLE_VALUE)
CloseHandle(ident_handle);
- sprintf(name,"pgident: %s",ident);
+ sprintf(name, "pgident: %s", ident);
ident_handle = CreateEvent(NULL,
TRUE,
@@ -130,7 +131,7 @@ static void pgwin32_update_ident(char *ident)
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
-char **
+char **
save_ps_display_args(int argc, char **argv)
{
save_argc = argc;
@@ -139,8 +140,8 @@ save_ps_display_args(int argc, char **argv)
#if defined(PS_USE_CLOBBER_ARGV)
/*
- * If we're going to overwrite the argv area, count the available
- * space. Also move the environment to make additional room.
+ * If we're going to overwrite the argv area, count the available space.
+ * Also move the environment to make additional room.
*/
{
char *end_of_area = NULL;
@@ -193,12 +194,12 @@ save_ps_display_args(int argc, char **argv)
* argument parsing purposes.
*
* (NB: do NOT think to remove the copying of argv[], even though
- * postmaster.c finishes looking at argv[] long before we ever
- * consider changing the ps display. On some platforms, getopt()
- * keeps pointers into the argv array, and will get horribly confused
- * when it is re-called to analyze a subprocess' argument string if
- * the argv storage has been clobbered meanwhile. Other platforms
- * have other dependencies on argv[].
+ * postmaster.c finishes looking at argv[] long before we ever consider
+ * changing the ps display. On some platforms, getopt() keeps pointers
+ * into the argv array, and will get horribly confused when it is
+ * re-called to analyze a subprocess' argument string if the argv storage
+ * has been clobbered meanwhile. Other platforms have other dependencies
+ * on argv[].
*/
{
char **new_argv;
@@ -220,8 +221,7 @@ save_ps_display_args(int argc, char **argv)
argv = new_argv;
}
-#endif /* PS_USE_CHANGE_ARGV or
- * PS_USE_CLOBBER_ARGV */
+#endif /* PS_USE_CHANGE_ARGV or PS_USE_CLOBBER_ARGV */
return argv;
}
@@ -278,8 +278,8 @@ init_ps_display(const char *username, const char *dbname,
#ifdef PS_USE_SETPROCTITLE
/*
- * apparently setproctitle() already adds a `progname:' prefix to the
- * ps line
+ * apparently setproctitle() already adds a `progname:' prefix to the ps
+ * line
*/
snprintf(ps_buffer, ps_buffer_size,
"%s %s %s ",
@@ -295,7 +295,6 @@ init_ps_display(const char *username, const char *dbname,
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
@@ -360,7 +359,6 @@ set_ps_display(const char *activity)
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
diff --git a/src/backend/utils/misc/superuser.c b/src/backend/utils/misc/superuser.c
index c9c17cef704..6eba2fb9354 100644
--- a/src/backend/utils/misc/superuser.c
+++ b/src/backend/utils/misc/superuser.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.33 2005/08/15 02:40:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.34 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,9 +32,9 @@
* the status of the last requested roleid. The cache can be flushed
* at need by watching for cache update events on pg_authid.
*/
-static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
-static bool last_roleid_is_super = false;
-static bool roleid_callback_registered = false;
+static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
+static bool last_roleid_is_super = false;
+static bool roleid_callback_registered = false;
static void RoleidCallback(Datum arg, Oid relid);
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 6c1b5f390da..70bcf778a14 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.63 2005/09/01 18:15:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.64 2005/10/15 02:49:36 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -140,8 +140,7 @@ typedef struct AllocSetContext
/* Allocation parameters for this context: */
Size initBlockSize; /* initial block size */
Size maxBlockSize; /* maximum block size */
- AllocBlock keeper; /* if not NULL, keep this block over
- * resets */
+ AllocBlock keeper; /* if not NULL, keep this block over resets */
} AllocSetContext;
typedef AllocSetContext *AllocSet;
@@ -342,8 +341,8 @@ AllocSetContextCreate(MemoryContext parent,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while creating memory context \"%s\".",
- name)));
+ errdetail("Failed while creating memory context \"%s\".",
+ name)));
}
block->aset = context;
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
@@ -505,8 +504,8 @@ AllocSetAlloc(MemoryContext context, Size size)
AssertArg(AllocSetIsValid(set));
/*
- * If requested size exceeds maximum for chunks, allocate an entire
- * block for this request.
+ * If requested size exceeds maximum for chunks, allocate an entire block
+ * for this request.
*/
if (size > ALLOC_CHUNK_LIMIT)
{
@@ -536,8 +535,8 @@ AllocSetAlloc(MemoryContext context, Size size)
#endif
/*
- * Stick the new block underneath the active allocation block, so
- * that we don't lose the use of the space remaining therein.
+ * Stick the new block underneath the active allocation block, so that
+ * we don't lose the use of the space remaining therein.
*/
if (set->blocks != NULL)
{
@@ -558,8 +557,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* Request is small enough to be treated as a chunk. Look in the
- * corresponding free list to see if there is a free chunk we could
- * reuse.
+ * corresponding free list to see if there is a free chunk we could reuse.
*/
fidx = AllocSetFreeIndex(size);
priorfree = NULL;
@@ -571,8 +569,8 @@ AllocSetAlloc(MemoryContext context, Size size)
}
/*
- * If one is found, remove it from the free list, make it again a
- * member of the alloc set and return its data address.
+ * If one is found, remove it from the free list, make it again a member
+ * of the alloc set and return its data address.
*/
if (chunk != NULL)
{
@@ -604,8 +602,8 @@ AllocSetAlloc(MemoryContext context, Size size)
Assert(chunk_size >= size);
/*
- * If there is enough room in the active allocation block, we will put
- * the chunk into that block. Else must start a new one.
+ * If there is enough room in the active allocation block, we will put the
+ * chunk into that block. Else must start a new one.
*/
if ((block = set->blocks) != NULL)
{
@@ -614,16 +612,16 @@ AllocSetAlloc(MemoryContext context, Size size)
if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
{
/*
- * The existing active (top) block does not have enough room
- * for the requested allocation, but it might still have a
- * useful amount of space in it. Once we push it down in the
- * block list, we'll never try to allocate more space from it.
- * So, before we do that, carve up its free space into chunks
- * that we can put on the set's freelists.
+ * The existing active (top) block does not have enough room for
+ * the requested allocation, but it might still have a useful
+ * amount of space in it. Once we push it down in the block list,
+ * we'll never try to allocate more space from it. So, before we
+ * do that, carve up its free space into chunks that we can put on
+ * the set's freelists.
*
* Because we can only get here when there's less than
- * ALLOC_CHUNK_LIMIT left in the block, this loop cannot
- * iterate more than ALLOCSET_NUM_FREELISTS-1 times.
+ * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
+ * more than ALLOCSET_NUM_FREELISTS-1 times.
*/
while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
{
@@ -631,10 +629,9 @@ AllocSetAlloc(MemoryContext context, Size size)
int a_fidx = AllocSetFreeIndex(availchunk);
/*
- * In most cases, we'll get back the index of the next
- * larger freelist than the one we need to put this chunk
- * on. The exception is when availchunk is exactly a
- * power of 2.
+ * In most cases, we'll get back the index of the next larger
+ * freelist than the one we need to put this chunk on. The
+ * exception is when availchunk is exactly a power of 2.
*/
if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
{
@@ -676,11 +673,11 @@ AllocSetAlloc(MemoryContext context, Size size)
else
{
/*
- * Use first power of 2 that is larger than previous block,
- * but not more than the allowed limit. (We don't simply
- * double the prior block size, because in some cases this
- * could be a funny size, eg if very first allocation was for
- * an odd-sized large chunk.)
+ * Use first power of 2 that is larger than previous block, but
+ * not more than the allowed limit. (We don't simply double the
+ * prior block size, because in some cases this could be a funny
+ * size, eg if very first allocation was for an odd-sized large
+ * chunk.)
*/
Size pblksize = set->blocks->endptr - ((char *) set->blocks);
@@ -692,8 +689,8 @@ AllocSetAlloc(MemoryContext context, Size size)
}
/*
- * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need
- * more space... but try to keep it a power of 2.
+ * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
+ * space... but try to keep it a power of 2.
*/
required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
while (blksize < required_size)
@@ -703,9 +700,8 @@ AllocSetAlloc(MemoryContext context, Size size)
block = (AllocBlock) malloc(blksize);
/*
- * We could be asking for pretty big blocks here, so cope if
- * malloc fails. But give up if there's less than a meg or so
- * available...
+ * We could be asking for pretty big blocks here, so cope if malloc
+ * fails. But give up if there's less than a meg or so available...
*/
while (block == NULL && blksize > 1024 * 1024)
{
@@ -730,13 +726,13 @@ AllocSetAlloc(MemoryContext context, Size size)
block->endptr = ((char *) block) + blksize;
/*
- * If this is the first block of the set, make it the "keeper"
- * block. Formerly, a keeper block could only be created during
- * context creation, but allowing it to happen here lets us have
- * fast reset cycling even for contexts created with
- * minContextSize = 0; that way we don't have to force space to be
- * allocated in contexts that might never need any space. Don't
- * mark an oversize block as a keeper, however.
+ * If this is the first block of the set, make it the "keeper" block.
+ * Formerly, a keeper block could only be created during context
+ * creation, but allowing it to happen here lets us have fast reset
+ * cycling even for contexts created with minContextSize = 0; that way
+ * we don't have to force space to be allocated in contexts that might
+ * never need any space. Don't mark an oversize block as a keeper,
+ * however.
*/
if (set->blocks == NULL && blksize == set->initBlockSize)
{
@@ -870,8 +866,8 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
/*
* Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
- * allocated area already is >= the new size. (In particular, we
- * always fall out here if the requested size is a decrease.)
+ * allocated area already is >= the new size. (In particular, we always
+ * fall out here if the requested size is a decrease.)
*/
if (oldsize >= size)
{
@@ -887,9 +883,9 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > ALLOC_CHUNK_LIMIT)
{
/*
- * The chunk must been allocated as a single-chunk block. Find
- * the containing block and use realloc() to make it bigger with
- * minimum space wastage.
+ * The chunk must been allocated as a single-chunk block. Find the
+ * containing block and use realloc() to make it bigger with minimum
+ * space wastage.
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -944,15 +940,15 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
else
{
/*
- * Small-chunk case. If the chunk is the last one in its block,
- * there might be enough free space after it that we can just
- * enlarge the chunk in-place. It's relatively painful to find
- * the containing block in the general case, but we can detect
- * last-ness quite cheaply for the typical case where the chunk is
- * in the active (topmost) allocation block. (At least with the
- * regression tests and code as of 1/2001, realloc'ing the last
- * chunk of a non-topmost block hardly ever happens, so it's not
- * worth scanning the block list to catch that case.)
+ * Small-chunk case. If the chunk is the last one in its block, there
+ * might be enough free space after it that we can just enlarge the
+ * chunk in-place. It's relatively painful to find the containing
+ * block in the general case, but we can detect last-ness quite
+ * cheaply for the typical case where the chunk is in the active
+ * (topmost) allocation block. (At least with the regression tests
+ * and code as of 1/2001, realloc'ing the last chunk of a non-topmost
+ * block hardly ever happens, so it's not worth scanning the block
+ * list to catch that case.)
*
* NOTE: must be careful not to create a chunk of a size that
* AllocSetAlloc would not create, else we'll get confused later.
@@ -1031,10 +1027,10 @@ AllocSetIsEmpty(MemoryContext context)
AllocSet set = (AllocSet) context;
/*
- * For now, we say "empty" only if the context is new or just reset.
- * We could examine the freelists to determine if all space has been
- * freed, but it's not really worth the trouble for present uses of
- * this functionality.
+ * For now, we say "empty" only if the context is new or just reset. We
+ * could examine the freelists to determine if all space has been freed,
+ * but it's not really worth the trouble for present uses of this
+ * functionality.
*/
if (set->isReset)
return true;
@@ -1073,7 +1069,7 @@ AllocSetStats(MemoryContext context)
}
}
fprintf(stderr,
- "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
+ "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
set->header.name, totalspace, nblocks, freespace, nchunks,
totalspace - freespace);
}
@@ -1144,9 +1140,9 @@ AllocSetCheck(MemoryContext context)
name, chunk, block);
/*
- * If chunk is allocated, check for correct aset pointer. (If
- * it's free, the aset is the freelist pointer, which we can't
- * check as easily...)
+ * If chunk is allocated, check for correct aset pointer. (If it's
+ * free, the aset is the freelist pointer, which we can't check as
+ * easily...)
*/
if (dsize > 0 && chunk->aset != (void *) set)
elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 6d68e30f7eb..b6532730226 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.55 2005/05/14 23:16:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.56 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,8 +78,8 @@ MemoryContextInit(void)
AssertState(TopMemoryContext == NULL);
/*
- * Initialize TopMemoryContext as an AllocSetContext with slow growth
- * rate --- we don't really expect much to be allocated in it.
+ * Initialize TopMemoryContext as an AllocSetContext with slow growth rate
+ * --- we don't really expect much to be allocated in it.
*
* (There is special-case code in MemoryContextCreate() for this call.)
*/
@@ -90,18 +90,18 @@ MemoryContextInit(void)
8 * 1024);
/*
- * Not having any other place to point CurrentMemoryContext, make it
- * point to TopMemoryContext. Caller should change this soon!
+ * Not having any other place to point CurrentMemoryContext, make it point
+ * to TopMemoryContext. Caller should change this soon!
*/
CurrentMemoryContext = TopMemoryContext;
/*
- * Initialize ErrorContext as an AllocSetContext with slow growth rate
- * --- we don't really expect much to be allocated in it. More to the
- * point, require it to contain at least 8K at all times. This is the
- * only case where retained memory in a context is *essential* --- we
- * want to be sure ErrorContext still has some memory even if we've
- * run out elsewhere!
+ * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
+ * we don't really expect much to be allocated in it. More to the point,
+ * require it to contain at least 8K at all times. This is the only case
+ * where retained memory in a context is *essential* --- we want to be
+ * sure ErrorContext still has some memory even if we've run out
+ * elsewhere!
*/
ErrorContext = AllocSetContextCreate(TopMemoryContext,
"ErrorContext",
@@ -169,9 +169,9 @@ MemoryContextDelete(MemoryContext context)
MemoryContextDeleteChildren(context);
/*
- * We delink the context from its parent before deleting it, so that
- * if there's an error we won't have deleted/busted contexts still
- * attached to the context tree. Better a leak than a crash.
+ * We delink the context from its parent before deleting it, so that if
+ * there's an error we won't have deleted/busted contexts still attached
+ * to the context tree. Better a leak than a crash.
*/
if (context->parent)
{
@@ -208,8 +208,8 @@ MemoryContextDeleteChildren(MemoryContext context)
AssertArg(MemoryContextIsValid(context));
/*
- * MemoryContextDelete will delink the child from me, so just iterate
- * as long as there is a child.
+ * MemoryContextDelete will delink the child from me, so just iterate as
+ * long as there is a child.
*/
while (context->firstchild != NULL)
MemoryContextDelete(context->firstchild);
@@ -384,9 +384,9 @@ MemoryContextContains(MemoryContext context, void *pointer)
((char *) pointer - STANDARDCHUNKHEADERSIZE);
/*
- * If the context link doesn't match then we certainly have a
- * non-member chunk. Also check for a reasonable-looking size as
- * extra guard against being fooled by bogus pointers.
+ * If the context link doesn't match then we certainly have a non-member
+ * chunk. Also check for a reasonable-looking size as extra guard against
+ * being fooled by bogus pointers.
*/
if (header->context == context && AllocSizeIsValid(header->size))
return true;
@@ -640,7 +640,6 @@ MemoryContextSwitchTo(MemoryContext context)
CurrentMemoryContext = context;
return old;
}
-
#endif /* ! __GNUC__ */
/*
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index b55a3430256..9866e12d68c 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.81 2005/06/17 22:32:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -272,8 +272,8 @@ PortalCreateHoldStore(Portal portal)
Assert(portal->holdStore == NULL);
/*
- * Create the memory context that is used for storage of the tuple
- * set. Note this is NOT a child of the portal's heap memory.
+ * Create the memory context that is used for storage of the tuple set.
+ * Note this is NOT a child of the portal's heap memory.
*/
portal->holdContext =
AllocSetContextCreate(PortalMemory,
@@ -305,10 +305,10 @@ PortalDrop(Portal portal, bool isTopCommit)
elog(ERROR, "cannot drop active portal");
/*
- * Remove portal from hash table. Because we do this first, we will
- * not come back to try to remove the portal again if there's any
- * error in the subsequent steps. Better to leak a little memory than
- * to get into an infinite error-recovery loop.
+ * Remove portal from hash table. Because we do this first, we will not
+ * come back to try to remove the portal again if there's any error in the
+ * subsequent steps. Better to leak a little memory than to get into an
+ * infinite error-recovery loop.
*/
PortalHashTableDelete(portal);
@@ -317,27 +317,27 @@ PortalDrop(Portal portal, bool isTopCommit)
(*portal->cleanup) (portal);
/*
- * Release any resources still attached to the portal. There are
- * several cases being covered here:
+ * Release any resources still attached to the portal. There are several
+ * cases being covered here:
*
- * Top transaction commit (indicated by isTopCommit): normally we should
- * do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we
- * have a FAILED portal (eg, a cursor that got an error), we'd better
- * clean up its resources to avoid resource-leakage warning messages.
+ * Top transaction commit (indicated by isTopCommit): normally we should do
+ * nothing here and let the regular end-of-transaction resource releasing
+ * mechanism handle these resources too. However, if we have a FAILED
+ * portal (eg, a cursor that got an error), we'd better clean up its
+ * resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't kill
- * any portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill any
+ * portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
- * Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become
- * the responsibility of the transaction's ResourceOwner (since it is
- * the parent of the portal's owner) and will be released when the
- * transaction eventually ends.
+ * Ordinary portal drop: must release resources. However, if the portal is
+ * not FAILED then we do not release its locks. The locks become the
+ * responsibility of the transaction's ResourceOwner (since it is the
+ * parent of the portal's owner) and will be released when the transaction
+ * eventually ends.
*/
if (portal->resowner &&
(!isTopCommit || portal->status == PORTAL_FAILED))
@@ -419,7 +419,7 @@ DropDependentPortals(MemoryContext queryContext)
bool
CommitHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
@@ -435,27 +435,26 @@ CommitHoldablePortals(void)
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Instead of dropping the portal, prepare it for
- * access by later transactions.
+ * We are exiting the transaction that created a holdable cursor.
+ * Instead of dropping the portal, prepare it for access by later
+ * transactions.
*
- * Note that PersistHoldablePortal() must release all resources
- * used by the portal that are local to the creating
- * transaction.
+ * Note that PersistHoldablePortal() must release all resources used
+ * by the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; the portal will no
- * longer have its own resources.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; the portal will no longer
+ * have its own resources.
*/
portal->resowner = NULL;
/*
- * Having successfully exported the holdable cursor, mark it
- * as not belonging to this transaction.
+ * Having successfully exported the holdable cursor, mark it as
+ * not belonging to this transaction.
*/
portal->createSubid = InvalidSubTransactionId;
@@ -480,7 +479,7 @@ CommitHoldablePortals(void)
bool
PrepareHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
@@ -496,8 +495,8 @@ PrepareHoldablePortals(void)
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Can't do PREPARE.
+ * We are exiting the transaction that created a holdable cursor.
+ * Can't do PREPARE.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -527,8 +526,8 @@ AtCommit_Portals(void)
Portal portal = hentry->portal;
/*
- * Do not touch active portals --- this can only happen in the
- * case of a multi-transaction utility command, such as VACUUM.
+ * Do not touch active portals --- this can only happen in the case of
+ * a multi-transaction utility command, such as VACUUM.
*
* Note however that any resource owner attached to such a portal is
* still going to go away, so don't leave a dangling pointer.
@@ -579,8 +578,7 @@ AtAbort_Portals(void)
portal->status = PORTAL_FAILED;
/*
- * Do nothing else to cursors held over from a previous
- * transaction.
+ * Do nothing else to cursors held over from a previous transaction.
*/
if (portal->createSubid == InvalidSubTransactionId)
continue;
@@ -594,8 +592,8 @@ AtAbort_Portals(void)
/*
* Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; they will be gone before we
- * run PortalDrop.
+ * upcoming transaction-wide cleanup; they will be gone before we run
+ * PortalDrop.
*/
portal->resowner = NULL;
}
@@ -686,11 +684,10 @@ AtSubAbort_Portals(SubTransactionId mySubid,
continue;
/*
- * Force any active portals of my own transaction into FAILED
- * state. This is mostly to ensure that a portal running a FETCH
- * will go FAILED if the underlying cursor fails. (Note we do NOT
- * want to do this to upper-level portals, since they may be able
- * to continue.)
+ * Force any active portals of my own transaction into FAILED state.
+ * This is mostly to ensure that a portal running a FETCH will go
+ * FAILED if the underlying cursor fails. (Note we do NOT want to do
+ * this to upper-level portals, since they may be able to continue.)
*
* This is only needed to dodge the sanity check in PortalDrop.
*/
@@ -701,11 +698,11 @@ AtSubAbort_Portals(SubTransactionId mySubid,
* If the portal is READY then allow it to survive into the parent
* transaction; otherwise shut it down.
*
- * Currently, we can't actually support that because the portal's
- * query might refer to objects created or changed in the failed
- * subtransaction, leading to crashes if execution is resumed.
- * So, even READY portals are deleted. It would be nice to detect
- * whether the query actually depends on any such object, instead.
+ * Currently, we can't actually support that because the portal's query
+ * might refer to objects created or changed in the failed
+ * subtransaction, leading to crashes if execution is resumed. So,
+ * even READY portals are deleted. It would be nice to detect whether
+ * the query actually depends on any such object, instead.
*/
#ifdef NOT_USED
if (portal->status == PORTAL_READY)
@@ -725,9 +722,9 @@ AtSubAbort_Portals(SubTransactionId mySubid,
}
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; they will be gone
- * before we run PortalDrop.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; they will be gone before we
+ * run PortalDrop.
*/
portal->resowner = NULL;
}
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 786652a757b..97933de820b 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.13 2005/08/08 19:17:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.14 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,7 +108,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
ResourceOwner owner;
owner = (ResourceOwner) MemoryContextAllocZero(TopMemoryContext,
- sizeof(ResourceOwnerData));
+ sizeof(ResourceOwnerData));
owner->name = name;
if (parent)
@@ -185,9 +185,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);
/*
- * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc
- * don't get confused. We needn't PG_TRY here because the outermost
- * level will fix it on error abort.
+ * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
+ * get confused. We needn't PG_TRY here because the outermost level will
+ * fix it on error abort.
*/
save = CurrentResourceOwner;
CurrentResourceOwner = owner;
@@ -195,16 +195,16 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
{
/*
- * Release buffer pins. Note that ReleaseBuffer will
- * remove the buffer entry from my list, so I just have to
- * iterate till there are none.
+ * Release buffer pins. Note that ReleaseBuffer will remove the
+ * buffer entry from my list, so I just have to iterate till there are
+ * none.
*
- * During a commit, there shouldn't be any remaining pins ---
- * that would indicate failure to clean up the executor correctly ---
- * so issue warnings. In the abort case, just clean up quietly.
+ * During a commit, there shouldn't be any remaining pins --- that would
+ * indicate failure to clean up the executor correctly --- so issue
+ * warnings. In the abort case, just clean up quietly.
*
- * We are careful to do the releasing back-to-front, so as to
- * avoid O(N^2) behavior in ResourceOwnerForgetBuffer().
+ * We are careful to do the releasing back-to-front, so as to avoid
+ * O(N^2) behavior in ResourceOwnerForgetBuffer().
*/
while (owner->nbuffers > 0)
{
@@ -214,12 +214,12 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
}
/*
- * Release relcache references. Note that RelationClose will
- * remove the relref entry from my list, so I just have to
- * iterate till there are none.
+ * Release relcache references. Note that RelationClose will remove
+ * the relref entry from my list, so I just have to iterate till there
+ * are none.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->nrelrefs > 0)
{
@@ -233,9 +233,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all locks (or
- * at least all non-session locks), so just do a single lmgr
- * call at the top of the recursion.
+ * For a top-level xact we are going to release all locks (or at
+ * least all non-session locks), so just do a single lmgr call at
+ * the top of the recursion.
*/
if (owner == TopTransactionResourceOwner)
ProcReleaseLocks(isCommit);
@@ -244,8 +244,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
{
/*
* Release locks retail. Note that if we are committing a
- * subtransaction, we do NOT release its locks yet, but
- * transfer them to the parent.
+ * subtransaction, we do NOT release its locks yet, but transfer
+ * them to the parent.
*/
Assert(owner->parent != NULL);
if (isCommit)
@@ -257,12 +257,12 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
{
/*
- * Release catcache references. Note that ReleaseCatCache
- * will remove the catref entry from my list, so I just have
- * to iterate till there are none. Ditto for catcache lists.
+ * Release catcache references. Note that ReleaseCatCache will remove
+ * the catref entry from my list, so I just have to iterate till there
+ * are none. Ditto for catcache lists.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->ncatrefs > 0)
{
@@ -309,16 +309,16 @@ ResourceOwnerDelete(ResourceOwner owner)
Assert(owner->nrelrefs == 0);
/*
- * Delete children. The recursive call will delink the child from me,
- * so just iterate as long as there is a child.
+ * Delete children. The recursive call will delink the child from me, so
+ * just iterate as long as there is a child.
*/
while (owner->firstchild != NULL)
ResourceOwnerDelete(owner->firstchild);
/*
* We delink the owner from its parent before deleting it, so that if
- * there's an error we won't have deleted/busted owners still attached
- * to the owner tree. Better a leak than a crash.
+ * there's an error we won't have deleted/busted owners still attached to
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -502,8 +502,8 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course,
- * but it's the way to bet.
+ * recently pinned buffer. This isn't always the case of course, but
+ * it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
{
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index e4066821de4..b8c760f4823 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -64,7 +64,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.15 2004/12/31 22:02:52 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.16 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,10 +91,9 @@
typedef struct IndirectBlock
{
int nextSlot; /* next pointer slot to write or read */
- struct IndirectBlock *nextup; /* parent indirect level, or NULL
- * if top */
- long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained
- * blocks */
+ struct IndirectBlock *nextup; /* parent indirect level, or NULL if
+ * top */
+ long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained blocks */
} IndirectBlock;
/*
@@ -107,24 +106,23 @@ typedef struct LogicalTape
{
IndirectBlock *indirect; /* bottom of my indirect-block hierarchy */
bool writing; /* T while in write phase */
- bool frozen; /* T if blocks should not be freed when
- * read */
+ bool frozen; /* T if blocks should not be freed when read */
bool dirty; /* does buffer need to be written? */
/*
- * The total data volume in the logical tape is numFullBlocks * BLCKSZ
- * + lastBlockBytes. BUT: we do not update lastBlockBytes during
- * writing, only at completion of a write phase.
+ * The total data volume in the logical tape is numFullBlocks * BLCKSZ +
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
int lastBlockBytes; /* valid bytes in last (incomplete) block */
/*
* Buffer for current data block. Note we don't bother to store the
- * actual file block number of the data block (during the write phase
- * it hasn't been assigned yet, and during read we don't care
- * anymore). But we do need the relative block number so we can detect
- * end-of-tape while reading.
+ * actual file block number of the data block (during the write phase it
+ * hasn't been assigned yet, and during read we don't care anymore). But
+ * we do need the relative block number so we can detect end-of-tape while
+ * reading.
*/
long curBlockNumber; /* this block's logical blk# within tape */
int pos; /* next read/write position in buffer */
@@ -144,20 +142,18 @@ struct LogicalTapeSet
long nFileBlocks; /* # of blocks used in underlying file */
/*
- * We store the numbers of recycled-and-available blocks in
- * freeBlocks[]. When there are no such blocks, we extend the
- * underlying file. Note that the block numbers in freeBlocks are
- * always in *decreasing* order, so that removing the last entry gives
- * us the lowest free block.
+ * We store the numbers of recycled-and-available blocks in freeBlocks[].
+ * When there are no such blocks, we extend the underlying file. Note
+ * that the block numbers in freeBlocks are always in *decreasing* order,
+ * so that removing the last entry gives us the lowest free block.
*/
long *freeBlocks; /* resizable array */
int nFreeBlocks; /* # of currently free blocks */
- int freeBlocksLen; /* current allocated length of
- * freeBlocks[] */
+ int freeBlocksLen; /* current allocated length of freeBlocks[] */
/*
- * tapes[] is declared size 1 since C wants a fixed size, but actually
- * it is of length nTapes.
+ * tapes[] is declared size 1 since C wants a fixed size, but actually it
+ * is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
LogicalTape *tapes[1]; /* must be last in struct! */
@@ -232,9 +228,9 @@ static long
ltsGetFreeBlock(LogicalTapeSet *lts)
{
/*
- * If there are multiple free blocks, we select the one appearing last
- * in freeBlocks[]. If there are none, assign the next block at the
- * end of the file.
+ * If there are multiple free blocks, we select the one appearing last in
+ * freeBlocks[]. If there are none, assign the next block at the end of
+ * the file.
*/
if (lts->nFreeBlocks > 0)
return lts->freeBlocks[--lts->nFreeBlocks];
@@ -258,14 +254,14 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
{
lts->freeBlocksLen *= 2;
lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocksLen * sizeof(long));
}
/*
* Insert blocknum into array, preserving decreasing order (so that
- * ltsGetFreeBlock returns the lowest available block number). This
- * could get fairly slow if there were many free blocks, but we don't
- * expect there to be very many at one time.
+ * ltsGetFreeBlock returns the lowest available block number). This could
+ * get fairly slow if there were many free blocks, but we don't expect
+ * there to be very many at one time.
*/
ndx = lts->nFreeBlocks++;
ptr = lts->freeBlocks + ndx;
@@ -293,8 +289,8 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
if (indirect->nextSlot >= BLOCKS_PER_INDIR_BLOCK)
{
/*
- * This indirect block is full, so dump it out and recursively
- * save its address in the next indirection level. Create a new
+ * This indirect block is full, so dump it out and recursively save
+ * its address in the next indirection level. Create a new
* indirection level if there wasn't one before.
*/
long indirblock = ltsGetFreeBlock(lts);
@@ -336,8 +332,8 @@ ltsRewindIndirectBlock(LogicalTapeSet *lts,
indirect->ptrs[indirect->nextSlot] = -1L;
/*
- * If block is not topmost, write it out, and recurse to obtain
- * address of first block in this hierarchy level. Read that one in.
+ * If block is not topmost, write it out, and recurse to obtain address of
+ * first block in this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -371,8 +367,8 @@ ltsRewindFrozenIndirectBlock(LogicalTapeSet *lts,
IndirectBlock *indirect)
{
/*
- * If block is not topmost, recurse to obtain address of first block
- * in this hierarchy level. Read that one in.
+ * If block is not topmost, recurse to obtain address of first block in
+ * this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -448,8 +444,8 @@ ltsRecallPrevBlockNum(LogicalTapeSet *lts,
ltsReadBlock(lts, indirblock, (void *) indirect->ptrs);
/*
- * The previous block would only have been written out if full, so
- * we need not search it for a -1 sentinel.
+ * The previous block would only have been written out if full, so we
+ * need not search it for a -1 sentinel.
*/
indirect->nextSlot = BLOCKS_PER_INDIR_BLOCK + 1;
}
@@ -471,8 +467,8 @@ LogicalTapeSetCreate(int ntapes)
int i;
/*
- * Create top-level struct. First LogicalTape pointer is already
- * counted in sizeof(LogicalTapeSet).
+ * Create top-level struct. First LogicalTape pointer is already counted
+ * in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
@@ -617,8 +613,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
if (lt->writing)
{
/*
- * Completion of a write phase. Flush last partial data
- * block, flush any partial indirect blocks, rewind for normal
+ * Completion of a write phase. Flush last partial data block,
+ * flush any partial indirect blocks, rewind for normal
* (destructive) read.
*/
if (lt->dirty)
@@ -630,8 +626,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * This is only OK if tape is frozen; we rewind for (another)
- * read pass.
+ * This is only OK if tape is frozen; we rewind for (another) read
+ * pass.
*/
Assert(lt->frozen);
datablocknum = ltsRewindFrozenIndirectBlock(lts, lt->indirect);
@@ -656,8 +652,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
- * could add more code to free any unread blocks, but in current
- * usage of this module it'd be useless code.
+ * could add more code to free any unread blocks, but in current usage
+ * of this module it'd be useless code.
*/
IndirectBlock *ib,
*nextib;
@@ -757,8 +753,8 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
Assert(lt->writing);
/*
- * Completion of a write phase. Flush last partial data block, flush
- * any partial indirect blocks, rewind for nondestructive read.
+ * Completion of a write phase. Flush last partial data block, flush any
+ * partial indirect blocks, rewind for nondestructive read.
*/
if (lt->dirty)
ltsDumpBuffer(lts, lt);
@@ -826,9 +822,9 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
return false; /* a seek too far... */
/*
- * OK, we need to back up nblocks blocks. This implementation would
- * be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, we need to back up nblocks blocks. This implementation would be
+ * pretty inefficient for long seeks, but we really aren't expecting that
+ * (a seek over one tuple is typical).
*/
while (nblocks-- > 0)
{
@@ -883,9 +879,9 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation
- * would be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, advance or back up to the target block. This implementation would
+ * be pretty inefficient for long seeks, but we really aren't expecting
+ * that (a seek over one tuple is typical).
*/
while (lt->curBlockNumber > blocknum)
{
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 53f2b546f46..2007d7a6949 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.51 2005/10/03 22:55:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.52 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@
/* GUC variable */
#ifdef TRACE_SORT
-bool trace_sort = false;
+bool trace_sort = false;
#endif
@@ -112,8 +112,7 @@ bool trace_sort = false;
*/
typedef enum
{
- TSS_INITIAL, /* Loading tuples; still within memory
- * limit */
+ TSS_INITIAL, /* Loading tuples; still within memory limit */
TSS_BUILDRUNS, /* Loading tuples; writing to tape */
TSS_SORTEDINMEM, /* Sort completed entirely in memory */
TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
@@ -135,13 +134,12 @@ struct Tuplesortstate
TupSortStatus status; /* enumerated value as shown above */
bool randomAccess; /* did caller request random access? */
long availMem; /* remaining memory available, in bytes */
- LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp
- * file */
+ LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are sorting from the routines that don't need to
- * know it. They are set up by the tuplesort_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are sorting from the routines that don't need to know it.
+ * They are set up by the tuplesort_begin_xxx routines.
*
* Function to compare two tuples; result is per qsort() convention, ie:
*
@@ -150,83 +148,78 @@ struct Tuplesortstate
int (*comparetup) (Tuplesortstate *state, const void *a, const void *b);
/*
- * Function to copy a supplied input tuple into palloc'd space. (NB:
- * we assume that a single pfree() is enough to release the tuple
- * later, so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * Function to copy a supplied input tuple into palloc'd space. (NB: we
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplesortstate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplesortstate *state, int tapenum, unsigned int len);
/*
- * This array holds pointers to tuples in sort memory. If we are in
- * state INITIAL, the tuples are in no particular order; if we are in
- * state SORTEDINMEM, the tuples are in final sorted order; in states
- * BUILDRUNS and FINALMERGE, the tuples are organized in "heap" order
- * per Algorithm H. (Note that memtupcount only counts the tuples
- * that are part of the heap --- during merge passes, memtuples[]
- * entries beyond TAPERANGE are never in the heap and are used to hold
- * pre-read tuples.) In state SORTEDONTAPE, the array is not used.
+ * This array holds pointers to tuples in sort memory. If we are in state
+ * INITIAL, the tuples are in no particular order; if we are in state
+ * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
+ * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
+ * H. (Note that memtupcount only counts the tuples that are part of the
+ * heap --- during merge passes, memtuples[] entries beyond TAPERANGE are
+ * never in the heap and are used to hold pre-read tuples.) In state
+ * SORTEDONTAPE, the array is not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
int memtupsize; /* allocated length of memtuples array */
/*
- * While building initial runs, this array holds the run number for
- * each tuple in memtuples[]. During merge passes, we re-use it to
- * hold the input tape number that each tuple in the heap was read
- * from, or to hold the index of the next tuple pre-read from the same
- * tape in the case of pre-read entries. This array is never
- * allocated unless we need to use tapes. Whenever it is allocated,
- * it has the same length as memtuples[].
+ * While building initial runs, this array holds the run number for each
+ * tuple in memtuples[]. During merge passes, we re-use it to hold the
+ * input tape number that each tuple in the heap was read from, or to hold
+ * the index of the next tuple pre-read from the same tape in the case of
+ * pre-read entries. This array is never allocated unless we need to use
+ * tapes. Whenever it is allocated, it has the same length as
+ * memtuples[].
*/
- int *memtupindex; /* index value associated with
- * memtuples[i] */
+ int *memtupindex; /* index value associated with memtuples[i] */
/*
* While building initial runs, this is the current output run number
- * (starting at 0). Afterwards, it is the number of initial runs we
- * made.
+ * (starting at 0). Afterwards, it is the number of initial runs we made.
*/
int currentRun;
/*
- * These variables are only used during merge passes. mergeactive[i]
- * is true if we are reading an input run from (actual) tape number i
- * and have not yet exhausted that run. mergenext[i] is the memtuples
- * index of the next pre-read tuple (next to be loaded into the heap)
- * for tape i, or 0 if we are out of pre-read tuples. mergelast[i]
- * similarly points to the last pre-read tuple from each tape.
- * mergeavailmem[i] is the amount of unused space allocated for tape
- * i. mergefreelist and mergefirstfree keep track of unused locations
- * in the memtuples[] array. memtupindex[] links together pre-read
- * tuples for each tape as well as recycled locations in
- * mergefreelist. It is OK to use 0 as a null link in these lists,
- * because memtuples[0] is part of the merge heap and is never a
- * pre-read tuple.
+ * These variables are only used during merge passes. mergeactive[i] is
+ * true if we are reading an input run from (actual) tape number i and
+ * have not yet exhausted that run. mergenext[i] is the memtuples index
+ * of the next pre-read tuple (next to be loaded into the heap) for tape
+ * i, or 0 if we are out of pre-read tuples. mergelast[i] similarly
+ * points to the last pre-read tuple from each tape. mergeavailmem[i] is
+ * the amount of unused space allocated for tape i. mergefreelist and
+ * mergefirstfree keep track of unused locations in the memtuples[] array.
+ * memtupindex[] links together pre-read tuples for each tape as well as
+ * recycled locations in mergefreelist. It is OK to use 0 as a null link
+ * in these lists, because memtuples[0] is part of the merge heap and is
+ * never a pre-read tuple.
*/
bool mergeactive[MAXTAPES]; /* Active input run source? */
- int mergenext[MAXTAPES]; /* first preread tuple for each
- * source */
- int mergelast[MAXTAPES]; /* last preread tuple for each
- * source */
+ int mergenext[MAXTAPES]; /* first preread tuple for each source */
+ int mergelast[MAXTAPES]; /* last preread tuple for each source */
long mergeavailmem[MAXTAPES]; /* availMem for prereading
* tapes */
long spacePerTape; /* actual per-tape target usage */
@@ -240,17 +233,15 @@ struct Tuplesortstate
*/
int Level; /* Knuth's l */
int destTape; /* current output tape (Knuth's j, less 1) */
- int tp_fib[MAXTAPES]; /* Target Fibonacci run counts
- * (A[]) */
+ int tp_fib[MAXTAPES]; /* Target Fibonacci run counts (A[]) */
int tp_runs[MAXTAPES]; /* # of real runs on each tape */
- int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape
- * (D[]) */
+ int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape (D[]) */
int tp_tapenum[MAXTAPES]; /* Actual tape numbers (TAPE[]) */
/*
- * These variables are used after completion of sorting to keep track
- * of the next tuple to return. (In the tape case, the tape's current
- * read position is also critical state.)
+ * These variables are used after completion of sorting to keep track of
+ * the next tuple to return. (In the tape case, the tape's current read
+ * position is also critical state.)
*/
int result_tape; /* actual tape number of finished output */
int current; /* array index (only used if SORTEDINMEM) */
@@ -258,8 +249,7 @@ struct Tuplesortstate
/* markpos_xxx holds marked position for mark and restore */
long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
- int markpos_offset; /* saved "current", or offset in tape
- * block */
+ int markpos_offset; /* saved "current", or offset in tape block */
bool markpos_eof; /* saved "eof_reached" */
/*
@@ -272,8 +262,8 @@ struct Tuplesortstate
SortFunctionKind *sortFnKinds;
/*
- * These variables are specific to the IndexTuple case; they are set
- * by tuplesort_begin_index and used only by the IndexTuple routines.
+ * These variables are specific to the IndexTuple case; they are set by
+ * tuplesort_begin_index and used only by the IndexTuple routines.
*/
Relation indexRel;
ScanKey indexScanKey;
@@ -458,8 +448,7 @@ tuplesort_begin_common(int workMem, bool randomAccess)
/* Algorithm D variables will be initialized by inittapes, if needed */
- state->result_tape = -1; /* flag that result tape has not been
- * formed */
+ state->result_tape = -1; /* flag that result tape has not been formed */
return state;
}
@@ -505,8 +494,8 @@ tuplesort_begin_heap(TupleDesc tupDesc,
&state->sortFnKinds[i]);
/*
- * We needn't fill in sk_strategy or sk_subtype since these
- * scankeys will never be passed to an index.
+ * We needn't fill in sk_strategy or sk_subtype since these scankeys
+ * will never be passed to an index.
*/
ScanKeyInit(&state->scanKeys[i],
attNums[i],
@@ -606,8 +595,7 @@ tuplesort_end(Tuplesortstate *state)
pfree(state->memtupindex);
/*
- * this stuff might better belong in a variant-specific shutdown
- * routine
+ * this stuff might better belong in a variant-specific shutdown routine
*/
if (state->scanKeys)
pfree(state->scanKeys);
@@ -724,16 +712,16 @@ puttuple_common(Tuplesortstate *state, void *tuple)
/*
* Insert the copied tuple into the heap, with run number
- * currentRun if it can go into the current run, else run
- * number currentRun+1. The tuple can go into the current run
- * if it is >= the first not-yet-output tuple. (Actually, it
- * could go into the current run if it is >= the most recently
- * output tuple ... but that would require keeping around the
- * tuple we last output, and it's simplest to let writetup
- * free each tuple as soon as it's written.)
+ * currentRun if it can go into the current run, else run number
+ * currentRun+1. The tuple can go into the current run if it is
+ * >= the first not-yet-output tuple. (Actually, it could go into
+ * the current run if it is >= the most recently output tuple ...
+ * but that would require keeping around the tuple we last output,
+ * and it's simplest to let writetup free each tuple as soon as
+ * it's written.)
*
- * Note there will always be at least one tuple in the heap at
- * this point; see dumptuples.
+ * Note there will always be at least one tuple in the heap at this
+ * point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
@@ -742,8 +730,7 @@ puttuple_common(Tuplesortstate *state, void *tuple)
tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
/*
- * If we are over the memory limit, dump tuples till we're
- * under.
+ * If we are over the memory limit, dump tuples till we're under.
*/
dumptuples(state, false);
break;
@@ -770,8 +757,8 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_INITIAL:
/*
- * We were able to accumulate all the tuples within the
- * allowed amount of memory. Just qsort 'em and we're done.
+ * We were able to accumulate all the tuples within the allowed
+ * amount of memory. Just qsort 'em and we're done.
*/
if (state->memtupcount > 1)
{
@@ -788,10 +775,10 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining
- * in memory out to tape; then merge until we have a single
- * remaining run (or, if !randomAccess, one run per tape).
- * Note that mergeruns sets the correct state->status.
+ * Finish tape-based sort. First, flush all tuples remaining in
+ * memory out to tape; then merge until we have a single remaining
+ * run (or, if !randomAccess, one run per tape). Note that
+ * mergeruns sets the correct state->status.
*/
dumptuples(state, true);
mergeruns(state);
@@ -880,16 +867,15 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple, else
+ * - tuple before last returned.
*/
if (state->eof_reached)
{
/*
- * Seek position is pointing just past the zero tuplen at
- * the end of file; back up to fetch last tuple's ending
- * length word. If seek fails we must have a completely
- * empty file.
+ * Seek position is pointing just past the zero tuplen at the
+ * end of file; back up to fetch last tuple's ending length
+ * word. If seek fails we must have a completely empty file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -900,9 +886,8 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
else
{
/*
- * Back up and fetch previously-returned tuple's ending
- * length word. If seek fails, assume we are at start of
- * file.
+ * Back up and fetch previously-returned tuple's ending length
+ * word. If seek fails, assume we are at start of file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -915,17 +900,17 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + 2 * sizeof(unsigned int)))
+ tuplen + 2 * sizeof(unsigned int)))
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + sizeof(unsigned int)))
+ tuplen + sizeof(unsigned int)))
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
}
@@ -934,9 +919,9 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
tuplen = getlen(state, state->result_tape, false);
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -968,14 +953,12 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
if ((tupIndex = state->mergenext[srcTape]) == 0)
{
/*
- * out of preloaded data on this tape, try to read
- * more
+ * out of preloaded data on this tape, try to read more
*/
mergepreread(state);
/*
- * if still no data, we've reached end of run on this
- * tape
+ * if still no data, we've reached end of run on this tape
*/
if ((tupIndex = state->mergenext[srcTape]) == 0)
return tup;
@@ -1062,12 +1045,12 @@ inittapes(Tuplesortstate *state)
USEMEM(state, GetMemoryChunkSpace(state->memtupindex));
/*
- * Convert the unsorted contents of memtuples[] into a heap. Each
- * tuple is marked as belonging to run number zero.
+ * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
+ * marked as belonging to run number zero.
*
* NOTE: we pass false for checkIndex since there's no point in comparing
- * indexes in this step, even though we do intend the indexes to be
- * part of the sort key...
+ * indexes in this step, even though we do intend the indexes to be part
+ * of the sort key...
*/
ntuples = state->memtupcount;
state->memtupcount = 0; /* make the heap empty */
@@ -1150,8 +1133,8 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
- * volume is between 1X and 2X workMem), we can just use that tape as
- * the finished output, rather than doing a useless merge.
+ * volume is between 1X and 2X workMem), we can just use that tape as the
+ * finished output, rather than doing a useless merge.
*/
if (state->currentRun == 1)
{
@@ -1183,8 +1166,8 @@ mergeruns(Tuplesortstate *state)
}
/*
- * If we don't have to produce a materialized sorted tape,
- * quit as soon as we're down to one real/dummy run per tape.
+ * If we don't have to produce a materialized sorted tape, quit as
+ * soon as we're down to one real/dummy run per tape.
*/
if (!state->randomAccess && allOneRun)
{
@@ -1215,8 +1198,7 @@ mergeruns(Tuplesortstate *state)
state->tp_runs[TAPERANGE - 1] = 0;
/*
- * reassign tape units per step D6; note we no longer care about
- * A[]
+ * reassign tape units per step D6; note we no longer care about A[]
*/
svTape = state->tp_tapenum[TAPERANGE];
svDummy = state->tp_dummy[TAPERANGE];
@@ -1233,12 +1215,12 @@ mergeruns(Tuplesortstate *state)
}
/*
- * Done. Knuth says that the result is on TAPE[1], but since we
- * exited the loop without performing the last iteration of step D6,
- * we have not rearranged the tape unit assignment, and therefore the
- * result is on TAPE[T]. We need to do it this way so that we can
- * freeze the final output tape while rewinding it. The last
- * iteration of step D6 would be a waste of cycles anyway...
+ * Done. Knuth says that the result is on TAPE[1], but since we exited
+ * the loop without performing the last iteration of step D6, we have not
+ * rearranged the tape unit assignment, and therefore the result is on
+ * TAPE[T]. We need to do it this way so that we can freeze the final
+ * output tape while rewinding it. The last iteration of step D6 would be
+ * a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[TAPERANGE];
LogicalTapeFreeze(state->tapeset, state->result_tape);
@@ -1262,16 +1244,15 @@ mergeonerun(Tuplesortstate *state)
spaceFreed;
/*
- * Start the merge by loading one tuple from each active source tape
- * into the heap. We can also decrease the input run/dummy run
- * counts.
+ * Start the merge by loading one tuple from each active source tape into
+ * the heap. We can also decrease the input run/dummy run counts.
*/
beginmerge(state);
/*
- * Execute merge by repeatedly extracting lowest tuple in heap,
- * writing it out, and replacing it with next tuple from same tape (if
- * there is another one).
+ * Execute merge by repeatedly extracting lowest tuple in heap, writing it
+ * out, and replacing it with next tuple from same tape (if there is
+ * another one).
*/
while (state->memtupcount > 0)
{
@@ -1304,8 +1285,8 @@ mergeonerun(Tuplesortstate *state)
}
/*
- * When the heap empties, we're done. Write an end-of-run marker on
- * the output tape, and increment its count of real runs.
+ * When the heap empties, we're done. Write an end-of-run marker on the
+ * output tape, and increment its count of real runs.
*/
markrunend(state, destTape);
state->tp_runs[TAPERANGE]++;
@@ -1341,8 +1322,7 @@ beginmerge(Tuplesortstate *state)
memset(state->mergelast, 0, sizeof(state->mergelast));
memset(state->mergeavailmem, 0, sizeof(state->mergeavailmem));
state->mergefreelist = 0; /* nothing in the freelist */
- state->mergefirstfree = MAXTAPES; /* first slot available for
- * preread */
+ state->mergefirstfree = MAXTAPES; /* first slot available for preread */
/* Adjust run counts and mark the active tapes */
activeTapes = 0;
@@ -1361,8 +1341,8 @@ beginmerge(Tuplesortstate *state)
}
/*
- * Initialize space allocation to let each active input tape have an
- * equal share of preread space.
+ * Initialize space allocation to let each active input tape have an equal
+ * share of preread space.
*/
Assert(activeTapes > 0);
state->spacePerTape = state->availMem / activeTapes;
@@ -1373,8 +1353,8 @@ beginmerge(Tuplesortstate *state)
}
/*
- * Preread as many tuples as possible (and at least one) from each
- * active tape
+ * Preread as many tuples as possible (and at least one) from each active
+ * tape
*/
mergepreread(state);
@@ -1432,8 +1412,8 @@ mergepreread(Tuplesortstate *state)
continue;
/*
- * Read tuples from this tape until it has used up its free
- * memory, but ensure that we have at least one.
+ * Read tuples from this tape until it has used up its free memory,
+ * but ensure that we have at least one.
*/
priorAvail = state->availMem;
state->availMem = state->mergeavailmem[srcTape];
@@ -1508,8 +1488,8 @@ dumptuples(Tuplesortstate *state, bool alltuples)
(LACKMEM(state) && state->memtupcount > 1))
{
/*
- * Dump the heap's frontmost entry, and sift up to remove it from
- * the heap.
+ * Dump the heap's frontmost entry, and sift up to remove it from the
+ * heap.
*/
Assert(state->memtupcount > 0);
WRITETUP(state, state->tp_tapenum[state->destTape],
@@ -1680,8 +1660,8 @@ tuplesort_heap_insert(Tuplesortstate *state, void *tuple,
memtupindex = state->memtupindex;
/*
- * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth
- * is using 1-based array indexes, not 0-based.
+ * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
+ * using 1-based array indexes, not 0-based.
*/
j = state->memtupcount++;
while (j > 0)
@@ -1805,12 +1785,12 @@ SelectSortFunction(Oid sortOperator,
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "<" or ">" operator of any btree opclass. It's possible that it
- * might be registered both ways (eg, if someone were to build a
- * "reverse sort" opclass for some reason); prefer the "<" case if so.
- * If the operator is registered the same way in multiple opclasses,
- * assume we can use the associated comparator function from any one.
+ * Search pg_amop to see if the target operator is registered as the "<"
+ * or ">" operator of any btree opclass. It's possible that it might be
+ * registered both ways (eg, if someone were to build a "reverse sort"
+ * opclass for some reason); prefer the "<" case if so. If the operator is
+ * registered the same way in multiple opclasses, assume we can use the
+ * associated comparator function from any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(sortOperator),
@@ -1854,11 +1834,11 @@ SelectSortFunction(Oid sortOperator,
}
/*
- * Can't find a comparator, so use the operator as-is. Decide whether
- * it is forward or reverse sort by looking at its name (grotty, but
- * this only matters for deciding which end NULLs should get sorted
- * to). XXX possibly better idea: see whether its selectivity
- * function is scalargtcmp?
+ * Can't find a comparator, so use the operator as-is. Decide whether it
+ * is forward or reverse sort by looking at its name (grotty, but this
+ * only matters for deciding which end NULLs should get sorted to). XXX
+ * possibly better idea: see whether its selectivity function is
+ * scalargtcmp?
*/
tuple = SearchSysCache(OPEROID,
ObjectIdGetDatum(sortOperator),
@@ -2142,15 +2122,15 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
* If btree has asked us to enforce uniqueness, complain if two equal
* tuples are detected (unless there was at least one NULL field).
*
- * It is sufficient to make the test here, because if two tuples are
- * equal they *must* get compared at some stage of the sort ---
- * otherwise the sort algorithm wouldn't have checked whether one must
- * appear before the other.
+ * It is sufficient to make the test here, because if two tuples are equal
+ * they *must* get compared at some stage of the sort --- otherwise the
+ * sort algorithm wouldn't have checked whether one must appear before the
+ * other.
*
- * Some rather brain-dead implementations of qsort will sometimes call
- * the comparison routine to compare a value to itself. (At this
- * writing only QNX 4 is known to do such silly things.) Don't raise
- * a bogus error in that case.
+ * Some rather brain-dead implementations of qsort will sometimes call the
+ * comparison routine to compare a value to itself. (At this writing only
+ * QNX 4 is known to do such silly things.) Don't raise a bogus error in
+ * that case.
*/
if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
ereport(ERROR,
@@ -2159,10 +2139,10 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
errdetail("Table contains duplicated values.")));
/*
- * If key values are equal, we sort on ItemPointer. This does not
- * affect validity of the finished index, but it offers cheap
- * insurance against performance problems with bad qsort
- * implementations that have trouble with large numbers of equal keys.
+ * If key values are equal, we sort on ItemPointer. This does not affect
+ * validity of the finished index, but it offers cheap insurance against
+ * performance problems with bad qsort implementations that have trouble
+ * with large numbers of equal keys.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 1c00e06371f..d409121418e 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.22 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,41 +72,41 @@ struct Tuplestorestate
BufFile *myfile; /* underlying file, or NULL if none */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are handling from the routines that don't need to
- * know it. They are set up by the tuplestore_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are handling from the routines that don't need to know it.
+ * They are set up by the tuplestore_begin_xxx routines.
*
- * (Although tuplestore.c currently only supports heap tuples, I've
- * copied this part of tuplesort.c so that extension to other kinds of
- * objects will be easy if it's ever needed.)
+ * (Although tuplestore.c currently only supports heap tuples, I've copied
+ * this part of tuplesort.c so that extension to other kinds of objects
+ * will be easy if it's ever needed.)
*
* Function to copy a supplied input tuple into palloc'd space. (NB: we
- * assume that a single pfree() is enough to release the tuple later,
- * so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplestorestate *state, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplestorestate *state, unsigned int len);
/*
- * This array holds pointers to tuples in memory if we are in state
- * INMEM. In states WRITEFILE and READFILE it's not used.
+ * This array holds pointers to tuples in memory if we are in state INMEM.
+ * In states WRITEFILE and READFILE it's not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
@@ -115,17 +115,17 @@ struct Tuplestorestate
/*
* These variables are used to keep track of the current position.
*
- * In state WRITEFILE, the current file seek position is the write point,
- * and the read position is remembered in readpos_xxx; in state
- * READFILE, the current file seek position is the read point, and the
- * write position is remembered in writepos_xxx. (The write position
- * is the same as EOF, but since BufFileSeek doesn't currently
- * implement SEEK_END, we have to remember it explicitly.)
+ * In state WRITEFILE, the current file seek position is the write point, and
+ * the read position is remembered in readpos_xxx; in state READFILE, the
+ * current file seek position is the read point, and the write position is
+ * remembered in writepos_xxx. (The write position is the same as EOF,
+ * but since BufFileSeek doesn't currently implement SEEK_END, we have to
+ * remember it explicitly.)
*
- * Special case: if we are in WRITEFILE state and eof_reached is true,
- * then the read position is implicitly equal to the write position
- * (and hence to the file seek position); this way we need not update
- * the readpos_xxx variables on each write.
+ * Special case: if we are in WRITEFILE state and eof_reached is true, then
+ * the read position is implicitly equal to the write position (and hence
+ * to the file seek position); this way we need not update the readpos_xxx
+ * variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
@@ -429,7 +429,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
&state->writepos_file, &state->writepos_offset);
if (!state->eof_reached)
if (BufFileSeek(state->myfile,
- state->readpos_file, state->readpos_offset,
+ state->readpos_file, state->readpos_offset,
SEEK_SET) != 0)
elog(ERROR, "seek failed");
state->status = TSS_READFILE;
@@ -454,11 +454,11 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple, else
+ * - tuple before last returned.
*
- * Back up to fetch previously-returned tuple's ending length
- * word. If seek fails, assume we are at start of file.
+ * Back up to fetch previously-returned tuple's ending length word.
+ * If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
@@ -476,17 +476,17 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* Back up to get ending length word of tuple before it.
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + 2 * sizeof(unsigned int)),
+ -(long) (tuplen + 2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + sizeof(unsigned int)),
+ -(long) (tuplen + sizeof(unsigned int)),
SEEK_CUR) != 0)
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
@@ -495,9 +495,9 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
}
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (BufFileSeek(state->myfile, 0,
-(long) tuplen,
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index f8dcf43b64d..fa6bd4a3c58 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -21,7 +21,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.90 2005/08/20 00:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -559,8 +559,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple)))
{
if (HeapTupleHeaderGetCmin(tuple) >= curcid)
- return HeapTupleInvisible; /* inserted after scan
- * started */
+ return HeapTupleInvisible; /* inserted after scan started */
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return HeapTupleMayBeUpdated;
@@ -581,11 +580,9 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(tuple)));
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan
- * started */
+ return HeapTupleInvisible; /* updated before scan started */
}
else if (TransactionIdIsInProgress(HeapTupleHeaderGetXmin(tuple)))
return HeapTupleInvisible;
@@ -632,8 +629,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_IS_LOCKED)
return HeapTupleMayBeUpdated;
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
return HeapTupleInvisible; /* updated before scan started */
}
@@ -945,12 +941,12 @@ HeapTupleSatisfiesSnapshot(HeapTupleHeader tuple, Snapshot snapshot,
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so we
- * have to convert a subxact XID to its parent for comparison.
- * However, we can make first-pass range checks with the given XID,
- * because a subxact with XID < xmin has surely also got a parent with
- * XID < xmin, while one with XID >= xmax must belong to a parent that
- * was not yet committed at the time of this snapshot.
+ * Note that the provided snapshot contains only top-level XIDs, so we have
+ * to convert a subxact XID to its parent for comparison. However, we can
+ * make first-pass range checks with the given XID, because a subxact with
+ * XID < xmin has surely also got a parent with XID < xmin, while one with
+ * XID >= xmax must belong to a parent that was not yet committed at the
+ * time of this snapshot.
*/
if (TransactionIdFollowsOrEquals(HeapTupleHeaderGetXmin(tuple),
snapshot->xmin))
@@ -1074,8 +1070,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
/*
* Has inserting transaction committed?
*
- * If the inserting transaction aborted, then the tuple was never visible
- * to any other transaction, so we can delete it immediately.
+ * If the inserting transaction aborted, then the tuple was never visible to
+ * any other transaction, so we can delete it immediately.
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
{
@@ -1135,8 +1131,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMIN_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1147,8 +1142,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now
- * what about the deleting transaction?
+ * Okay, the inserter committed, so it was good at some point. Now what
+ * about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
return HEAPTUPLE_LIVE;
@@ -1156,10 +1151,10 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
if (tuple->t_infomask & HEAP_IS_LOCKED)
{
/*
- * "Deleting" xact really only locked it, so the tuple
- * is live in any case. However, we must make sure that either
- * XMAX_COMMITTED or XMAX_INVALID gets set once the xact is gone;
- * otherwise it is unsafe to recycle CLOG status after vacuuming.
+ * "Deleting" xact really only locked it, so the tuple is live in any
+ * case. However, we must make sure that either XMAX_COMMITTED or
+ * XMAX_INVALID gets set once the xact is gone; otherwise it is unsafe
+ * to recycle CLOG status after vacuuming.
*/
if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
{
@@ -1175,9 +1170,9 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * We don't really care whether xmax did commit, abort or
- * crash. We know that xmax did lock the tuple, but
- * it did not and will never actually update it.
+ * We don't really care whether xmax did commit, abort or crash.
+ * We know that xmax did lock the tuple, but it did not and will
+ * never actually update it.
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1204,8 +1199,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1223,10 +1217,10 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
HeapTupleHeaderGetXmax(tuple)))
{
/*
- * Inserter also deleted it, so it was never visible to anyone
- * else. However, we can only remove it early if it's not an
- * updated tuple; else its parent tuple is linking to it via t_ctid,
- * and this tuple mustn't go away before the parent does.
+ * Inserter also deleted it, so it was never visible to anyone else.
+ * However, we can only remove it early if it's not an updated tuple;
+ * else its parent tuple is linking to it via t_ctid, and this tuple
+ * mustn't go away before the parent does.
*/
if (!(tuple->t_infomask & HEAP_UPDATED))
return HEAPTUPLE_DEAD;