| author | Tom Lane <tgl@sss.pgh.pa.us> | 2022-04-08 14:55:14 -0400 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2022-04-08 14:55:14 -0400 |
| commit | 9a374b77fb53e4cfbca121e4fa278a7d71bde7c4 (patch) | |
| tree | 6591af757bd9df12549279b4b87f01e0ce98bd79 /src/bin/pg_dump/pg_backup_archiver.c | |
| parent | 5c431c7fb327e1abc70b7a197650f8d45fd5bede (diff) | |
Improve frontend error logging style.
Get rid of the separate "FATAL" log level, as it was applied
so inconsistently as to be meaningless. This mostly involves
s/pg_log_fatal/pg_log_error/g.
Create a macro pg_fatal() to handle the common use-case of
pg_log_error() immediately followed by exit(1). Various
modules had already invented either this or equivalent macros;
standardize on pg_fatal() and apply it where possible.
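As a sketch of what that macro amounts to (per the description above, not a copy of the authoritative definition in src/include/common/logging.h, which already provides it in the post-patch tree):

```c
/*
 * Illustration only: a pg_fatal()-style macro is an error-level log
 * message immediately followed by exit(1).
 */
#include <stdlib.h>				/* exit() */
#include "common/logging.h"		/* pg_log_error() */

#define pg_fatal(...) \
	do { \
		pg_log_error(__VA_ARGS__); \
		exit(1); \
	} while (0)
```

Call sites then collapse from the two-statement pattern (pg_log_error() followed by exit(1)) to a single line, as in the pg_fatal("could not close TOC file: %m") conversions in the diff below.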
Invent the ability to add "detail" and "hint" messages to a
frontend message, much as we have long had in the backend.
Except where rewording was needed to convert existing coding
to detail/hint style, I have (mostly) resisted the temptation
to change existing message wording.
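A hedged example of what the new style looks like at a call site: the check, its messages, and the helper names pg_log_error_detail()/pg_log_error_hint() used here are illustrative assumptions mirroring the backend's errdetail()/errhint() convention, not code from this diff.

```c
#include <stdlib.h>
#include "common/logging.h"

/* Hypothetical frontend check showing primary/detail/hint messages. */
static void
check_archive_version(int version, int max_supported)
{
	if (version > max_supported)
	{
		pg_log_error("unsupported archive version %d", version);
		pg_log_error_detail("This build supports versions up to %d.",
							max_supported);
		pg_log_error_hint("Use a newer pg_restore, or recreate the dump.");
		exit(1);
	}
}
```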
Patch by me. Design and patch reviewed at various stages by
Robert Haas, Kyotaro Horiguchi, Peter Eisentraut and
Daniel Gustafsson.
Discussion: https://postgr.es/m/1363732.1636496441@sss.pgh.pa.us
Diffstat (limited to 'src/bin/pg_dump/pg_backup_archiver.c')
-rw-r--r-- | src/bin/pg_dump/pg_backup_archiver.c | 154 |
1 file changed, 77 insertions, 77 deletions
```diff
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d41a99d6ea7..24e42fa5d7d 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -276,7 +276,7 @@ CloseArchive(Archive *AHX)
 	res = fclose(AH->OF);
 	if (res != 0)
-		fatal("could not close output file: %m");
+		pg_fatal("could not close output file: %m");
 }
 /* Public */
@@ -330,8 +330,8 @@ ProcessArchiveRestoreOptions(Archive *AHX)
 				/* ok no matter which section we were in */
 				break;
 			default:
-				fatal("unexpected section code %d",
-					  (int) te->section);
+				pg_fatal("unexpected section code %d",
+						 (int) te->section);
 				break;
 		}
 	}
@@ -367,11 +367,11 @@ RestoreArchive(Archive *AHX)
 	{
 		/* We haven't got round to making this work for all archive formats */
 		if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
-			fatal("parallel restore is not supported with this archive file format");
+			pg_fatal("parallel restore is not supported with this archive file format");
 		/* Doesn't work if the archive represents dependencies as OIDs */
 		if (AH->version < K_VERS_1_8)
-			fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
+			pg_fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
 		/*
 		 * It's also not gonna work if we can't reopen the input file, so
@@ -389,7 +389,7 @@ RestoreArchive(Archive *AHX)
 		for (te = AH->toc->next; te != AH->toc; te = te->next)
 		{
 			if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
-				fatal("cannot restore from compressed archive (compression not supported in this installation)");
+				pg_fatal("cannot restore from compressed archive (compression not supported in this installation)");
 		}
 	}
 #endif
@@ -408,7 +408,7 @@ RestoreArchive(Archive *AHX)
 	{
 		pg_log_info("connecting to database for restore");
 		if (AH->version < K_VERS_1_3)
-			fatal("direct database connections are not supported in pre-1.3 archives");
+			pg_fatal("direct database connections are not supported in pre-1.3 archives");
 		/*
 		 * We don't want to guess at whether the dump will successfully
@@ -1037,7 +1037,7 @@ WriteData(Archive *AHX, const void *data, size_t dLen)
 	ArchiveHandle *AH = (ArchiveHandle *) AHX;
 	if (!AH->currToc)
-		fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
+		pg_fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
 	AH->WriteDataPtr(AH, data, dLen);
 }
@@ -1220,7 +1220,7 @@ StartBlob(Archive *AHX, Oid oid)
 	ArchiveHandle *AH = (ArchiveHandle *) AHX;
 	if (!AH->StartBlobPtr)
-		fatal("large-object output not supported in chosen format");
+		pg_fatal("large-object output not supported in chosen format");
 	AH->StartBlobPtr(AH, AH->currToc, oid);
@@ -1311,13 +1311,13 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
 		{
 			loOid = lo_create(AH->connection, oid);
 			if (loOid == 0 || loOid != oid)
-				fatal("could not create large object %u: %s",
-					  oid, PQerrorMessage(AH->connection));
+				pg_fatal("could not create large object %u: %s",
+						 oid, PQerrorMessage(AH->connection));
 		}
 		AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
 		if (AH->loFd == -1)
-			fatal("could not open large object %u: %s",
-				  oid, PQerrorMessage(AH->connection));
+			pg_fatal("could not open large object %u: %s",
+					 oid, PQerrorMessage(AH->connection));
 	}
 	else
 	{
@@ -1372,7 +1372,7 @@ SortTocFromFile(Archive *AHX)
 	/* Setup the file */
 	fh = fopen(ropt->tocFile, PG_BINARY_R);
 	if (!fh)
-		fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
+		pg_fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
 	initStringInfo(&linebuf);
@@ -1407,8 +1407,8 @@ SortTocFromFile(Archive *AHX)
 		/* Find TOC entry */
 		te = getTocEntryByDumpId(AH, id);
 		if (!te)
-			fatal("could not find entry for ID %d",
-				  id);
+			pg_fatal("could not find entry for ID %d",
+					 id);
 		/* Mark it wanted */
 		ropt->idWanted[id - 1] = true;
@@ -1430,7 +1430,7 @@ SortTocFromFile(Archive *AHX)
 	pg_free(linebuf.data);
 	if (fclose(fh) != 0)
-		fatal("could not close TOC file: %m");
+		pg_fatal("could not close TOC file: %m");
 }
 /**********************
@@ -1544,9 +1544,9 @@ SetOutput(ArchiveHandle *AH, const char *filename, int compression)
 	if (!AH->OF)
 	{
 		if (filename)
-			fatal("could not open output file \"%s\": %m", filename);
+			pg_fatal("could not open output file \"%s\": %m", filename);
 		else
-			fatal("could not open output file: %m");
+			pg_fatal("could not open output file: %m");
 	}
 }
@@ -1573,7 +1573,7 @@ RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
 	res = fclose(AH->OF);
 	if (res != 0)
-		fatal("could not close output file: %m");
+		pg_fatal("could not close output file: %m");
 	AH->gzOut = savedContext.gzOut;
 	AH->OF = savedContext.OF;
@@ -1736,34 +1736,34 @@ warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
 		case STAGE_INITIALIZING:
 			if (AH->stage != AH->lastErrorStage)
-				pg_log_generic(PG_LOG_INFO, "while INITIALIZING:");
+				pg_log_info("while INITIALIZING:");
 			break;
 		case STAGE_PROCESSING:
 			if (AH->stage != AH->lastErrorStage)
-				pg_log_generic(PG_LOG_INFO, "while PROCESSING TOC:");
+				pg_log_info("while PROCESSING TOC:");
 			break;
 		case STAGE_FINALIZING:
 			if (AH->stage != AH->lastErrorStage)
-				pg_log_generic(PG_LOG_INFO, "while FINALIZING:");
+				pg_log_info("while FINALIZING:");
 			break;
 	}
 	if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
 	{
-		pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
-					   AH->currentTE->dumpId,
-					   AH->currentTE->catalogId.tableoid,
-					   AH->currentTE->catalogId.oid,
-					   AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
-					   AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
-					   AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+		pg_log_info("from TOC entry %d; %u %u %s %s %s",
+					AH->currentTE->dumpId,
+					AH->currentTE->catalogId.tableoid,
+					AH->currentTE->catalogId.oid,
+					AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+					AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+					AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
 	}
 	AH->lastErrorStage = AH->stage;
 	AH->lastErrorTE = AH->currentTE;
 	va_start(ap, fmt);
-	pg_log_generic_v(PG_LOG_ERROR, fmt, ap);
+	pg_log_generic_v(PG_LOG_ERROR, PG_LOG_PRIMARY, fmt, ap);
 	va_end(ap);
 	if (AH->public.exit_on_error)
@@ -1827,7 +1827,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
 	{
 		/* this check is purely paranoia, maxDumpId should be correct */
 		if (te->dumpId <= 0 || te->dumpId > maxDumpId)
-			fatal("bad dumpId");
+			pg_fatal("bad dumpId");
 		/* tocsByDumpId indexes all TOCs by their dump ID */
 		AH->tocsByDumpId[te->dumpId] = te;
@@ -1848,7 +1848,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
 			 * item's dump ID, so there should be a place for it in the array.
 			 */
 			if (tableId <= 0 || tableId > maxDumpId)
-				fatal("bad table dumpId for TABLE DATA item");
+				pg_fatal("bad table dumpId for TABLE DATA item");
 			AH->tableDataId[tableId] = te->dumpId;
 		}
@@ -1940,7 +1940,7 @@ ReadOffset(ArchiveHandle *AH, pgoff_t * o)
 			break;
 		default:
-			fatal("unexpected data offset flag %d", offsetFlg);
+			pg_fatal("unexpected data offset flag %d", offsetFlg);
 	}
 	/*
@@ -1953,7 +1953,7 @@ ReadOffset(ArchiveHandle *AH, pgoff_t * o)
 	else
 	{
 		if (AH->ReadBytePtr(AH) != 0)
-			fatal("file offset in dump file is too large");
+			pg_fatal("file offset in dump file is too large");
 	}
 }
@@ -2091,8 +2091,8 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 			char		buf[MAXPGPATH];
 			if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
-				fatal("directory name too long: \"%s\"",
-					  AH->fSpec);
+				pg_fatal("directory name too long: \"%s\"",
+						 AH->fSpec);
 			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
 			{
 				AH->format = archDirectory;
@@ -2101,39 +2101,39 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 #ifdef HAVE_LIBZ
 			if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
-				fatal("directory name too long: \"%s\"",
-					  AH->fSpec);
+				pg_fatal("directory name too long: \"%s\"",
+						 AH->fSpec);
 			if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
 			{
 				AH->format = archDirectory;
 				return AH->format;
 			}
 #endif
-			fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
-				  AH->fSpec);
+			pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
+					 AH->fSpec);
 			fh = NULL;			/* keep compiler quiet */
 		}
 		else
 		{
 			fh = fopen(AH->fSpec, PG_BINARY_R);
 			if (!fh)
-				fatal("could not open input file \"%s\": %m", AH->fSpec);
+				pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
 		}
 	}
 	else
 	{
 		fh = stdin;
 		if (!fh)
-			fatal("could not open input file: %m");
+			pg_fatal("could not open input file: %m");
 	}
 	if ((cnt = fread(sig, 1, 5, fh)) != 5)
 	{
 		if (ferror(fh))
-			fatal("could not read input file: %m");
+			pg_fatal("could not read input file: %m");
 		else
-			fatal("input file is too short (read %lu, expected 5)",
-				  (unsigned long) cnt);
+			pg_fatal("input file is too short (read %lu, expected 5)",
+					 (unsigned long) cnt);
 	}
 	/* Save it, just in case we need it later */
@@ -2164,19 +2164,19 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 			 * looks like it's probably a text format dump. so suggest they
 			 * try psql
 			 */
-			fatal("input file appears to be a text format dump. Please use psql.");
+			pg_fatal("input file appears to be a text format dump. Please use psql.");
 		}
 		if (AH->lookaheadLen != 512)
 		{
 			if (feof(fh))
-				fatal("input file does not appear to be a valid archive (too short?)");
+				pg_fatal("input file does not appear to be a valid archive (too short?)");
 			else
 				READ_ERROR_EXIT(fh);
 		}
 		if (!isValidTarHeader(AH->lookahead))
-			fatal("input file does not appear to be a valid archive");
+			pg_fatal("input file does not appear to be a valid archive");
 		AH->format = archTar;
 	}
@@ -2185,7 +2185,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 	if (wantClose)
 	{
 		if (fclose(fh) != 0)
-			fatal("could not close input file: %m");
+			pg_fatal("could not close input file: %m");
 		/* Forget lookahead, since we'll re-read header after re-opening */
 		AH->readHeader = 0;
 		AH->lookaheadLen = 0;
@@ -2302,7 +2302,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
 			break;
 		default:
-			fatal("unrecognized file format \"%d\"", fmt);
+			pg_fatal("unrecognized file format \"%d\"", fmt);
 	}
 	return AH;
@@ -2388,8 +2388,8 @@ mark_dump_job_done(ArchiveHandle *AH,
 				te->dumpId, te->desc, te->tag);
 	if (status != 0)
-		fatal("worker process failed: exit code %d",
-			  status);
+		pg_fatal("worker process failed: exit code %d",
+				 status);
 }
@@ -2509,8 +2509,8 @@ ReadToc(ArchiveHandle *AH)
 		/* Sanity check */
 		if (te->dumpId <= 0)
-			fatal("entry ID %d out of range -- perhaps a corrupt TOC",
-				  te->dumpId);
+			pg_fatal("entry ID %d out of range -- perhaps a corrupt TOC",
+					 te->dumpId);
 		te->hadDumper = ReadInt(AH);
@@ -2671,13 +2671,13 @@ processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
 		*ptr2 = '\0';
 		encoding = pg_char_to_encoding(ptr1);
 		if (encoding < 0)
-			fatal("unrecognized encoding \"%s\"",
-				  ptr1);
+			pg_fatal("unrecognized encoding \"%s\"",
+					 ptr1);
 		AH->public.encoding = encoding;
 	}
 	else
-		fatal("invalid ENCODING item: %s",
-			  te->defn);
+		pg_fatal("invalid ENCODING item: %s",
+				 te->defn);
 	free(defn);
 }
@@ -2694,8 +2694,8 @@ processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
 	else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
 		AH->public.std_strings = false;
 	else
-		fatal("invalid STDSTRINGS item: %s",
-			  te->defn);
+		pg_fatal("invalid STDSTRINGS item: %s",
+				 te->defn);
 }
 static void
@@ -2719,35 +2719,35 @@ StrictNamesCheck(RestoreOptions *ropt)
 	{
 		missing_name = simple_string_list_not_touched(&ropt->schemaNames);
 		if (missing_name != NULL)
-			fatal("schema \"%s\" not found", missing_name);
+			pg_fatal("schema \"%s\" not found", missing_name);
 	}
 	if (ropt->tableNames.head != NULL)
 	{
 		missing_name = simple_string_list_not_touched(&ropt->tableNames);
 		if (missing_name != NULL)
-			fatal("table \"%s\" not found", missing_name);
+			pg_fatal("table \"%s\" not found", missing_name);
 	}
 	if (ropt->indexNames.head != NULL)
 	{
 		missing_name = simple_string_list_not_touched(&ropt->indexNames);
 		if (missing_name != NULL)
-			fatal("index \"%s\" not found", missing_name);
+			pg_fatal("index \"%s\" not found", missing_name);
 	}
 	if (ropt->functionNames.head != NULL)
 	{
 		missing_name = simple_string_list_not_touched(&ropt->functionNames);
 		if (missing_name != NULL)
-			fatal("function \"%s\" not found", missing_name);
+			pg_fatal("function \"%s\" not found", missing_name);
 	}
 	if (ropt->triggerNames.head != NULL)
 	{
 		missing_name = simple_string_list_not_touched(&ropt->triggerNames);
 		if (missing_name != NULL)
-			fatal("trigger \"%s\" not found", missing_name);
+			pg_fatal("trigger \"%s\" not found", missing_name);
 	}
 }
@@ -3140,8 +3140,8 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
 	if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
 		/* NOT warn_or_exit_horribly... use -O instead to skip this. */
-		fatal("could not set session user to \"%s\": %s",
-			  user, PQerrorMessage(AH->connection));
+		pg_fatal("could not set session user to \"%s\": %s",
+				 user, PQerrorMessage(AH->connection));
 	PQclear(res);
 }
@@ -3751,7 +3751,7 @@ ReadHead(ArchiveHandle *AH)
 		AH->ReadBufPtr(AH, tmpMag, 5);
 		if (strncmp(tmpMag, "PGDMP", 5) != 0)
-			fatal("did not find magic string in file header");
+			pg_fatal("did not find magic string in file header");
 	}
 	vmaj = AH->ReadBytePtr(AH);
@@ -3765,13 +3765,13 @@ ReadHead(ArchiveHandle *AH)
 	AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
 	if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
-		fatal("unsupported version (%d.%d) in file header",
-			  vmaj, vmin);
+		pg_fatal("unsupported version (%d.%d) in file header",
+				 vmaj, vmin);
 	AH->intSize = AH->ReadBytePtr(AH);
 	if (AH->intSize > 32)
-		fatal("sanity check on integer size (%lu) failed",
-			  (unsigned long) AH->intSize);
+		pg_fatal("sanity check on integer size (%lu) failed",
+				 (unsigned long) AH->intSize);
 	if (AH->intSize > sizeof(int))
 		pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
@@ -3784,8 +3784,8 @@ ReadHead(ArchiveHandle *AH)
 	fmt = AH->ReadBytePtr(AH);
 	if (AH->format != fmt)
-		fatal("expected format (%d) differs from format found in file (%d)",
-			  AH->format, fmt);
+		pg_fatal("expected format (%d) differs from format found in file (%d)",
+				 AH->format, fmt);
 	if (AH->version >= K_VERS_1_2)
 	{
@@ -4455,8 +4455,8 @@ mark_restore_job_done(ArchiveHandle *AH,
 	else if (status == WORKER_IGNORED_ERRORS)
 		AH->public.n_errors++;
 	else if (status != 0)
-		fatal("worker process failed: exit code %d",
-			  status);
+		pg_fatal("worker process failed: exit code %d",
+				 status);
 	reduce_dependencies(AH, te, ready_list);
 }
```