PostgreSQL Source Code git master
pg_backup_archiver.c
1/*-------------------------------------------------------------------------
2 *
3 * pg_backup_archiver.c
4 *
5 * Private implementation of the archiver routines.
6 *
7 * See the headers to pg_restore for more details.
8 *
9 * Copyright (c) 2000, Philip Warner
10 * Rights are granted to use this software in any way so long
11 * as this notice is not removed.
12 *
13 * The author is not responsible for loss or damages that may
14 * result from its use.
15 *
16 *
17 * IDENTIFICATION
18 * src/bin/pg_dump/pg_backup_archiver.c
19 *
20 *-------------------------------------------------------------------------
21 */
22#include "postgres_fe.h"
23
24#include <ctype.h>
25#include <fcntl.h>
26#include <unistd.h>
27#include <sys/stat.h>
28#include <sys/wait.h>
29#ifdef WIN32
30#include <io.h>
31#endif
32
33#include "catalog/pg_class_d.h"
34#include "catalog/pg_largeobject_metadata_d.h"
35#include "catalog/pg_shdepend_d.h"
36#include "common/string.h"
37#include "compress_io.h"
38#include "dumputils.h"
40#include "lib/binaryheap.h"
41#include "lib/stringinfo.h"
42#include "libpq/libpq-fs.h"
43#include "parallel.h"
44#include "pg_backup_archiver.h"
45#include "pg_backup_db.h"
46#include "pg_backup_utils.h"
47
48#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
49#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
50
51#define TOC_PREFIX_NONE ""
52#define TOC_PREFIX_DATA "Data for "
53#define TOC_PREFIX_STATS "Statistics for "
54
55static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
56 const pg_compress_specification compression_spec,
58 SetupWorkerPtrType setupWorkerPtr,
60static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te);
61static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx);
63static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
64static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
65static void _becomeUser(ArchiveHandle *AH, const char *user);
66static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
67static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
68static void _selectTablespace(ArchiveHandle *AH, const char *tablespace);
69static void _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam);
71 TocEntry *te);
72static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te);
75static int _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH);
77static bool _tocEntryIsACL(TocEntry *te);
81static void buildTocEntryArrays(ArchiveHandle *AH);
82static void _moveBefore(TocEntry *pos, TocEntry *te);
84
85static int RestoringToDB(ArchiveHandle *AH);
86static void dump_lo_buf(ArchiveHandle *AH);
87static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
88static void SetOutput(ArchiveHandle *AH, const char *filename,
89 const pg_compress_specification compression_spec);
91static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput);
92
93static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel);
95 TocEntry *pending_list);
97 ParallelState *pstate,
98 TocEntry *pending_list);
100 TocEntry *pending_list);
101static void pending_list_header_init(TocEntry *l);
102static void pending_list_append(TocEntry *l, TocEntry *te);
103static void pending_list_remove(TocEntry *te);
104static int TocEntrySizeCompareQsort(const void *p1, const void *p2);
105static int TocEntrySizeCompareBinaryheap(void *p1, void *p2, void *arg);
106static void move_to_ready_heap(TocEntry *pending_list,
107 binaryheap *ready_heap,
108 RestorePass pass);
109static TocEntry *pop_next_work_item(binaryheap *ready_heap,
110 ParallelState *pstate);
111static void mark_dump_job_done(ArchiveHandle *AH,
112 TocEntry *te,
113 int status,
114 void *callback_data);
116 TocEntry *te,
117 int status,
118 void *callback_data);
119static void fix_dependencies(ArchiveHandle *AH);
120static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2);
123static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
124 binaryheap *ready_heap);
125static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
127
128static void StrictNamesCheck(RestoreOptions *ropt);
129
130
131/*
132 * Allocate a new DumpOptions block containing all default values.
133 */
134DumpOptions *
135NewDumpOptions(void)
136{
137 DumpOptions *opts = (DumpOptions *) pg_malloc(sizeof(DumpOptions));
138
139 InitDumpOptions(opts);
140 return opts;
141}
142
143/*
144 * Initialize a DumpOptions struct to all default values
145 */
146void
147InitDumpOptions(DumpOptions *opts)
148{
149 memset(opts, 0, sizeof(DumpOptions));
150 /* set any fields that shouldn't default to zeroes */
151 opts->include_everything = true;
152 opts->cparams.promptPassword = TRI_DEFAULT;
153 opts->dumpSections = DUMP_UNSECTIONED;
154 opts->dumpSchema = true;
155 opts->dumpData = true;
156 opts->dumpStatistics = false;
157}
158
159/*
160 * Create a freshly allocated DumpOptions with options equivalent to those
161 * found in the given RestoreOptions.
162 */
163DumpOptions *
164dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
165{
166 DumpOptions *dopt = NewDumpOptions();
167
168 /* this is the inverse of what's at the end of pg_dump.c's main() */
169 dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL;
170 dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL;
171 dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL;
172 dopt->cparams.username = ropt->cparams.username ? pg_strdup(ropt->cparams.username) : NULL;
174 dopt->outputClean = ropt->dropSchema;
175 dopt->dumpData = ropt->dumpData;
176 dopt->dumpSchema = ropt->dumpSchema;
177 dopt->dumpSections = ropt->dumpSections;
178 dopt->dumpStatistics = ropt->dumpStatistics;
179 dopt->if_exists = ropt->if_exists;
180 dopt->column_inserts = ropt->column_inserts;
181 dopt->aclsSkip = ropt->aclsSkip;
182 dopt->outputSuperuser = ropt->superuser;
183 dopt->outputCreateDB = ropt->createDB;
184 dopt->outputNoOwner = ropt->noOwner;
185 dopt->outputNoTableAm = ropt->noTableAm;
186 dopt->outputNoTablespaces = ropt->noTablespace;
188 dopt->use_setsessauth = ropt->use_setsessauth;
190 dopt->dump_inserts = ropt->dump_inserts;
191 dopt->no_comments = ropt->no_comments;
192 dopt->no_policies = ropt->no_policies;
193 dopt->no_publications = ropt->no_publications;
196 dopt->lockWaitTimeout = ropt->lockWaitTimeout;
199 dopt->sequence_data = ropt->sequence_data;
200 dopt->restrict_key = ropt->restrict_key ? pg_strdup(ropt->restrict_key) : NULL;
201
202 return dopt;
203}
204
205
206/*
207 * Wrapper functions.
208 *
209 * The objective is to make writing new formats and dumpers as simple
210 * as possible, if necessary at the expense of extra function calls etc.
211 *
212 */
213
214/*
215 * The dump worker setup needs lots of knowledge of the internals of pg_dump,
216 * so it's defined in pg_dump.c and passed into OpenArchive. The restore worker
217 * setup doesn't need to know anything much, so it's defined here.
218 */
219static void
220setupRestoreWorker(Archive *AHX)
221{
222 ArchiveHandle *AH = (ArchiveHandle *) AHX;
223
224 AH->ReopenPtr(AH);
225}
226
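To complement the comment above, here is a hedged sketch (not code from this file) of the dump-side counterpart: pg_dump defines a worker-setup routine of the same shape and hands it to CreateArchive() through the setupWorkerPtr parameter. The name and body below are illustrative only.

/* Hypothetical dump-side worker-setup callback, mirroring setupRestoreWorker(). */
static void
setup_dump_worker_example(Archive *AHX)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;

	/*
	 * A real implementation would re-establish per-worker state here, for
	 * example the worker's own connection to the source database.
	 */
	(void) AH;
}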
227
228/* Create a new archive */
229/* Public */
230Archive *
231CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
232 const pg_compress_specification compression_spec,
233 bool dosync, ArchiveMode mode,
236
237{
238 ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression_spec,
240
241 return (Archive *) AH;
242}
243
244/* Open an existing archive */
245/* Public */
246Archive *
247OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
248{
249 ArchiveHandle *AH;
250 pg_compress_specification compression_spec = {0};
251
252 compression_spec.algorithm = PG_COMPRESSION_NONE;
253 AH = _allocAH(FileSpec, fmt, compression_spec, true,
256
257 return (Archive *) AH;
258}
259
260/* Public */
261void
262CloseArchive(Archive *AHX)
263{
264 ArchiveHandle *AH = (ArchiveHandle *) AHX;
265
266 AH->ClosePtr(AH);
267
268 /* Close the output */
269 errno = 0;
270 if (!EndCompressFileHandle(AH->OF))
271 pg_fatal("could not close output file: %m");
272}
273
274/* Public */
275void
276SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ropt)
277{
278 /* Caller can omit dump options, in which case we synthesize them */
279 if (dopt == NULL && ropt != NULL)
281
282 /* Save options for later access */
283 AH->dopt = dopt;
284 AH->ropt = ropt;
285}
286
287/* Public */
288void
289ProcessArchiveRestoreOptions(Archive *AHX)
290{
291 ArchiveHandle *AH = (ArchiveHandle *) AHX;
292 RestoreOptions *ropt = AH->public.ropt;
293 TocEntry *te;
294 teSection curSection;
295
296 /* Decide which TOC entries will be dumped/restored, and mark them */
297 curSection = SECTION_PRE_DATA;
298 for (te = AH->toc->next; te != AH->toc; te = te->next)
299 {
300 /*
301 * When writing an archive, we also take this opportunity to check
302 * that we have generated the entries in a sane order that respects
303 * the section divisions. When reading, don't complain, since buggy
304 * old versions of pg_dump might generate out-of-order archives.
305 */
306 if (AH->mode != archModeRead)
307 {
308 switch (te->section)
309 {
310 case SECTION_NONE:
311 /* ok to be anywhere */
312 break;
313 case SECTION_PRE_DATA:
314 if (curSection != SECTION_PRE_DATA)
315 pg_log_warning("archive items not in correct section order");
316 break;
317 case SECTION_DATA:
318 if (curSection == SECTION_POST_DATA)
319 pg_log_warning("archive items not in correct section order");
320 break;
322 /* ok no matter which section we were in */
323 break;
324 default:
325 pg_fatal("unexpected section code %d",
326 (int) te->section);
327 break;
328 }
329 }
330
331 if (te->section != SECTION_NONE)
332 curSection = te->section;
333
334 te->reqs = _tocEntryRequired(te, curSection, AH);
335 }
336
337 /* Enforce strict names checking */
338 if (ropt->strict_names)
339 StrictNamesCheck(ropt);
340}
341
342/* Public */
343void
344RestoreArchive(Archive *AHX)
345{
346 ArchiveHandle *AH = (ArchiveHandle *) AHX;
347 RestoreOptions *ropt = AH->public.ropt;
348 bool parallel_mode;
349 TocEntry *te;
351
353
354 /*
355 * If we're going to do parallel restore, there are some restrictions.
356 */
357 parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
358 if (parallel_mode)
359 {
360 /* We haven't got round to making this work for all archive formats */
361 if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
362 pg_fatal("parallel restore is not supported with this archive file format");
363
364 /* Doesn't work if the archive represents dependencies as OIDs */
365 if (AH->version < K_VERS_1_8)
366 pg_fatal("parallel restore is not supported with archives made by pre-8.0 pg_dump");
367
368 /*
369 * It's also not gonna work if we can't reopen the input file, so
370 * let's try that immediately.
371 */
372 AH->ReopenPtr(AH);
373 }
374
375 /*
376 * Make sure we won't need (de)compression we haven't got
377 */
378 if (AH->PrintTocDataPtr != NULL)
379 {
380 for (te = AH->toc->next; te != AH->toc; te = te->next)
381 {
382 if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
383 {
385
386 if (errmsg)
387 pg_fatal("cannot restore from compressed archive (%s)",
388 errmsg);
389 else
390 break;
391 }
392 }
393 }
394
395 /*
396 * Prepare index arrays, so we can assume we have them throughout restore.
397 * It's possible we already did this, though.
398 */
399 if (AH->tocsByDumpId == NULL)
401
402 /*
403 * If we're using a DB connection, then connect it.
404 */
405 if (ropt->useDB)
406 {
407 pg_log_info("connecting to database for restore");
408 if (AH->version < K_VERS_1_3)
409 pg_fatal("direct database connections are not supported in pre-1.3 archives");
410
411 /*
412 * We don't want to guess at whether the dump will successfully
413 * restore; allow the attempt regardless of the version of the restore
414 * target.
415 */
416 AHX->minRemoteVersion = 0;
417 AHX->maxRemoteVersion = 9999999;
418
419 ConnectDatabaseAhx(AHX, &ropt->cparams, false);
420
421 /*
422 * If we're talking to the DB directly, don't send comments since they
423 * obscure SQL when displaying errors
424 */
425 AH->noTocComments = 1;
426 }
427
428 /*
429 * Work out if we have an implied schema-less restore. This can happen if
430 * the dump excluded the schema or the user has used a toc list to exclude
431 * all of the schema data. All we do is look for schema entries - if none
432 * are found then we unset the dumpSchema flag.
433 *
434 * We could scan for wanted TABLE entries, but that is not the same as
435 * data-only. At this stage, it seems unnecessary (6-Mar-2001).
436 */
437 if (ropt->dumpSchema)
438 {
439 bool no_schema_found = true;
440
441 for (te = AH->toc->next; te != AH->toc; te = te->next)
442 {
443 if ((te->reqs & REQ_SCHEMA) != 0)
444 {
445 no_schema_found = false;
446 break;
447 }
448 }
449 if (no_schema_found)
450 {
451 ropt->dumpSchema = false;
452 pg_log_info("implied no-schema restore");
453 }
454 }
455
456 /*
457 * Setup the output file if necessary.
458 */
459 sav = SaveOutput(AH);
461 SetOutput(AH, ropt->filename, ropt->compression_spec);
462
463 ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
464
465 /*
466 * If generating plain-text output, enter restricted mode to block any
467 * unexpected psql meta-commands. A malicious source might try to inject
468 * a variety of things via bogus responses to queries. While we cannot
469 * prevent such sources from affecting the destination at restore time, we
470 * can block psql meta-commands so that the client machine that runs psql
471 * with the dump output remains unaffected.
472 */
473 if (ropt->restrict_key)
474 ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
475
476 if (AH->archiveRemoteVersion)
477 ahprintf(AH, "-- Dumped from database version %s\n",
479 if (AH->archiveDumpVersion)
480 ahprintf(AH, "-- Dumped by pg_dump version %s\n",
482
483 ahprintf(AH, "\n");
484
485 if (AH->public.verbose)
486 dumpTimestamp(AH, "Started on", AH->createDate);
487
488 if (ropt->single_txn)
489 {
490 if (AH->connection)
491 StartTransaction(AHX);
492 else
493 ahprintf(AH, "BEGIN;\n\n");
494 }
495
496 /*
497 * Establish important parameter values right away.
498 */
500
502
503 /*
504 * Drop the items at the start, in reverse order
505 */
506 if (ropt->dropSchema)
507 {
508 for (te = AH->toc->prev; te != AH->toc; te = te->prev)
509 {
510 AH->currentTE = te;
511
512 /*
513 * In createDB mode, issue a DROP *only* for the database as a
514 * whole. Issuing drops against anything else would be wrong,
515 * because at this point we're connected to the wrong database.
516 * (The DATABASE PROPERTIES entry, if any, should be treated like
517 * the DATABASE entry.)
518 */
519 if (ropt->createDB)
520 {
521 if (strcmp(te->desc, "DATABASE") != 0 &&
522 strcmp(te->desc, "DATABASE PROPERTIES") != 0)
523 continue;
524 }
525
526 /* Otherwise, drop anything that's selected and has a dropStmt */
527 if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
528 {
529 bool not_allowed_in_txn = false;
530
531 pg_log_info("dropping %s %s", te->desc, te->tag);
532
533 /*
534 * In --transaction-size mode, we have to temporarily exit our
535 * transaction block to drop objects that can't be dropped
536 * within a transaction.
537 */
538 if (ropt->txn_size > 0)
539 {
540 if (strcmp(te->desc, "DATABASE") == 0 ||
541 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
542 {
543 not_allowed_in_txn = true;
544 if (AH->connection)
546 else
547 ahprintf(AH, "COMMIT;\n");
548 }
549 }
550
551 /* Select owner and schema as necessary */
552 _becomeOwner(AH, te);
553 _selectOutputSchema(AH, te->namespace);
554
555 /*
556 * Now emit the DROP command, if the object has one. Note we
557 * don't necessarily emit it verbatim; at this point we add an
558 * appropriate IF EXISTS clause, if the user requested it.
559 */
560 if (strcmp(te->desc, "BLOB METADATA") == 0)
561 {
562 /* We must generate the per-blob commands */
563 if (ropt->if_exists)
564 IssueCommandPerBlob(AH, te,
565 "SELECT pg_catalog.lo_unlink(oid) "
566 "FROM pg_catalog.pg_largeobject_metadata "
567 "WHERE oid = '", "'");
568 else
569 IssueCommandPerBlob(AH, te,
570 "SELECT pg_catalog.lo_unlink('",
571 "')");
572 }
573 else if (*te->dropStmt != '\0')
574 {
575 if (!ropt->if_exists ||
576 strncmp(te->dropStmt, "--", 2) == 0)
577 {
578 /*
579 * Without --if-exists, or if it's just a comment (as
580 * happens for the public schema), print the dropStmt
581 * as-is.
582 */
583 ahprintf(AH, "%s", te->dropStmt);
584 }
585 else
586 {
587 /*
588 * Inject an appropriate spelling of "if exists". For
589 * old-style large objects, we have a routine that
590 * knows how to do it, without depending on
591 * te->dropStmt; use that. For other objects we need
592 * to parse the command.
593 */
594 if (strcmp(te->desc, "BLOB") == 0)
595 {
597 }
598 else
599 {
600 char *dropStmt = pg_strdup(te->dropStmt);
601 char *dropStmtOrig = dropStmt;
603
604 /*
605 * Need to inject IF EXISTS clause after ALTER
606 * TABLE part in ALTER TABLE .. DROP statement
607 */
608 if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
609 {
611 "ALTER TABLE IF EXISTS");
612 dropStmt = dropStmt + 11;
613 }
614
615 /*
616 * ALTER TABLE..ALTER COLUMN..DROP DEFAULT does
617 * not support the IF EXISTS clause, and therefore
618 * we simply emit the original command for DEFAULT
619 * objects (modulo the adjustment made above).
620 *
621 * Likewise, don't mess with DATABASE PROPERTIES.
622 *
623 * If we used CREATE OR REPLACE VIEW as a means of
624 * quasi-dropping an ON SELECT rule, that should
625 * be emitted unchanged as well.
626 *
627 * For other object types, we need to extract the
628 * first part of the DROP which includes the
629 * object type. Most of the time this matches
630 * te->desc, so search for that; however for the
631 * different kinds of CONSTRAINTs, we know to
632 * search for hardcoded "DROP CONSTRAINT" instead.
633 */
634 if (strcmp(te->desc, "DEFAULT") == 0 ||
635 strcmp(te->desc, "DATABASE PROPERTIES") == 0 ||
636 strncmp(dropStmt, "CREATE OR REPLACE VIEW", 22) == 0)
637 appendPQExpBufferStr(ftStmt, dropStmt);
638 else
639 {
640 char buffer[40];
641 char *mark;
642
643 if (strcmp(te->desc, "CONSTRAINT") == 0 ||
644 strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
645 strcmp(te->desc, "FK CONSTRAINT") == 0)
646 strcpy(buffer, "DROP CONSTRAINT");
647 else
648 snprintf(buffer, sizeof(buffer), "DROP %s",
649 te->desc);
650
651 mark = strstr(dropStmt, buffer);
652
653 if (mark)
654 {
655 *mark = '\0';
656 appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
657 dropStmt, buffer,
658 mark + strlen(buffer));
659 }
660 else
661 {
662 /* complain and emit unmodified command */
663 pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
664 dropStmtOrig);
665 appendPQExpBufferStr(ftStmt, dropStmt);
666 }
667 }
668
669 ahprintf(AH, "%s", ftStmt->data);
670
671 destroyPQExpBuffer(ftStmt);
672 pg_free(dropStmtOrig);
673 }
674 }
675 }
676
677 /*
678 * In --transaction-size mode, re-establish the transaction
679 * block if needed; otherwise, commit after every N drops.
680 */
681 if (ropt->txn_size > 0)
682 {
683 if (not_allowed_in_txn)
684 {
685 if (AH->connection)
686 StartTransaction(AHX);
687 else
688 ahprintf(AH, "BEGIN;\n");
689 AH->txnCount = 0;
690 }
691 else if (++AH->txnCount >= ropt->txn_size)
692 {
693 if (AH->connection)
694 {
696 StartTransaction(AHX);
697 }
698 else
699 ahprintf(AH, "COMMIT;\nBEGIN;\n");
700 AH->txnCount = 0;
701 }
702 }
703 }
704 }
705
706 /*
707 * _selectOutputSchema may have set currSchema to reflect the effect
708 * of a "SET search_path" command it emitted. However, by now we may
709 * have dropped that schema; or it might not have existed in the first
710 * place. In either case the effective value of search_path will not
711 * be what we think. Forcibly reset currSchema so that we will
712 * re-establish the search_path setting when needed (after creating
713 * the schema).
714 *
715 * If we treated users as pg_dump'able objects then we'd need to reset
716 * currUser here too.
717 */
718 free(AH->currSchema);
719 AH->currSchema = NULL;
720 }
721
722 if (parallel_mode)
723 {
724 /*
725 * In parallel mode, turn control over to the parallel-restore logic.
726 */
727 ParallelState *pstate;
728 TocEntry pending_list;
729
730 /* The archive format module may need some setup for this */
733
734 pending_list_header_init(&pending_list);
735
736 /* This runs PRE_DATA items and then disconnects from the database */
737 restore_toc_entries_prefork(AH, &pending_list);
738 Assert(AH->connection == NULL);
739
740 /* ParallelBackupStart() will actually fork the processes */
741 pstate = ParallelBackupStart(AH);
742 restore_toc_entries_parallel(AH, pstate, &pending_list);
743 ParallelBackupEnd(AH, pstate);
744
745 /* reconnect the leader and see if we missed something */
746 restore_toc_entries_postfork(AH, &pending_list);
747 Assert(AH->connection != NULL);
748 }
749 else
750 {
751 /*
752 * In serial mode, process everything in three phases: normal items,
753 * then ACLs, then post-ACL items. We might be able to skip one or
754 * both extra phases in some cases, eg data-only restores.
755 */
756 bool haveACL = false;
757 bool havePostACL = false;
758
759 for (te = AH->toc->next; te != AH->toc; te = te->next)
760 {
761 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
762 continue; /* ignore if not to be dumped at all */
763
764 switch (_tocEntryRestorePass(te))
765 {
767 (void) restore_toc_entry(AH, te, false);
768 break;
769 case RESTORE_PASS_ACL:
770 haveACL = true;
771 break;
773 havePostACL = true;
774 break;
775 }
776 }
777
778 if (haveACL)
779 {
780 for (te = AH->toc->next; te != AH->toc; te = te->next)
781 {
782 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
784 (void) restore_toc_entry(AH, te, false);
785 }
786 }
787
788 if (havePostACL)
789 {
790 for (te = AH->toc->next; te != AH->toc; te = te->next)
791 {
792 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 &&
794 (void) restore_toc_entry(AH, te, false);
795 }
796 }
797 }
798
799 /*
800 * Close out any persistent transaction we may have. While these two
801 * cases are started in different places, we can end both cases here.
802 */
803 if (ropt->single_txn || ropt->txn_size > 0)
804 {
805 if (AH->connection)
807 else
808 ahprintf(AH, "COMMIT;\n\n");
809 }
810
811 if (AH->public.verbose)
812 dumpTimestamp(AH, "Completed on", time(NULL));
813
814 ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
815
816 /*
817 * If generating plain-text output, exit restricted mode at the very end
818 * of the script. This is not pro forma; in particular, pg_dumpall
819 * requires this when transitioning from one database to another.
820 */
821 if (ropt->restrict_key)
822 ahprintf(AH, "\\unrestrict %s\n\n", ropt->restrict_key);
823
824 /*
825 * Clean up & we're done.
826 */
828
830 RestoreOutput(AH, sav);
831
832 if (ropt->useDB)
834}
835
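For reference, when RestoreArchive() above writes a plain-text script rather than restoring directly into a database, the framing it emits around the per-object commands follows from the ahprintf() calls shown in the function. Roughly (illustrative; <key> stands for the restrict key and the version lines appear only when known):

--
-- PostgreSQL database dump
--

\restrict <key>

-- Dumped from database version <source server version>
-- Dumped by pg_dump version <pg_dump version>

-- ... DROP statements (with --clean), then the selected CREATE/COPY/ACL items ...

--
-- PostgreSQL database dump complete
--

\unrestrict <key>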
836/*
837 * Restore a single TOC item. Used in both parallel and non-parallel restore;
838 * is_parallel is true if we are in a worker child process.
839 *
840 * Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if
841 * the parallel parent has to make the corresponding status update.
842 */
843static int
844restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
845{
846 RestoreOptions *ropt = AH->public.ropt;
847 int status = WORKER_OK;
848 int reqs;
849 bool defnDumped;
850
851 AH->currentTE = te;
852
853 /* Dump any relevant dump warnings to stderr */
854 if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
855 {
856 if (ropt->dumpSchema && te->defn != NULL && strlen(te->defn) != 0)
857 pg_log_warning("warning from original dump file: %s", te->defn);
858 else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
859 pg_log_warning("warning from original dump file: %s", te->copyStmt);
860 }
861
862 /* Work out what, if anything, we want from this entry */
863 reqs = te->reqs;
864
865 defnDumped = false;
866
867 /*
868 * If it has a schema component that we want, then process that
869 */
870 if ((reqs & REQ_SCHEMA) != 0)
871 {
872 bool object_is_db = false;
873
874 /*
875 * In --transaction-size mode, must exit our transaction block to
876 * create a database or set its properties.
877 */
878 if (strcmp(te->desc, "DATABASE") == 0 ||
879 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
880 {
881 object_is_db = true;
882 if (ropt->txn_size > 0)
883 {
884 if (AH->connection)
886 else
887 ahprintf(AH, "COMMIT;\n\n");
888 }
889 }
890
891 /* Show namespace in log message if available */
892 if (te->namespace)
893 pg_log_info("creating %s \"%s.%s\"",
894 te->desc, te->namespace, te->tag);
895 else
896 pg_log_info("creating %s \"%s\"",
897 te->desc, te->tag);
898
900 defnDumped = true;
901
902 if (strcmp(te->desc, "TABLE") == 0)
903 {
904 if (AH->lastErrorTE == te)
905 {
906 /*
907 * We failed to create the table. If
908 * --no-data-for-failed-tables was given, mark the
909 * corresponding TABLE DATA to be ignored.
910 *
911 * In the parallel case this must be done in the parent, so we
912 * just set the return value.
913 */
914 if (ropt->noDataForFailedTables)
915 {
916 if (is_parallel)
917 status = WORKER_INHIBIT_DATA;
918 else
920 }
921 }
922 else
923 {
924 /*
925 * We created the table successfully. Mark the corresponding
926 * TABLE DATA for possible truncation.
927 *
928 * In the parallel case this must be done in the parent, so we
929 * just set the return value.
930 */
931 if (is_parallel)
932 status = WORKER_CREATE_DONE;
933 else
934 mark_create_done(AH, te);
935 }
936 }
937
938 /*
939 * If we created a DB, connect to it. Also, if we changed DB
940 * properties, reconnect to ensure that relevant GUC settings are
941 * applied to our session. (That also restarts the transaction block
942 * in --transaction-size mode.)
943 */
944 if (object_is_db)
945 {
946 pg_log_info("connecting to new database \"%s\"", te->tag);
947 _reconnectToDB(AH, te->tag);
948 }
949 }
950
951 /*
952 * If it has a data component that we want, then process that
953 */
954 if ((reqs & REQ_DATA) != 0)
955 {
956 /*
957 * hadDumper will be set if there is genuine data component for this
958 * node. Otherwise, we need to check the defn field for statements
959 * that need to be executed in data-only restores.
960 */
961 if (te->hadDumper)
962 {
963 /*
964 * If we can output the data, then restore it.
965 */
966 if (AH->PrintTocDataPtr != NULL)
967 {
969
970 if (strcmp(te->desc, "BLOBS") == 0 ||
971 strcmp(te->desc, "BLOB COMMENTS") == 0)
972 {
973 pg_log_info("processing %s", te->desc);
974
975 _selectOutputSchema(AH, "pg_catalog");
976
977 /* Send BLOB COMMENTS data to ExecuteSimpleCommands() */
978 if (strcmp(te->desc, "BLOB COMMENTS") == 0)
980
981 AH->PrintTocDataPtr(AH, te);
982
984 }
985 else
986 {
987 bool use_truncate;
988
990
991 /* Select owner and schema as necessary */
992 _becomeOwner(AH, te);
993 _selectOutputSchema(AH, te->namespace);
994
995 pg_log_info("processing data for table \"%s.%s\"",
996 te->namespace, te->tag);
997
998 /*
999 * In parallel restore, if we created the table earlier in
1000 * this run (so that we know it is empty) and we are not
1001 * restoring a load-via-partition-root data item then we
1002 * wrap the COPY in a transaction and precede it with a
1003 * TRUNCATE. If wal_level is set to minimal this prevents
1004 * WAL-logging the COPY. This obtains a speedup similar
1005 * to that from using single_txn mode in non-parallel
1006 * restores.
1007 *
1008 * We mustn't do this for load-via-partition-root cases
1009 * because some data might get moved across partition
1010 * boundaries, risking deadlock and/or loss of previously
1011 * loaded data. (We assume that all partitions of a
1012 * partitioned table will be treated the same way.)
1013 */
1014 use_truncate = is_parallel && te->created &&
1016
1017 if (use_truncate)
1018 {
1019 /*
1020 * Parallel restore is always talking directly to a
1021 * server, so no need to see if we should issue BEGIN.
1022 */
1024
1025 /*
1026 * Issue TRUNCATE with ONLY so that child tables are
1027 * not wiped.
1028 */
1029 ahprintf(AH, "TRUNCATE TABLE ONLY %s;\n\n",
1030 fmtQualifiedId(te->namespace, te->tag));
1031 }
1032
1033 /*
1034 * If we have a copy statement, use it.
1035 */
1036 if (te->copyStmt && strlen(te->copyStmt) > 0)
1037 {
1038 ahprintf(AH, "%s", te->copyStmt);
1040 }
1041 else
1043
1044 AH->PrintTocDataPtr(AH, te);
1045
1046 /*
1047 * Terminate COPY if needed.
1048 */
1049 if (AH->outputKind == OUTPUT_COPYDATA &&
1050 RestoringToDB(AH))
1051 EndDBCopyMode(&AH->public, te->tag);
1053
1054 /* close out the transaction started above */
1055 if (use_truncate)
1057
1059 }
1060 }
1061 }
1062 else if (!defnDumped)
1063 {
1064 /* If we haven't already dumped the defn part, do so now */
1065 pg_log_info("executing %s %s", te->desc, te->tag);
1067 }
1068 }
1069
1070 /*
1071 * If it has a statistics component that we want, then process that
1072 */
1073 if ((reqs & REQ_STATS) != 0)
1075
1076 /*
1077 * If we emitted anything for this TOC entry, that counts as one action
1078 * against the transaction-size limit. Commit if it's time to.
1079 */
1080 if ((reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0 && ropt->txn_size > 0)
1081 {
1082 if (++AH->txnCount >= ropt->txn_size)
1083 {
1084 if (AH->connection)
1085 {
1088 }
1089 else
1090 ahprintf(AH, "COMMIT;\nBEGIN;\n\n");
1091 AH->txnCount = 0;
1092 }
1093 }
1094
1095 if (AH->public.n_errors > 0 && status == WORKER_OK)
1096 status = WORKER_IGNORED_ERRORS;
1097
1098 return status;
1099}
1100
1101/*
1102 * Allocate a new RestoreOptions block.
1103 * This is mainly so we can initialize it, but also for future expansion,
1104 */
1105RestoreOptions *
1106NewRestoreOptions(void)
1107{
1109
1111
1112 /* set any fields that shouldn't default to zeroes */
1113 opts->format = archUnknown;
1114 opts->cparams.promptPassword = TRI_DEFAULT;
1115 opts->dumpSections = DUMP_UNSECTIONED;
1116 opts->compression_spec.algorithm = PG_COMPRESSION_NONE;
1117 opts->compression_spec.level = 0;
1118 opts->dumpSchema = true;
1119 opts->dumpData = true;
1120 opts->dumpStatistics = true;
1121
1122 return opts;
1123}
1124
1125static void
1126_disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
1127{
1128 RestoreOptions *ropt = AH->public.ropt;
1129
1130 /* This hack is only needed in a data-only restore */
1131 if (ropt->dumpSchema || !ropt->disable_triggers)
1132 return;
1133
1134 pg_log_info("disabling triggers for %s", te->tag);
1135
1136 /*
1137 * Become superuser if possible, since they are the only ones who can
1138 * disable constraint triggers. If -S was not given, assume the initial
1139 * user identity is a superuser. (XXX would it be better to become the
1140 * table owner?)
1141 */
1142 _becomeUser(AH, ropt->superuser);
1143
1144 /*
1145 * Disable them.
1146 */
1147 ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n",
1148 fmtQualifiedId(te->namespace, te->tag));
1149}
1150
1151static void
1152_enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te)
1153{
1154 RestoreOptions *ropt = AH->public.ropt;
1155
1156 /* This hack is only needed in a data-only restore */
1157 if (ropt->dumpSchema || !ropt->disable_triggers)
1158 return;
1159
1160 pg_log_info("enabling triggers for %s", te->tag);
1161
1162 /*
1163 * Become superuser if possible, since they are the only ones who can
1164 * disable constraint triggers. If -S was not given, assume the initial
1165 * user identity is a superuser. (XXX would it be better to become the
1166 * table owner?)
1167 */
1168 _becomeUser(AH, ropt->superuser);
1169
1170 /*
1171 * Enable them.
1172 */
1173 ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n",
1174 fmtQualifiedId(te->namespace, te->tag));
1175}
1176
1177/*
1178 * Detect whether a TABLE DATA TOC item is performing "load via partition
1179 * root", that is the target table is an ancestor partition rather than the
1180 * table the TOC item is nominally for.
1181 *
1182 * In newer archive files this can be detected by checking for a special
1183 * comment placed in te->defn. In older files we have to fall back to seeing
1184 * if the COPY statement targets the named table or some other one. This
1185 * will not work for data dumped as INSERT commands, so we could give a false
1186 * negative in that case; fortunately, that's a rarely-used option.
1187 */
1188static bool
1189is_load_via_partition_root(TocEntry *te)
1190{
1191 if (te->defn &&
1192 strncmp(te->defn, "-- load via partition root ", 27) == 0)
1193 return true;
1194 if (te->copyStmt && *te->copyStmt)
1195 {
1196 PQExpBuffer copyStmt = createPQExpBuffer();
1197 bool result;
1198
1199 /*
1200 * Build the initial part of the COPY as it would appear if the
1201 * nominal target table is the actual target. If we see anything
1202 * else, it must be a load-via-partition-root case.
1203 */
1204 appendPQExpBuffer(copyStmt, "COPY %s ",
1205 fmtQualifiedId(te->namespace, te->tag));
1206 result = strncmp(te->copyStmt, copyStmt->data, copyStmt->len) != 0;
1207 destroyPQExpBuffer(copyStmt);
1208 return result;
1209 }
1210 /* Assume it's not load-via-partition-root */
1211 return false;
1212}
1213
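/*
 * Illustrative example (not from a real archive) of the two cases the
 * function above detects, for a TABLE DATA entry whose te->tag is the
 * partition "sales_2024q1" in namespace "public":
 *
 *   newer archives:  te->defn begins with
 *                    "-- load via partition root public.sales"
 *
 *   older archives:  te->copyStmt reads
 *                    "COPY public.sales (id, amount) FROM stdin;\n"
 *                    which names the root table rather than the nominal
 *                    target public.sales_2024q1, so the prefix comparison
 *                    fails and the entry is treated as
 *                    load-via-partition-root.
 */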
1214/*
1215 * This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
1216 */
1217
1218/* Public */
1219void
1220WriteData(Archive *AHX, const void *data, size_t dLen)
1221{
1222 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1223
1224 if (!AH->currToc)
1225 pg_fatal("internal error -- WriteData cannot be called outside the context of a DataDumper routine");
1226
1227 AH->WriteDataPtr(AH, data, dLen);
1228}
1229
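As a hedged illustration of the dumper interface mentioned above (not code from this file), a data-dumper callback registered through ArchiveEntry() below receives the Archive pointer plus its own argument and streams its output through WriteData(); the archiver invokes it with currToc set, which is why the check above succeeds. The function and its payload here are hypothetical.

/* Hypothetical data-dumper callback; the archiver calls it with currToc set. */
static int
exampleDataDumper(Archive *fout, const void *userArg)
{
	const char *rows = (const char *) userArg;	/* pre-rendered COPY data */

	WriteData(fout, rows, strlen(rows));
	return 1;
}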
1230/*
1231 * Create a new TOC entry. The TOC was designed as a TOC, but is now the
1232 * repository for all metadata. But the name has stuck.
1233 *
1234 * The new entry is added to the Archive's TOC list. Most callers can ignore
1235 * the result value because nothing else need be done, but a few want to
1236 * manipulate the TOC entry further.
1237 */
1238
1239/* Public */
1240TocEntry *
1241ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId,
1242 ArchiveOpts *opts)
1243{
1244 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1245 TocEntry *newToc;
1246
1247 newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
1248
1249 AH->tocCount++;
1250 if (dumpId > AH->maxDumpId)
1251 AH->maxDumpId = dumpId;
1252
1253 newToc->prev = AH->toc->prev;
1254 newToc->next = AH->toc;
1255 AH->toc->prev->next = newToc;
1256 AH->toc->prev = newToc;
1257
1258 newToc->catalogId = catalogId;
1259 newToc->dumpId = dumpId;
1260 newToc->section = opts->section;
1261
1262 newToc->tag = pg_strdup(opts->tag);
1263 newToc->namespace = opts->namespace ? pg_strdup(opts->namespace) : NULL;
1264 newToc->tablespace = opts->tablespace ? pg_strdup(opts->tablespace) : NULL;
1265 newToc->tableam = opts->tableam ? pg_strdup(opts->tableam) : NULL;
1266 newToc->relkind = opts->relkind;
1267 newToc->owner = opts->owner ? pg_strdup(opts->owner) : NULL;
1268 newToc->desc = pg_strdup(opts->description);
1269 newToc->defn = opts->createStmt ? pg_strdup(opts->createStmt) : NULL;
1270 newToc->dropStmt = opts->dropStmt ? pg_strdup(opts->dropStmt) : NULL;
1271 newToc->copyStmt = opts->copyStmt ? pg_strdup(opts->copyStmt) : NULL;
1272
1273 if (opts->nDeps > 0)
1274 {
1275 newToc->dependencies = (DumpId *) pg_malloc(opts->nDeps * sizeof(DumpId));
1276 memcpy(newToc->dependencies, opts->deps, opts->nDeps * sizeof(DumpId));
1277 newToc->nDeps = opts->nDeps;
1278 }
1279 else
1280 {
1281 newToc->dependencies = NULL;
1282 newToc->nDeps = 0;
1283 }
1284
1285 newToc->dataDumper = opts->dumpFn;
1286 newToc->dataDumperArg = opts->dumpArg;
1287 newToc->hadDumper = opts->dumpFn ? true : false;
1288
1289 newToc->defnDumper = opts->defnFn;
1290 newToc->defnDumperArg = opts->defnArg;
1291
1292 newToc->formatData = NULL;
1293 newToc->dataLength = 0;
1294
1295 if (AH->ArchiveEntryPtr != NULL)
1296 AH->ArchiveEntryPtr(AH, newToc);
1297
1298 return newToc;
1299}
1300
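For context, a hedged sketch (not code from this file) of how pg_dump-side code registers a data-carrying TOC entry with ArchiveEntry(). ARCHIVE_OPTS is pg_dump's designated-initializer convenience macro for building an ArchiveOpts; exampleDataDumper is the hypothetical callback sketched after WriteData() above, and the identifiers and statement text are placeholders.

/* Hypothetical caller-side sketch; not part of this file. */
static void
register_example_table_data(Archive *fout, CatalogId catId, DumpId dumpId)
{
	(void) ArchiveEntry(fout, catId, dumpId,
						ARCHIVE_OPTS(.tag = "example_table",
									 .namespace = "public",
									 .owner = "example_owner",
									 .description = "TABLE DATA",
									 .section = SECTION_DATA,
									 .copyStmt = "COPY public.example_table (id) FROM stdin;\n",
									 .dumpFn = exampleDataDumper,
									 .dumpArg = NULL));
}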
1301/* Public */
1302void
1303PrintTOCSummary(Archive *AHX)
1304{
1305 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1306 RestoreOptions *ropt = AH->public.ropt;
1307 TocEntry *te;
1308 pg_compress_specification out_compression_spec = {0};
1309 teSection curSection;
1310 CompressFileHandle *sav;
1311 const char *fmtName;
1312 char stamp_str[64];
1313
1314 /* TOC is always uncompressed */
1315 out_compression_spec.algorithm = PG_COMPRESSION_NONE;
1316
1317 sav = SaveOutput(AH);
1318 if (ropt->filename)
1319 SetOutput(AH, ropt->filename, out_compression_spec);
1320
1321 if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
1322 localtime(&AH->createDate)) == 0)
1323 strcpy(stamp_str, "[unknown]");
1324
1325 ahprintf(AH, ";\n; Archive created at %s\n", stamp_str);
1326 ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %s\n",
1327 sanitize_line(AH->archdbname, false),
1328 AH->tocCount,
1330
1331 switch (AH->format)
1332 {
1333 case archCustom:
1334 fmtName = "CUSTOM";
1335 break;
1336 case archDirectory:
1337 fmtName = "DIRECTORY";
1338 break;
1339 case archTar:
1340 fmtName = "TAR";
1341 break;
1342 default:
1343 fmtName = "UNKNOWN";
1344 }
1345
1346 ahprintf(AH, "; Dump Version: %d.%d-%d\n",
1348 ahprintf(AH, "; Format: %s\n", fmtName);
1349 ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
1350 ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
1351 if (AH->archiveRemoteVersion)
1352 ahprintf(AH, "; Dumped from database version: %s\n",
1354 if (AH->archiveDumpVersion)
1355 ahprintf(AH, "; Dumped by pg_dump version: %s\n",
1356 AH->archiveDumpVersion);
1357
1358 ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
1359
1360 curSection = SECTION_PRE_DATA;
1361 for (te = AH->toc->next; te != AH->toc; te = te->next)
1362 {
1363 /* This bit must match ProcessArchiveRestoreOptions' marking logic */
1364 if (te->section != SECTION_NONE)
1365 curSection = te->section;
1366 te->reqs = _tocEntryRequired(te, curSection, AH);
1367 /* Now, should we print it? */
1368 if (ropt->verbose ||
1369 (te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) != 0)
1370 {
1371 char *sanitized_name;
1372 char *sanitized_schema;
1373 char *sanitized_owner;
1374
1375 /*
1376 */
1377 sanitized_name = sanitize_line(te->tag, false);
1378 sanitized_schema = sanitize_line(te->namespace, true);
1379 sanitized_owner = sanitize_line(te->owner, false);
1380
1381 ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
1383 te->desc, sanitized_schema, sanitized_name,
1384 sanitized_owner);
1385
1386 free(sanitized_name);
1387 free(sanitized_schema);
1388 free(sanitized_owner);
1389 }
1390 if (ropt->verbose && te->nDeps > 0)
1391 {
1392 int i;
1393
1394 ahprintf(AH, ";\tdepends on:");
1395 for (i = 0; i < te->nDeps; i++)
1396 ahprintf(AH, " %d", te->dependencies[i]);
1397 ahprintf(AH, "\n");
1398 }
1399 }
1400
1401 /* Enforce strict names checking */
1402 if (ropt->strict_names)
1403 StrictNamesCheck(ropt);
1404
1405 if (ropt->filename)
1406 RestoreOutput(AH, sav);
1407}
1408
1409/***********
1410 * Large Object Archival
1411 ***********/
1412
1413/* Called by a dumper to signal start of a LO */
1414int
1415StartLO(Archive *AHX, Oid oid)
1416{
1417 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1418
1419 if (!AH->StartLOPtr)
1420 pg_fatal("large-object output not supported in chosen format");
1421
1422 AH->StartLOPtr(AH, AH->currToc, oid);
1423
1424 return 1;
1425}
1426
1427/* Called by a dumper to signal end of a LO */
1428int
1429EndLO(Archive *AHX, Oid oid)
1430{
1431 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1432
1433 if (AH->EndLOPtr)
1434 AH->EndLOPtr(AH, AH->currToc, oid);
1435
1436 return 1;
1437}
1438
1439/**********
1440 * Large Object Restoration
1441 **********/
1442
1443/*
1444 * Called by a format handler before a group of LOs is restored
1445 */
1446void
1447StartRestoreLOs(ArchiveHandle *AH)
1448{
1449 RestoreOptions *ropt = AH->public.ropt;
1450
1451 /*
1452 * LOs must be restored within a transaction block, since we need the LO
1453 * handle to stay open while we write it. Establish a transaction unless
1454 * there's one being used globally.
1455 */
1456 if (!(ropt->single_txn || ropt->txn_size > 0))
1457 {
1458 if (AH->connection)
1460 else
1461 ahprintf(AH, "BEGIN;\n\n");
1462 }
1463
1464 AH->loCount = 0;
1465}
1466
1467/*
1468 * Called by a format handler after a group of LOs is restored
1469 */
1470void
1471EndRestoreLOs(ArchiveHandle *AH)
1472{
1473 RestoreOptions *ropt = AH->public.ropt;
1474
1475 if (!(ropt->single_txn || ropt->txn_size > 0))
1476 {
1477 if (AH->connection)
1479 else
1480 ahprintf(AH, "COMMIT;\n\n");
1481 }
1482
1483 pg_log_info(ngettext("restored %d large object",
1484 "restored %d large objects",
1485 AH->loCount),
1486 AH->loCount);
1487}
1488
1489
1490/*
1491 * Called by a format handler to initiate restoration of a LO
1492 */
1493void
1494StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop)
1495{
1496 bool old_lo_style = (AH->version < K_VERS_1_12);
1497 Oid loOid;
1498
1499 AH->loCount++;
1500
1501 /* Initialize the LO Buffer */
1502 if (AH->lo_buf == NULL)
1503 {
1504 /* First time through (in this process) so allocate the buffer */
1505 AH->lo_buf_size = LOBBUFSIZE;
1507 }
1508 AH->lo_buf_used = 0;
1509
1510 pg_log_info("restoring large object with OID %u", oid);
1511
1512 /* With an old archive we must do drop and create logic here */
1513 if (old_lo_style && drop)
1514 DropLOIfExists(AH, oid);
1515
1516 if (AH->connection)
1517 {
1518 if (old_lo_style)
1519 {
1520 loOid = lo_create(AH->connection, oid);
1521 if (loOid == 0 || loOid != oid)
1522 pg_fatal("could not create large object %u: %s",
1523 oid, PQerrorMessage(AH->connection));
1524 }
1525 AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
1526 if (AH->loFd == -1)
1527 pg_fatal("could not open large object %u: %s",
1528 oid, PQerrorMessage(AH->connection));
1529 }
1530 else
1531 {
1532 if (old_lo_style)
1533 ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
1534 oid, INV_WRITE);
1535 else
1536 ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
1537 oid, INV_WRITE);
1538 }
1539
1540 AH->writingLO = true;
1541}
1542
1543void
1544EndRestoreLO(ArchiveHandle *AH, Oid oid)
1545{
1546 if (AH->lo_buf_used > 0)
1547 {
1548 /* Write remaining bytes from the LO buffer */
1549 dump_lo_buf(AH);
1550 }
1551
1552 AH->writingLO = false;
1553
1554 if (AH->connection)
1555 {
1556 lo_close(AH->connection, AH->loFd);
1557 AH->loFd = -1;
1558 }
1559 else
1560 {
1561 ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
1562 }
1563}
1564
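Putting the four routines above together, here is a hedged sketch (not code from this file) of the calling sequence a format handler follows when restoring a group of large objects. The OID list, payload, and drop flag are placeholders; real handlers stream data read from the archive (see the custom and directory format modules).

static void
restore_lo_group_example(ArchiveHandle *AH, const Oid *oids, int noids)
{
	StartRestoreLOs(AH);		/* begins a transaction unless one is already open */
	for (int i = 0; i < noids; i++)
	{
		StartRestoreLO(AH, oids[i], false /* drop */ );
		/* write the object's contents; ahwrite() routes them into the LO */
		ahwrite("example payload", 1, strlen("example payload"), AH);
		EndRestoreLO(AH, oids[i]);
	}
	EndRestoreLOs(AH);			/* commits and logs how many LOs were restored */
}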
1565/***********
1566 * Sorting and Reordering
1567 ***********/
1568
1569void
1570SortTocFromFile(Archive *AHX)
1571{
1572 ArchiveHandle *AH = (ArchiveHandle *) AHX;
1573 RestoreOptions *ropt = AH->public.ropt;
1574 FILE *fh;
1575 StringInfoData linebuf;
1576
1577 /* Allocate space for the 'wanted' array, and init it */
1578 ropt->idWanted = (bool *) pg_malloc0(sizeof(bool) * AH->maxDumpId);
1579
1580 /* Setup the file */
1581 fh = fopen(ropt->tocFile, PG_BINARY_R);
1582 if (!fh)
1583 pg_fatal("could not open TOC file \"%s\": %m", ropt->tocFile);
1584
1585 initStringInfo(&linebuf);
1586
1587 while (pg_get_line_buf(fh, &linebuf))
1588 {
1589 char *cmnt;
1590 char *endptr;
1591 DumpId id;
1592 TocEntry *te;
1593
1594 /* Truncate line at comment, if any */
1595 cmnt = strchr(linebuf.data, ';');
1596 if (cmnt != NULL)
1597 {
1598 cmnt[0] = '\0';
1599 linebuf.len = cmnt - linebuf.data;
1600 }
1601
1602 /* Ignore if all blank */
1603 if (strspn(linebuf.data, " \t\r\n") == linebuf.len)
1604 continue;
1605
1606 /* Get an ID, check it's valid and not already seen */
1607 id = strtol(linebuf.data, &endptr, 10);
1608 if (endptr == linebuf.data || id <= 0 || id > AH->maxDumpId ||
1609 ropt->idWanted[id - 1])
1610 {
1611 pg_log_warning("line ignored: %s", linebuf.data);
1612 continue;
1613 }
1614
1615 /* Find TOC entry */
1616 te = getTocEntryByDumpId(AH, id);
1617 if (!te)
1618 pg_fatal("could not find entry for ID %d",
1619 id);
1620
1621 /* Mark it wanted */
1622 ropt->idWanted[id - 1] = true;
1623
1624 /*
1625 * Move each item to the end of the list as it is selected, so that
1626 * they are placed in the desired order. Any unwanted items will end
1627 * up at the front of the list, which may seem unintuitive but it's
1628 * what we need. In an ordinary serial restore that makes no
1629 * difference, but in a parallel restore we need to mark unrestored
1630 * items' dependencies as satisfied before we start examining
1631 * restorable items. Otherwise they could have surprising
1632 * side-effects on the order in which restorable items actually get
1633 * restored.
1634 */
1635 _moveBefore(AH->toc, te);
1636 }
1637
1638 pg_free(linebuf.data);
1639
1640 if (fclose(fh) != 0)
1641 pg_fatal("could not close TOC file: %m");
1642}
1643
1644/**********************
1645 * Convenience functions that look like standard IO functions
1646 * for writing data when in dump mode.
1647 **********************/
1648
1649/* Public */
1650void
1651archputs(const char *s, Archive *AH)
1652{
1653 WriteData(AH, s, strlen(s));
1654}
1655
1656/* Public */
1657int
1658archprintf(Archive *AH, const char *fmt,...)
1659{
1660 int save_errno = errno;
1661 char *p;
1662 size_t len = 128; /* initial assumption about buffer size */
1663 size_t cnt;
1664
1665 for (;;)
1666 {
1667 va_list args;
1668
1669 /* Allocate work buffer. */
1670 p = (char *) pg_malloc(len);
1671
1672 /* Try to format the data. */
1673 errno = save_errno;
1674 va_start(args, fmt);
1675 cnt = pvsnprintf(p, len, fmt, args);
1676 va_end(args);
1677
1678 if (cnt < len)
1679 break; /* success */
1680
1681 /* Release buffer and loop around to try again with larger len. */
1682 free(p);
1683 len = cnt;
1684 }
1685
1686 WriteData(AH, p, cnt);
1687 free(p);
1688 return (int) cnt;
1689}
1690
1691
1692/*******************************
1693 * Stuff below here should be 'private' to the archiver routines
1694 *******************************/
1695
1696static void
1697SetOutput(ArchiveHandle *AH, const char *filename,
1698 const pg_compress_specification compression_spec)
1699{
1700 CompressFileHandle *CFH;
1701 const char *mode;
1702 int fn = -1;
1703
1704 if (filename)
1705 {
1706 if (strcmp(filename, "-") == 0)
1707 fn = fileno(stdout);
1708 }
1709 else if (AH->FH)
1710 fn = fileno(AH->FH);
1711 else if (AH->fSpec)
1712 {
1713 filename = AH->fSpec;
1714 }
1715 else
1716 fn = fileno(stdout);
1717
1718 if (AH->mode == archModeAppend)
1719 mode = PG_BINARY_A;
1720 else
1721 mode = PG_BINARY_W;
1722
1723 CFH = InitCompressFileHandle(compression_spec);
1724
1725 if (!CFH->open_func(filename, fn, mode, CFH))
1726 {
1727 if (filename)
1728 pg_fatal("could not open output file \"%s\": %m", filename);
1729 else
1730 pg_fatal("could not open output file: %m");
1731 }
1732
1733 AH->OF = CFH;
1734}
1735
1736static CompressFileHandle *
1737SaveOutput(ArchiveHandle *AH)
1738{
1739 return (CompressFileHandle *) AH->OF;
1740}
1741
1742static void
1743RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput)
1744{
1745 errno = 0;
1746 if (!EndCompressFileHandle(AH->OF))
1747 pg_fatal("could not close output file: %m");
1748
1749 AH->OF = savedOutput;
1750}
1751
1752
1753
1754/*
1755 * Print formatted text to the output file (usually stdout).
1756 */
1757int
1758ahprintf(ArchiveHandle *AH, const char *fmt,...)
1759{
1760 int save_errno = errno;
1761 char *p;
1762 size_t len = 128; /* initial assumption about buffer size */
1763 size_t cnt;
1764
1765 for (;;)
1766 {
1767 va_list args;
1768
1769 /* Allocate work buffer. */
1770 p = (char *) pg_malloc(len);
1771
1772 /* Try to format the data. */
1773 errno = save_errno;
1774 va_start(args, fmt);
1775 cnt = pvsnprintf(p, len, fmt, args);
1776 va_end(args);
1777
1778 if (cnt < len)
1779 break; /* success */
1780
1781 /* Release buffer and loop around to try again with larger len. */
1782 free(p);
1783 len = cnt;
1784 }
1785
1786 ahwrite(p, 1, cnt, AH);
1787 free(p);
1788 return (int) cnt;
1789}
1790
1791/*
1792 * Single place for logic which says 'We are restoring to a direct DB connection'.
1793 */
1794static int
1795RestoringToDB(ArchiveHandle *AH)
1796{
1797 RestoreOptions *ropt = AH->public.ropt;
1798
1799 return (ropt && ropt->useDB && AH->connection);
1800}
1801
1802/*
1803 * Dump the current contents of the LO data buffer while writing a LO
1804 */
1805static void
1806dump_lo_buf(ArchiveHandle *AH)
1807{
1808 if (AH->connection)
1809 {
1810 int res;
1811
1812 res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
1813 pg_log_debug(ngettext("wrote %zu byte of large object data (result = %d)",
1814 "wrote %zu bytes of large object data (result = %d)",
1815 AH->lo_buf_used),
1816 AH->lo_buf_used, res);
1817 /* We assume there are no short writes, only errors */
1818 if (res != AH->lo_buf_used)
1819 warn_or_exit_horribly(AH, "could not write to large object: %s",
1821 }
1822 else
1823 {
1825
1827 (const unsigned char *) AH->lo_buf,
1828 AH->lo_buf_used,
1829 AH);
1830
1831 /* Hack: turn off writingLO so ahwrite doesn't recurse to here */
1832 AH->writingLO = false;
1833 ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
1834 AH->writingLO = true;
1835
1837 }
1838 AH->lo_buf_used = 0;
1839}
1840
1841
1842/*
1843 * Write buffer to the output file (usually stdout). This is used for
1844 * outputting 'restore' scripts etc. It is even possible for an archive
1845 * format to create a custom output routine to 'fake' a restore if it
1846 * wants to generate a script (see TAR output).
1847 */
1848void
1849ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
1850{
1851 int bytes_written = 0;
1852
1853 if (AH->writingLO)
1854 {
1855 size_t remaining = size * nmemb;
1856
1857 while (AH->lo_buf_used + remaining > AH->lo_buf_size)
1858 {
1859 size_t avail = AH->lo_buf_size - AH->lo_buf_used;
1860
1861 memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
1862 ptr = (const char *) ptr + avail;
1863 remaining -= avail;
1864 AH->lo_buf_used += avail;
1865 dump_lo_buf(AH);
1866 }
1867
1868 memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
1869 AH->lo_buf_used += remaining;
1870
1871 bytes_written = size * nmemb;
1872 }
1873 else if (AH->CustomOutPtr)
1874 bytes_written = AH->CustomOutPtr(AH, ptr, size * nmemb);
1875
1876 /*
1877 * If we're doing a restore, and it's direct to DB, and we're connected
1878 * then send it to the DB.
1879 */
1880 else if (RestoringToDB(AH))
1881 bytes_written = ExecuteSqlCommandBuf(&AH->public, (const char *) ptr, size * nmemb);
1882 else
1883 {
1885
1886 CFH->write_func(ptr, size * nmemb, CFH);
1887 bytes_written = size * nmemb;
1888 }
1889
1890 if (bytes_written != size * nmemb)
1892}
1893
1894/* on some error, we may decide to go on... */
1895void
1896warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
1897{
1898 va_list ap;
1899
1900 switch (AH->stage)
1901 {
1902
1903 case STAGE_NONE:
1904 /* Do nothing special */
1905 break;
1906
1907 case STAGE_INITIALIZING:
1908 if (AH->stage != AH->lastErrorStage)
1909 pg_log_info("while INITIALIZING:");
1910 break;
1911
1912 case STAGE_PROCESSING:
1913 if (AH->stage != AH->lastErrorStage)
1914 pg_log_info("while PROCESSING TOC:");
1915 break;
1916
1917 case STAGE_FINALIZING:
1918 if (AH->stage != AH->lastErrorStage)
1919 pg_log_info("while FINALIZING:");
1920 break;
1921 }
1922 if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
1923 {
1924 pg_log_info("from TOC entry %d; %u %u %s %s %s",
1925 AH->currentTE->dumpId,
1927 AH->currentTE->catalogId.oid,
1928 AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
1929 AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
1930 AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
1931 }
1932 AH->lastErrorStage = AH->stage;
1933 AH->lastErrorTE = AH->currentTE;
1934
1935 va_start(ap, fmt);
1937 va_end(ap);
1938
1939 if (AH->public.exit_on_error)
1940 exit_nicely(1);
1941 else
1942 AH->public.n_errors++;
1943}
1944
1945#ifdef NOT_USED
1946
1947static void
1948_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
1949{
1950 /* Unlink te from list */
1951 te->prev->next = te->next;
1952 te->next->prev = te->prev;
1953
1954 /* and insert it after "pos" */
1955 te->prev = pos;
1956 te->next = pos->next;
1957 pos->next->prev = te;
1958 pos->next = te;
1959}
1960#endif
1961
1962static void
1963_moveBefore(TocEntry *pos, TocEntry *te)
1964{
1965 /* Unlink te from list */
1966 te->prev->next = te->next;
1967 te->next->prev = te->prev;
1968
1969 /* and insert it before "pos" */
1970 te->prev = pos->prev;
1971 te->next = pos;
1972 pos->prev->next = te;
1973 pos->prev = te;
1974}
1975
1976/*
1977 * Build index arrays for the TOC list
1978 *
1979 * This should be invoked only after we have created or read in all the TOC
1980 * items.
1981 *
1982 * The arrays are indexed by dump ID (so entry zero is unused). Note that the
1983 * array entries run only up to maxDumpId. We might see dependency dump IDs
1984 * beyond that (if the dump was partial); so always check the array bound
1985 * before trying to touch an array entry.
1986 */
1987static void
1988buildTocEntryArrays(ArchiveHandle *AH)
1989{
1990 DumpId maxDumpId = AH->maxDumpId;
1991 TocEntry *te;
1992
1993 AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *));
1994 AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId));
1995
1996 for (te = AH->toc->next; te != AH->toc; te = te->next)
1997 {
1998 /* this check is purely paranoia, maxDumpId should be correct */
1999 if (te->dumpId <= 0 || te->dumpId > maxDumpId)
2000 pg_fatal("bad dumpId");
2001
2002 /* tocsByDumpId indexes all TOCs by their dump ID */
2003 AH->tocsByDumpId[te->dumpId] = te;
2004
2005 /*
2006 * tableDataId provides the TABLE DATA item's dump ID for each TABLE
2007 * TOC entry that has a DATA item. We compute this by reversing the
2008 * TABLE DATA item's dependency, knowing that a TABLE DATA item has
2009 * just one dependency and it is the TABLE item.
2010 */
2011 if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
2012 {
2013 DumpId tableId = te->dependencies[0];
2014
2015 /*
2016 * The TABLE item might not have been in the archive, if this was
2017 * a data-only dump; but its dump ID should be less than its data
2018 * item's dump ID, so there should be a place for it in the array.
2019 */
2020 if (tableId <= 0 || tableId > maxDumpId)
2021 pg_fatal("bad table dumpId for TABLE DATA item");
2022
2023 AH->tableDataId[tableId] = te->dumpId;
2024 }
2025 }
2026}
2027
2028TocEntry *
2029getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
2030{
2031 /* build index arrays if we didn't already */
2032 if (AH->tocsByDumpId == NULL)
2034
2035 if (id > 0 && id <= AH->maxDumpId)
2036 return AH->tocsByDumpId[id];
2037
2038 return NULL;
2039}
2040
2041int
2042TocIDRequired(ArchiveHandle *AH, DumpId id)
2043{
2044 TocEntry *te = getTocEntryByDumpId(AH, id);
2045
2046 if (!te)
2047 return 0;
2048
2049 return te->reqs;
2050}
2051
2052size_t
2053WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
2054{
2055 int off;
2056
2057 /* Save the flag */
2058 AH->WriteBytePtr(AH, wasSet);
2059
2060 /* Write out pgoff_t smallest byte first, prevents endian mismatch */
2061 for (off = 0; off < sizeof(pgoff_t); off++)
2062 {
2063 AH->WriteBytePtr(AH, o & 0xFF);
2064 o >>= 8;
2065 }
2066 return sizeof(pgoff_t) + 1;
2067}
2068
2069int
2070ReadOffset(ArchiveHandle *AH, pgoff_t *o)
2071{
2072 int i;
2073 int off;
2074 int offsetFlg;
2075
2076 /* Initialize to zero */
2077 *o = 0;
2078
2079 /* Check for old version */
2080 if (AH->version < K_VERS_1_7)
2081 {
2082 /* Prior versions wrote offsets using WriteInt */
2083 i = ReadInt(AH);
2084 /* -1 means not set */
2085 if (i < 0)
2086 return K_OFFSET_POS_NOT_SET;
2087 else if (i == 0)
2088 return K_OFFSET_NO_DATA;
2089
2090 /* Cast to pgoff_t because it was written as an int. */
2091 *o = (pgoff_t) i;
2092 return K_OFFSET_POS_SET;
2093 }
2094
2095 /*
2096 * Read the flag indicating the state of the data pointer. Check if valid
2097 * and die if not.
2098 *
2099 * This used to be handled by a negative or zero pointer, now we use an
2100 * extra byte specifically for the state.
2101 */
2102 offsetFlg = AH->ReadBytePtr(AH) & 0xFF;
2103
2104 switch (offsetFlg)
2105 {
2107 case K_OFFSET_NO_DATA:
2108 case K_OFFSET_POS_SET:
2109
2110 break;
2111
2112 default:
2113 pg_fatal("unexpected data offset flag %d", offsetFlg);
2114 }
2115
2116 /*
2117 * Read the bytes
2118 */
2119 for (off = 0; off < AH->offSize; off++)
2120 {
2121 if (off < sizeof(pgoff_t))
2122 *o |= ((pgoff_t) (AH->ReadBytePtr(AH))) << (off * 8);
2123 else
2124 {
2125 if (AH->ReadBytePtr(AH) != 0)
2126 pg_fatal("file offset in dump file is too large");
2127 }
2128 }
2129
2130 return offsetFlg;
2131}
2132
2133size_t
2134WriteInt(ArchiveHandle *AH, int i)
2135{
2136 int b;
2137
2138 /*
2139 * This is a bit yucky, but I don't want to make the binary format very
2140 * dependent on representation, and not knowing much about it, I write out
2141 * a sign byte. If you change this, don't forget to change the file
2142 * version #, and modify ReadInt to read the new format AS WELL AS the old
2143 * formats.
2144 */
2145
2146 /* SIGN byte */
2147 if (i < 0)
2148 {
2149 AH->WriteBytePtr(AH, 1);
2150 i = -i;
2151 }
2152 else
2153 AH->WriteBytePtr(AH, 0);
2154
2155 for (b = 0; b < AH->intSize; b++)
2156 {
2157 AH->WriteBytePtr(AH, i & 0xFF);
2158 i >>= 8;
2159 }
2160
2161 return AH->intSize + 1;
2162}
2163
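A brief worked example may help (illustrative, not part of this file): with the usual intSize of 4, the value -300 is written as the five bytes 01 2C 01 00 00, that is, a sign byte of 1 followed by the magnitude 0x012C stored least-significant byte first; ReadInt() below reverses the process. A standalone sketch of the same encoding:

static void
encode_int_example(int i, unsigned char out[5])
{
	out[0] = (i < 0) ? 1 : 0;	/* sign byte, as in WriteInt() above */
	if (i < 0)
		i = -i;
	for (int b = 0; b < 4; b++)
	{
		out[b + 1] = i & 0xFF;	/* smallest byte first */
		i >>= 8;
	}
}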
2164int
2165ReadInt(ArchiveHandle *AH)
2166{
2167 int res = 0;
2168 int bv,
2169 b;
2170 int sign = 0; /* Default positive */
2171 int bitShift = 0;
2172
2173 if (AH->version > K_VERS_1_0)
2174 /* Read a sign byte */
2175 sign = AH->ReadBytePtr(AH);
2176
2177 for (b = 0; b < AH->intSize; b++)
2178 {
2179 bv = AH->ReadBytePtr(AH) & 0xFF;
2180 if (bv != 0)
2181 res = res + (bv << bitShift);
2182 bitShift += 8;
2183 }
2184
2185 if (sign)
2186 res = -res;
2187
2188 return res;
2189}
2190
2191size_t
2192WriteStr(ArchiveHandle *AH, const char *c)
2193{
2194 size_t res;
2195
2196 if (c)
2197 {
2198 int len = strlen(c);
2199
2200 res = WriteInt(AH, len);
2201 AH->WriteBufPtr(AH, c, len);
2202 res += len;
2203 }
2204 else
2205 res = WriteInt(AH, -1);
2206
2207 return res;
2208}
2209
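/*
 * Strings are stored as a WriteInt length followed by that many bytes, with
 * no terminating NUL; a NULL pointer is recorded as length -1.  For example,
 * WriteStr(AH, "abc") writes the integer 3 and then the bytes 'a' 'b' 'c',
 * and ReadStr below returns a pg_malloc'd NUL-terminated copy (or NULL for a
 * stored length of -1).
 */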
2210char *
2211ReadStr(ArchiveHandle *AH)
2212{
2213 char *buf;
2214 int l;
2215
2216 l = ReadInt(AH);
2217 if (l < 0)
2218 buf = NULL;
2219 else
2220 {
2221 buf = (char *) pg_malloc(l + 1);
2222 AH->ReadBufPtr(AH, buf, l);
2223
2224 buf[l] = '\0';
2225 }
2226
2227 return buf;
2228}
2229
2230static bool
2231_fileExistsInDirectory(const char *dir, const char *filename)
2232{
2233 struct stat st;
2234 char buf[MAXPGPATH];
2235
2236 if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH)
2237 pg_fatal("directory name too long: \"%s\"", dir);
2238
2239 return (stat(buf, &st) == 0 && S_ISREG(st.st_mode));
2240}
2241
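/*
 * Guess the format of the input archive.  A directory containing toc.dat
 * (possibly compressed) is a directory archive; a file or stream beginning
 * with the magic bytes "PGDMP" is a custom archive; a 512-byte block that
 * passes isValidTarHeader() is a tar archive; and anything that looks like a
 * plain-text dump is rejected with a hint to use psql instead.
 */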
2242static int
2243_discoverArchiveFormat(ArchiveHandle *AH)
2244{
2245 FILE *fh;
2246 char sig[6]; /* More than enough */
2247 size_t cnt;
2248 int wantClose = 0;
2249
2250 pg_log_debug("attempting to ascertain archive format");
2251
2252 free(AH->lookahead);
2253
2254 AH->readHeader = 0;
2255 AH->lookaheadSize = 512;
2256 AH->lookahead = pg_malloc0(512);
2257 AH->lookaheadLen = 0;
2258 AH->lookaheadPos = 0;
2259
2260 if (AH->fSpec)
2261 {
2262 struct stat st;
2263
2264 wantClose = 1;
2265
2266 /*
2267 * Check if the specified archive is a directory. If so, check if
2268 * there's a "toc.dat" (or "toc.dat.{gz,lz4,zst}") file in it.
2269 */
2270 if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
2271 {
2272 AH->format = archDirectory;
2273 if (_fileExistsInDirectory(AH->fSpec, "toc.dat"))
2274 return AH->format;
2275#ifdef HAVE_LIBZ
2276 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.gz"))
2277 return AH->format;
2278#endif
2279#ifdef USE_LZ4
2280 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.lz4"))
2281 return AH->format;
2282#endif
2283#ifdef USE_ZSTD
2284 if (_fileExistsInDirectory(AH->fSpec, "toc.dat.zst"))
2285 return AH->format;
2286#endif
2287 pg_fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
2288 AH->fSpec);
2289 fh = NULL; /* keep compiler quiet */
2290 }
2291 else
2292 {
2293 fh = fopen(AH->fSpec, PG_BINARY_R);
2294 if (!fh)
2295 pg_fatal("could not open input file \"%s\": %m", AH->fSpec);
2296 }
2297 }
2298 else
2299 {
2300 fh = stdin;
2301 if (!fh)
2302 pg_fatal("could not open input file: %m");
2303 }
2304
2305 if ((cnt = fread(sig, 1, 5, fh)) != 5)
2306 {
2307 if (ferror(fh))
2308 pg_fatal("could not read input file: %m");
2309 else
2310 pg_fatal("input file is too short (read %lu, expected 5)",
2311 (unsigned long) cnt);
2312 }
2313
2314 /* Save it, just in case we need it later */
2315 memcpy(&AH->lookahead[0], sig, 5);
2316 AH->lookaheadLen = 5;
2317
2318 if (strncmp(sig, "PGDMP", 5) == 0)
2319 {
2320 /* It's custom format, stop here */
2321 AH->format = archCustom;
2322 AH->readHeader = 1;
2323 }
2324 else
2325 {
2326 /*
2327 * *Maybe* we have a tar archive format file or a text dump ... So,
2328 * read first 512 byte header...
2329 */
2330 cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
2331 /* read failure is checked below */
2332 AH->lookaheadLen += cnt;
2333
2334 if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
2335 (strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
2336 strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
2337 {
2338 /*
 2339    * Looks like it's probably a text format dump, so suggest they
 2340    * try psql.
2341 */
2342 pg_fatal("input file appears to be a text format dump. Please use psql.");
2343 }
2344
2345 if (AH->lookaheadLen != 512)
2346 {
2347 if (feof(fh))
2348 pg_fatal("input file does not appear to be a valid archive (too short?)");
2349 else
2350 READ_ERROR_EXIT(fh);
2351 }
2352
2353 if (!isValidTarHeader(AH->lookahead))
2354 pg_fatal("input file does not appear to be a valid archive");
2355
2356 AH->format = archTar;
2357 }
2358
2359 /* Close the file if we opened it */
2360 if (wantClose)
2361 {
2362 if (fclose(fh) != 0)
2363 pg_fatal("could not close input file: %m");
2364 /* Forget lookahead, since we'll re-read header after re-opening */
2365 AH->readHeader = 0;
2366 AH->lookaheadLen = 0;
2367 }
2368
2369 return AH->format;
2370}
2371
2372
2373/*
2374 * Allocate an archive handle
2375 */
2376static ArchiveHandle *
2377_allocAH(const char *FileSpec, const ArchiveFormat fmt,
2378 const pg_compress_specification compression_spec,
2379 bool dosync, ArchiveMode mode,
 2380   SetupWorkerPtrType setupWorkerPtr, DataDirSyncMethod sync_method)
 2381{
2382 ArchiveHandle *AH;
2383 CompressFileHandle *CFH;
2384 pg_compress_specification out_compress_spec = {0};
2385
2386 pg_log_debug("allocating AH for %s, format %d",
2387 FileSpec ? FileSpec : "(stdio)", fmt);
2388
2389 AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle));
2390
2391 AH->version = K_VERS_SELF;
2392
2393 /* initialize for backwards compatible string processing */
2394 AH->public.encoding = 0; /* PG_SQL_ASCII */
2395 AH->public.std_strings = false;
2396
2397 /* sql error handling */
2398 AH->public.exit_on_error = true;
2399 AH->public.n_errors = 0;
2400
2401 AH->archiveDumpVersion = PG_VERSION;
2402
2403 AH->createDate = time(NULL);
2404
2405 AH->intSize = sizeof(int);
2406 AH->offSize = sizeof(pgoff_t);
2407 if (FileSpec)
2408 {
2409 AH->fSpec = pg_strdup(FileSpec);
2410
2411 /*
2412 * Not used; maybe later....
2413 *
2414 * AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
2415 * i--) if (AH->workDir[i-1] == '/')
2416 */
2417 }
2418 else
2419 AH->fSpec = NULL;
2420
2421 AH->currUser = NULL; /* unknown */
2422 AH->currSchema = NULL; /* ditto */
2423 AH->currTablespace = NULL; /* ditto */
2424 AH->currTableAm = NULL; /* ditto */
2425
2426 AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
2427
2428 AH->toc->next = AH->toc;
2429 AH->toc->prev = AH->toc;
2430
2431 AH->mode = mode;
2432 AH->compression_spec = compression_spec;
2433 AH->dosync = dosync;
 2434 AH->sync_method = sync_method;
 2435
2436 memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));
2437
2438 /* Open stdout with no compression for AH output handle */
2439 out_compress_spec.algorithm = PG_COMPRESSION_NONE;
2440 CFH = InitCompressFileHandle(out_compress_spec);
2441 if (!CFH->open_func(NULL, fileno(stdout), PG_BINARY_A, CFH))
2442 pg_fatal("could not open stdout for appending: %m");
2443 AH->OF = CFH;
2444
2445 /*
2446 * On Windows, we need to use binary mode to read/write non-text files,
2447 * which include all archive formats as well as compressed plain text.
2448 * Force stdin/stdout into binary mode if that is what we are using.
2449 */
2450#ifdef WIN32
2451 if ((fmt != archNull || compression_spec.algorithm != PG_COMPRESSION_NONE) &&
2452 (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0))
2453 {
2454 if (mode == archModeWrite)
2455 _setmode(fileno(stdout), O_BINARY);
2456 else
2457 _setmode(fileno(stdin), O_BINARY);
2458 }
2459#endif
2460
2461 AH->SetupWorkerPtr = setupWorkerPtr;
2462
2463 if (fmt == archUnknown)
 2464  AH->format = _discoverArchiveFormat(AH);
 2465 else
2466 AH->format = fmt;
2467
2468 switch (AH->format)
2469 {
2470 case archCustom:
 2471   InitArchiveFmt_Custom(AH);
 2472   break;
2473
2474 case archNull:
 2475   InitArchiveFmt_Null(AH);
 2476   break;
2477
2478 case archDirectory:
 2479   InitArchiveFmt_Directory(AH);
 2480   break;
2481
2482 case archTar:
 2483   InitArchiveFmt_Tar(AH);
 2484   break;
2485
2486 default:
2487 pg_fatal("unrecognized file format \"%d\"", AH->format);
2488 }
2489
2490 return AH;
2491}
2492
2493/*
2494 * Write out all data (tables & LOs)
2495 */
2496void
2497WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
2498{
2499 TocEntry *te;
2500
2501 if (pstate && pstate->numWorkers > 1)
2502 {
2503 /*
2504 * In parallel mode, this code runs in the leader process. We
2505 * construct an array of candidate TEs, then sort it into decreasing
2506 * size order, then dispatch each TE to a data-transfer worker. By
2507 * dumping larger tables first, we avoid getting into a situation
2508 * where we're down to one job and it's big, losing parallelism.
2509 */
2510 TocEntry **tes;
2511 int ntes;
2512
2513 tes = (TocEntry **) pg_malloc(AH->tocCount * sizeof(TocEntry *));
2514 ntes = 0;
2515 for (te = AH->toc->next; te != AH->toc; te = te->next)
2516 {
2517 /* Consider only TEs with dataDumper functions ... */
2518 if (!te->dataDumper)
2519 continue;
2520 /* ... and ignore ones not enabled for dump */
2521 if ((te->reqs & REQ_DATA) == 0)
2522 continue;
2523
2524 tes[ntes++] = te;
2525 }
2526
2527 if (ntes > 1)
2528 qsort(tes, ntes, sizeof(TocEntry *), TocEntrySizeCompareQsort);
2529
2530 for (int i = 0; i < ntes; i++)
2531 DispatchJobForTocEntry(AH, pstate, tes[i], ACT_DUMP,
2532 mark_dump_job_done, NULL);
2533
2534 pg_free(tes);
2535
2536 /* Now wait for workers to finish. */
2537 WaitForWorkers(AH, pstate, WFW_ALL_IDLE);
2538 }
2539 else
2540 {
2541 /* Non-parallel mode: just dump all candidate TEs sequentially. */
2542 for (te = AH->toc->next; te != AH->toc; te = te->next)
2543 {
2544 /* Must have same filter conditions as above */
2545 if (!te->dataDumper)
2546 continue;
2547 if ((te->reqs & REQ_DATA) == 0)
2548 continue;
2549
 2550   WriteDataChunksForTocEntry(AH, te);
 2551  }
2552 }
2553}
2554
2555
2556/*
2557 * Callback function that's invoked in the leader process after a step has
2558 * been parallel dumped.
2559 *
2560 * We don't need to do anything except check for worker failure.
2561 */
2562static void
2563mark_dump_job_done(ArchiveHandle *AH,
 2564       TocEntry *te,
2565 int status,
2566 void *callback_data)
2567{
2568 pg_log_info("finished item %d %s %s",
2569 te->dumpId, te->desc, te->tag);
2570
2571 if (status != 0)
2572 pg_fatal("worker process failed: exit code %d",
2573 status);
2574}
2575
2576
2577void
2578WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
2579{
2580 StartDataPtrType startPtr;
2581 EndDataPtrType endPtr;
2582
2583 AH->currToc = te;
2584
2585 if (strcmp(te->desc, "BLOBS") == 0)
2586 {
2587 startPtr = AH->StartLOsPtr;
2588 endPtr = AH->EndLOsPtr;
2589 }
2590 else
2591 {
2592 startPtr = AH->StartDataPtr;
2593 endPtr = AH->EndDataPtr;
2594 }
2595
2596 if (startPtr != NULL)
2597 (*startPtr) (AH, te);
2598
2599 /*
2600 * The user-provided DataDumper routine needs to call AH->WriteData
2601 */
2602 te->dataDumper((Archive *) AH, te->dataDumperArg);
2603
2604 if (endPtr != NULL)
2605 (*endPtr) (AH, te);
2606
2607 AH->currToc = NULL;
2608}
2609
2610void
2611WriteToc(ArchiveHandle *AH)
2612{
2613 TocEntry *te;
2614 char workbuf[32];
2615 int tocCount;
2616 int i;
2617
2618 /* count entries that will actually be dumped */
2619 tocCount = 0;
2620 for (te = AH->toc->next; te != AH->toc; te = te->next)
2621 {
2622 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) != 0)
2623 tocCount++;
2624 }
2625
2626 /* printf("%d TOC Entries to save\n", tocCount); */
2627
2628 WriteInt(AH, tocCount);
2629
2630 for (te = AH->toc->next; te != AH->toc; te = te->next)
2631 {
2632 if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS | REQ_SPECIAL)) == 0)
2633 continue;
2634
2635 WriteInt(AH, te->dumpId);
2636 WriteInt(AH, te->dataDumper ? 1 : 0);
2637
2638 /* OID is recorded as a string for historical reasons */
2639 sprintf(workbuf, "%u", te->catalogId.tableoid);
2640 WriteStr(AH, workbuf);
2641 sprintf(workbuf, "%u", te->catalogId.oid);
2642 WriteStr(AH, workbuf);
2643
2644 WriteStr(AH, te->tag);
2645 WriteStr(AH, te->desc);
2646 WriteInt(AH, te->section);
2647
2648 if (te->defnLen)
2649 {
2650 /*
2651 * defnLen should only be set for custom format's second call to
2652 * WriteToc(), which rewrites the TOC in place to update data
2653 * offsets. Instead of calling the defnDumper a second time
2654 * (which could involve re-executing queries), just skip writing
2655 * the entry. While regenerating the definition should
2656 * theoretically produce the same result as before, it's expensive
2657 * and feels risky.
2658 *
2659 * The custom format only calls WriteToc() a second time if
2660 * fseeko() is usable (see _CloseArchive() in pg_backup_custom.c),
2661 * so we can safely use it without checking. For other formats,
2662 * we fail because one of our assumptions must no longer hold
2663 * true.
2664 *
2665 * XXX This is a layering violation, but the alternative is an
2666 * awkward and complicated callback infrastructure for this
2667 * special case. This might be worth revisiting in the future.
2668 */
2669 if (AH->format != archCustom)
2670 pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
2671 te->dumpId, te->desc, te->tag);
2672
2673 if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
2674 pg_fatal("error during file seek: %m");
2675 }
2676 else if (te->defnDumper)
2677 {
2678 char *defn = te->defnDumper((Archive *) AH, te->defnDumperArg, te);
2679
2680 te->defnLen = WriteStr(AH, defn);
2681 pg_free(defn);
2682 }
2683 else
2684 WriteStr(AH, te->defn);
2685
2686 WriteStr(AH, te->dropStmt);
2687 WriteStr(AH, te->copyStmt);
2688 WriteStr(AH, te->namespace);
2689 WriteStr(AH, te->tablespace);
2690 WriteStr(AH, te->tableam);
2691 WriteInt(AH, te->relkind);
2692 WriteStr(AH, te->owner);
2693 WriteStr(AH, "false");
2694
2695 /* Dump list of dependencies */
2696 for (i = 0; i < te->nDeps; i++)
2697 {
2698 sprintf(workbuf, "%d", te->dependencies[i]);
2699 WriteStr(AH, workbuf);
2700 }
2701 WriteStr(AH, NULL); /* Terminate List */
2702
2703 if (AH->WriteExtraTocPtr)
2704 AH->WriteExtraTocPtr(AH, te);
2705 }
2706}
2707
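/*
 * Read the table of contents written by WriteToc above.  Each entry consists
 * of the dump ID, a had-dumper flag, the catalog table OID and object OID (as
 * strings), tag, description, section, definition, drop statement, copy
 * statement, namespace, tablespace, table AM, relkind, owner, a legacy
 * with-OIDs flag string, a NULL-terminated list of dependency IDs, and
 * finally any format-specific data; older archive versions simply omit the
 * fields they predate.
 */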
2708void
2709ReadToc(ArchiveHandle *AH)
2710{
2711 int i;
2712 char *tmp;
2713 DumpId *deps;
2714 int depIdx;
2715 int depSize;
2716 TocEntry *te;
2717 bool is_supported;
2718
2719 AH->tocCount = ReadInt(AH);
2720 AH->maxDumpId = 0;
2721
2722 for (i = 0; i < AH->tocCount; i++)
2723 {
2724 te = (TocEntry *) pg_malloc0(sizeof(TocEntry));
2725 te->dumpId = ReadInt(AH);
2726
2727 if (te->dumpId > AH->maxDumpId)
2728 AH->maxDumpId = te->dumpId;
2729
2730 /* Sanity check */
2731 if (te->dumpId <= 0)
2732 pg_fatal("entry ID %d out of range -- perhaps a corrupt TOC",
2733 te->dumpId);
2734
2735 te->hadDumper = ReadInt(AH);
2736
2737 if (AH->version >= K_VERS_1_8)
2738 {
2739 tmp = ReadStr(AH);
2740 sscanf(tmp, "%u", &te->catalogId.tableoid);
2741 free(tmp);
2742 }
2743 else
 2744    te->catalogId.tableoid = InvalidOid;
 2745   tmp = ReadStr(AH);
2746 sscanf(tmp, "%u", &te->catalogId.oid);
2747 free(tmp);
2748
2749 te->tag = ReadStr(AH);
2750 te->desc = ReadStr(AH);
2751
2752 if (AH->version >= K_VERS_1_11)
2753 {
2754 te->section = ReadInt(AH);
2755 }
2756 else
2757 {
2758 /*
2759 * Rules for pre-8.4 archives wherein pg_dump hasn't classified
2760 * the entries into sections. This list need not cover entry
2761 * types added later than 8.4.
2762 */
2763 if (strcmp(te->desc, "COMMENT") == 0 ||
2764 strcmp(te->desc, "ACL") == 0 ||
2765 strcmp(te->desc, "ACL LANGUAGE") == 0)
2766 te->section = SECTION_NONE;
2767 else if (strcmp(te->desc, "TABLE DATA") == 0 ||
2768 strcmp(te->desc, "BLOBS") == 0 ||
2769 strcmp(te->desc, "BLOB COMMENTS") == 0)
2770 te->section = SECTION_DATA;
2771 else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
2772 strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
2773 strcmp(te->desc, "FK CONSTRAINT") == 0 ||
2774 strcmp(te->desc, "INDEX") == 0 ||
2775 strcmp(te->desc, "RULE") == 0 ||
2776 strcmp(te->desc, "TRIGGER") == 0)
 2777     te->section = SECTION_POST_DATA;
 2778    else
 2779     te->section = SECTION_PRE_DATA;
 2780   }
2781
2782 te->defn = ReadStr(AH);
2783 te->dropStmt = ReadStr(AH);
2784
2785 if (AH->version >= K_VERS_1_3)
2786 te->copyStmt = ReadStr(AH);
2787
2788 if (AH->version >= K_VERS_1_6)
2789 te->namespace = ReadStr(AH);
2790
2791 if (AH->version >= K_VERS_1_10)
2792 te->tablespace = ReadStr(AH);
2793
2794 if (AH->version >= K_VERS_1_14)
2795 te->tableam = ReadStr(AH);
2796
2797 if (AH->version >= K_VERS_1_16)
2798 te->relkind = ReadInt(AH);
2799
2800 te->owner = ReadStr(AH);
2801 is_supported = true;
2802 if (AH->version < K_VERS_1_9)
2803 is_supported = false;
2804 else
2805 {
2806 tmp = ReadStr(AH);
2807
2808 if (strcmp(tmp, "true") == 0)
2809 is_supported = false;
2810
2811 free(tmp);
2812 }
2813
2814 if (!is_supported)
2815 pg_log_warning("restoring tables WITH OIDS is not supported anymore");
2816
2817 /* Read TOC entry dependencies */
2818 if (AH->version >= K_VERS_1_5)
2819 {
2820 depSize = 100;
2821 deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize);
2822 depIdx = 0;
2823 for (;;)
2824 {
2825 tmp = ReadStr(AH);
2826 if (!tmp)
2827 break; /* end of list */
2828 if (depIdx >= depSize)
2829 {
2830 depSize *= 2;
2831 deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize);
2832 }
2833 sscanf(tmp, "%d", &deps[depIdx]);
2834 free(tmp);
2835 depIdx++;
2836 }
2837
2838 if (depIdx > 0) /* We have a non-null entry */
2839 {
2840 deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx);
2841 te->dependencies = deps;
2842 te->nDeps = depIdx;
2843 }
2844 else
2845 {
2846 free(deps);
2847 te->dependencies = NULL;
2848 te->nDeps = 0;
2849 }
2850 }
2851 else
2852 {
2853 te->dependencies = NULL;
2854 te->nDeps = 0;
2855 }
2856 te->dataLength = 0;
2857
2858 if (AH->ReadExtraTocPtr)
2859 AH->ReadExtraTocPtr(AH, te);
2860
2861 pg_log_debug("read TOC entry %d (ID %d) for %s %s",
2862 i, te->dumpId, te->desc, te->tag);
2863
2864 /* link completed entry into TOC circular list */
2865 te->prev = AH->toc->prev;
2866 AH->toc->prev->next = te;
2867 AH->toc->prev = te;
2868 te->next = AH->toc;
2869
2870 /* special processing immediately upon read for some items */
2871 if (strcmp(te->desc, "ENCODING") == 0)
2872 processEncodingEntry(AH, te);
2873 else if (strcmp(te->desc, "STDSTRINGS") == 0)
2874 processStdStringsEntry(AH, te);
2875 else if (strcmp(te->desc, "SEARCHPATH") == 0)
2876 processSearchPathEntry(AH, te);
2877 }
2878}
2879
2880static void
2881processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
2882{
2883 /* te->defn should have the form SET client_encoding = 'foo'; */
2884 char *defn = pg_strdup(te->defn);
2885 char *ptr1;
2886 char *ptr2 = NULL;
2887 int encoding;
2888
2889 ptr1 = strchr(defn, '\'');
2890 if (ptr1)
2891 ptr2 = strchr(++ptr1, '\'');
2892 if (ptr2)
2893 {
2894 *ptr2 = '\0';
 2895  encoding = pg_char_to_encoding(ptr1);
 2896  if (encoding < 0)
2897 pg_fatal("unrecognized encoding \"%s\"",
2898 ptr1);
2899 AH->public.encoding = encoding;
2901 }
2902 else
2903 pg_fatal("invalid ENCODING item: %s",
2904 te->defn);
2905
2906 free(defn);
2907}
2908
2909static void
2910processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
2911{
2912 /* te->defn should have the form SET standard_conforming_strings = 'x'; */
2913 char *ptr1;
2914
2915 ptr1 = strchr(te->defn, '\'');
2916 if (ptr1 && strncmp(ptr1, "'on'", 4) == 0)
2917 AH->public.std_strings = true;
2918 else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
2919 AH->public.std_strings = false;
2920 else
2921 pg_fatal("invalid STDSTRINGS item: %s",
2922 te->defn);
2923}
2924
2925static void
2926processSearchPathEntry(ArchiveHandle *AH, TocEntry *te)
2927{
2928 /*
2929 * te->defn should contain a command to set search_path. We just copy it
2930 * verbatim for use later.
2931 */
2932 AH->public.searchpath = pg_strdup(te->defn);
2933}
2934
2935static void
2936StrictNamesCheck(RestoreOptions *ropt)
2937{
2938 const char *missing_name;
2939
2940 Assert(ropt->strict_names);
2941
2942 if (ropt->schemaNames.head != NULL)
2943 {
2944 missing_name = simple_string_list_not_touched(&ropt->schemaNames);
2945 if (missing_name != NULL)
2946 pg_fatal("schema \"%s\" not found", missing_name);
2947 }
2948
2949 if (ropt->tableNames.head != NULL)
2950 {
2951 missing_name = simple_string_list_not_touched(&ropt->tableNames);
2952 if (missing_name != NULL)
2953 pg_fatal("table \"%s\" not found", missing_name);
2954 }
2955
2956 if (ropt->indexNames.head != NULL)
2957 {
2958 missing_name = simple_string_list_not_touched(&ropt->indexNames);
2959 if (missing_name != NULL)
2960 pg_fatal("index \"%s\" not found", missing_name);
2961 }
2962
2963 if (ropt->functionNames.head != NULL)
2964 {
2965 missing_name = simple_string_list_not_touched(&ropt->functionNames);
2966 if (missing_name != NULL)
2967 pg_fatal("function \"%s\" not found", missing_name);
2968 }
2969
2970 if (ropt->triggerNames.head != NULL)
2971 {
2972 missing_name = simple_string_list_not_touched(&ropt->triggerNames);
2973 if (missing_name != NULL)
2974 pg_fatal("trigger \"%s\" not found", missing_name);
2975 }
2976}
2977
2978/*
2979 * Determine whether we want to restore this TOC entry.
2980 *
2981 * Returns 0 if entry should be skipped, or some combination of the
2982 * REQ_SCHEMA, REQ_DATA, and REQ_STATS bits if we want to restore schema, data
2983 * and/or statistics portions of this TOC entry, or REQ_SPECIAL if it's a
2984 * special entry.
2985 */
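/*
 * For example, a plain TABLE entry (schema only, no data dumper) normally
 * comes back as REQ_SCHEMA, while its TABLE DATA entry comes back as
 * REQ_DATA; the dumpSchema and dumpData options then mask off whichever of
 * those bits is not wanted.
 */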
2986static int
2987_tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
2988{
2989 int res = REQ_SCHEMA | REQ_DATA;
2990 RestoreOptions *ropt = AH->public.ropt;
2991
2992 /*
2993 * For binary upgrade mode, dump pg_largeobject_metadata and the
2994 * associated pg_shdepend rows. This is faster to restore than the
2995 * equivalent set of large object commands. We can only do this for
2996 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
2997 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
2998 */
2999 if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
3000 strcmp(te->desc, "TABLE DATA") == 0 &&
3001 (te->catalogId.oid == LargeObjectMetadataRelationId ||
3002 te->catalogId.oid == SharedDependRelationId))
3003 return REQ_DATA;
3004
3005 /* These items are treated specially */
3006 if (strcmp(te->desc, "ENCODING") == 0 ||
3007 strcmp(te->desc, "STDSTRINGS") == 0 ||
3008 strcmp(te->desc, "SEARCHPATH") == 0)
3009 return REQ_SPECIAL;
3010
3011 if (strcmp(te->desc, "STATISTICS DATA") == 0)
3012 {
3013 if (!ropt->dumpStatistics)
3014 return 0;
3015
3016 res = REQ_STATS;
3017 }
3018
3019 /*
3020 * DATABASE and DATABASE PROPERTIES also have a special rule: they are
3021 * restored in createDB mode, and not restored otherwise, independently of
3022 * all else.
3023 */
3024 if (strcmp(te->desc, "DATABASE") == 0 ||
3025 strcmp(te->desc, "DATABASE PROPERTIES") == 0)
3026 {
3027 if (ropt->createDB)
3028 return REQ_SCHEMA;
3029 else
3030 return 0;
3031 }
3032
3033 /*
3034 * Process exclusions that affect certain classes of TOC entries.
3035 */
3036
3037 /* If it's an ACL, maybe ignore it */
3038 if (ropt->aclsSkip && _tocEntryIsACL(te))
3039 return 0;
3040
3041 /* If it's a comment, maybe ignore it */
3042 if (ropt->no_comments && strcmp(te->desc, "COMMENT") == 0)
3043 return 0;
3044
3045 /* If it's a policy, maybe ignore it */
3046 if (ropt->no_policies &&
3047 (strcmp(te->desc, "POLICY") == 0 ||
3048 strcmp(te->desc, "ROW SECURITY") == 0))
3049 return 0;
3050
3051 /*
3052 * If it's a comment on a policy, a publication, or a subscription, maybe
3053 * ignore it.
3054 */
3055 if (strcmp(te->desc, "COMMENT") == 0)
3056 {
3057 if (ropt->no_policies &&
3058 strncmp(te->tag, "POLICY", strlen("POLICY")) == 0)
3059 return 0;
3060
3061 if (ropt->no_publications &&
3062 strncmp(te->tag, "PUBLICATION", strlen("PUBLICATION")) == 0)
3063 return 0;
3064
3065 if (ropt->no_subscriptions &&
3066 strncmp(te->tag, "SUBSCRIPTION", strlen("SUBSCRIPTION")) == 0)
3067 return 0;
3068 }
3069
3070 /*
3071 * If it's a publication or a table part of a publication, maybe ignore
3072 * it.
3073 */
3074 if (ropt->no_publications &&
3075 (strcmp(te->desc, "PUBLICATION") == 0 ||
3076 strcmp(te->desc, "PUBLICATION TABLE") == 0 ||
3077 strcmp(te->desc, "PUBLICATION TABLES IN SCHEMA") == 0))
3078 return 0;
3079
3080 /* If it's a security label, maybe ignore it */
3081 if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
3082 return 0;
3083
3084 /*
3085 * If it's a security label on a publication or a subscription, maybe
3086 * ignore it.
3087 */
3088 if (strcmp(te->desc, "SECURITY LABEL") == 0)
3089 {
3090 if (ropt->no_publications &&
3091 strncmp(te->tag, "PUBLICATION", strlen("PUBLICATION")) == 0)
3092 return 0;
3093
3094 if (ropt->no_subscriptions &&
3095 strncmp(te->tag, "SUBSCRIPTION", strlen("SUBSCRIPTION")) == 0)
3096 return 0;
3097 }
3098
3099 /* If it's a subscription, maybe ignore it */
3100 if (ropt->no_subscriptions && strcmp(te->desc, "SUBSCRIPTION") == 0)
3101 return 0;
3102
3103 /* Ignore it if section is not to be dumped/restored */
3104 switch (curSection)
3105 {
3106 case SECTION_PRE_DATA:
3107 if (!(ropt->dumpSections & DUMP_PRE_DATA))
3108 return 0;
3109 break;
3110 case SECTION_DATA:
3111 if (!(ropt->dumpSections & DUMP_DATA))
3112 return 0;
3113 break;
3114 case SECTION_POST_DATA:
3115 if (!(ropt->dumpSections & DUMP_POST_DATA))
3116 return 0;
3117 break;
3118 default:
3119 /* shouldn't get here, really, but ignore it */
3120 return 0;
3121 }
3122
3123 /* Ignore it if rejected by idWanted[] (cf. SortTocFromFile) */
3124 if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
3125 return 0;
3126
3127 /*
3128 * Check options for selective dump/restore.
3129 */
3130 if (strcmp(te->desc, "ACL") == 0 ||
3131 strcmp(te->desc, "COMMENT") == 0 ||
3132 strcmp(te->desc, "STATISTICS DATA") == 0 ||
3133 strcmp(te->desc, "SECURITY LABEL") == 0)
3134 {
3135 /* Database properties react to createDB, not selectivity options. */
3136 if (strncmp(te->tag, "DATABASE ", 9) == 0)
3137 {
3138 if (!ropt->createDB)
3139 return 0;
3140 }
3141 else if (ropt->schemaNames.head != NULL ||
3142 ropt->schemaExcludeNames.head != NULL ||
3143 ropt->selTypes)
3144 {
3145 /*
3146 * In a selective dump/restore, we want to restore these dependent
3147 * TOC entry types only if their parent object is being restored.
3148 * Without selectivity options, we let through everything in the
3149 * archive. Note there may be such entries with no parent, eg
3150 * non-default ACLs for built-in objects. Also, we make
3151 * per-column ACLs additionally depend on the table's ACL if any
3152 * to ensure correct restore order, so those dependencies should
3153 * be ignored in this check.
3154 *
3155 * This code depends on the parent having been marked already,
3156 * which should be the case; if it isn't, perhaps due to
3157 * SortTocFromFile rearrangement, skipping the dependent entry
3158 * seems prudent anyway.
3159 *
3160 * Ideally we'd handle, eg, table CHECK constraints this way too.
3161 * But it's hard to tell which of their dependencies is the one to
3162 * consult.
3163 */
3164 bool dumpthis = false;
3165
3166 for (int i = 0; i < te->nDeps; i++)
3167 {
3168 TocEntry *pte = getTocEntryByDumpId(AH, te->dependencies[i]);
3169
3170 if (!pte)
3171 continue; /* probably shouldn't happen */
3172 if (strcmp(pte->desc, "ACL") == 0)
3173 continue; /* ignore dependency on another ACL */
3174 if (pte->reqs == 0)
3175 continue; /* this object isn't marked, so ignore it */
3176 /* Found a parent to be dumped, so we want to dump this too */
3177 dumpthis = true;
3178 break;
3179 }
3180 if (!dumpthis)
3181 return 0;
3182 }
3183 }
3184 else
3185 {
3186 /* Apply selective-restore rules for standalone TOC entries. */
3187 if (ropt->schemaNames.head != NULL)
3188 {
3189 /* If no namespace is specified, it means all. */
3190 if (!te->namespace)
3191 return 0;
3192 if (!simple_string_list_member(&ropt->schemaNames, te->namespace))
3193 return 0;
3194 }
3195
3196 if (ropt->schemaExcludeNames.head != NULL &&
3197 te->namespace &&
3198 simple_string_list_member(&ropt->schemaExcludeNames, te->namespace))
3199 return 0;
3200
3201 if (ropt->selTypes)
3202 {
3203 if (strcmp(te->desc, "TABLE") == 0 ||
3204 strcmp(te->desc, "TABLE DATA") == 0 ||
3205 strcmp(te->desc, "VIEW") == 0 ||
3206 strcmp(te->desc, "FOREIGN TABLE") == 0 ||
3207 strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
3208 strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0 ||
3209 strcmp(te->desc, "SEQUENCE") == 0 ||
3210 strcmp(te->desc, "SEQUENCE SET") == 0)
3211 {
3212 if (!ropt->selTable)
3213 return 0;
3214 if (ropt->tableNames.head != NULL &&
 3215     !simple_string_list_member(&ropt->tableNames, te->tag))
 3216     return 0;
3217 }
3218 else if (strcmp(te->desc, "INDEX") == 0)
3219 {
3220 if (!ropt->selIndex)
3221 return 0;
3222 if (ropt->indexNames.head != NULL &&
 3223     !simple_string_list_member(&ropt->indexNames, te->tag))
 3224     return 0;
3225 }
3226 else if (strcmp(te->desc, "FUNCTION") == 0 ||
3227 strcmp(te->desc, "AGGREGATE") == 0 ||
3228 strcmp(te->desc, "PROCEDURE") == 0)
3229 {
3230 if (!ropt->selFunction)
3231 return 0;
3232 if (ropt->functionNames.head != NULL &&
 3233     !simple_string_list_member(&ropt->functionNames, te->tag))
 3234     return 0;
3235 }
3236 else if (strcmp(te->desc, "TRIGGER") == 0)
3237 {
3238 if (!ropt->selTrigger)
3239 return 0;
3240 if (ropt->triggerNames.head != NULL &&
 3241     !simple_string_list_member(&ropt->triggerNames, te->tag))
 3242     return 0;
3243 }
3244 else
3245 return 0;
3246 }
3247 }
3248
3249
3250 /*
3251 * Determine whether the TOC entry contains schema and/or data components,
3252 * and mask off inapplicable REQ bits. If it had a dataDumper, assume
3253 * it's both schema and data. Otherwise it's probably schema-only, but
3254 * there are exceptions.
3255 */
3256 if (!te->hadDumper)
3257 {
3258 /*
3259 * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
3260 * is considered a data entry. We don't need to check for BLOBS or
3261 * old-style BLOB COMMENTS entries, because they will have hadDumper =
3262 * true ... but we do need to check new-style BLOB ACLs, comments,
3263 * etc.
3264 */
3265 if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
3266 strcmp(te->desc, "BLOB") == 0 ||
3267 strcmp(te->desc, "BLOB METADATA") == 0 ||
3268 (strcmp(te->desc, "ACL") == 0 &&
3269 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3270 (strcmp(te->desc, "COMMENT") == 0 &&
3271 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3272 (strcmp(te->desc, "SECURITY LABEL") == 0 &&
3273 strncmp(te->tag, "LARGE OBJECT", 12) == 0))
3274 res = res & REQ_DATA;
3275 else
3276 res = res & ~REQ_DATA;
3277 }
3278
3279 /*
3280 * If there's no definition command, there's no schema component. Treat
3281 * "load via partition root" comments as not schema.
3282 */
3283 if (!te->defn || !te->defn[0] ||
3284 strncmp(te->defn, "-- load via partition root ", 27) == 0)
3285 res = res & ~REQ_SCHEMA;
3286
3287 /*
3288 * Special case: <Init> type with <Max OID> tag; this is obsolete and we
3289 * always ignore it.
3290 */
3291 if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
3292 return 0;
3293
3294 /* Mask it if we don't want data */
3295 if (!ropt->dumpData)
3296 {
3297 /*
3298 * The sequence_data option overrides dumpData for SEQUENCE SET.
3299 *
3300 * In binary-upgrade mode, even with dumpData unset, we do not mask
3301 * out large objects. (Only large object definitions, comments and
3302 * other metadata should be generated in binary-upgrade mode, not the
3303 * actual data, but that need not concern us here.)
3304 */
3305 if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
3306 !(ropt->binary_upgrade &&
3307 (strcmp(te->desc, "BLOB") == 0 ||
3308 strcmp(te->desc, "BLOB METADATA") == 0 ||
3309 (strcmp(te->desc, "ACL") == 0 &&
3310 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3311 (strcmp(te->desc, "COMMENT") == 0 &&
3312 strncmp(te->tag, "LARGE OBJECT", 12) == 0) ||
3313 (strcmp(te->desc, "SECURITY LABEL") == 0 &&
3314 strncmp(te->tag, "LARGE OBJECT", 12) == 0))))
3315 res = res & (REQ_SCHEMA | REQ_STATS);
3316 }
3317
3318 /* Mask it if we don't want schema */
3319 if (!ropt->dumpSchema)
3320 res = res & (REQ_DATA | REQ_STATS);
3321
3322 return res;
3323}
3324
3325/*
3326 * Identify which pass we should restore this TOC entry in.
3327 *
3328 * See notes with the RestorePass typedef in pg_backup_archiver.h.
3329 */
3330static RestorePass
3331_tocEntryRestorePass(TocEntry *te)
3332{
3333 /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
3334 if (strcmp(te->desc, "ACL") == 0 ||
3335 strcmp(te->desc, "ACL LANGUAGE") == 0 ||
3336 strcmp(te->desc, "DEFAULT ACL") == 0)
3337 return RESTORE_PASS_ACL;
3338 if (strcmp(te->desc, "EVENT TRIGGER") == 0 ||
3339 strcmp(te->desc, "MATERIALIZED VIEW DATA") == 0)
3340 return RESTORE_PASS_POST_ACL;
3341
3342 /*
3343 * Comments and security labels need to be emitted in the same pass as
3344 * their parent objects. ACLs haven't got comments and security labels,
3345 * and neither do matview data objects, but event triggers do.
3346 * (Fortunately, event triggers haven't got ACLs, or we'd need yet another
3347 * weird special case.)
3348 */
3349 if ((strcmp(te->desc, "COMMENT") == 0 ||
3350 strcmp(te->desc, "SECURITY LABEL") == 0) &&
3351 strncmp(te->tag, "EVENT TRIGGER ", 14) == 0)
3352 return RESTORE_PASS_POST_ACL;
3353
3354 /*
3355 * If statistics data is dependent on materialized view data, it must be
3356 * deferred to RESTORE_PASS_POST_ACL. Those entries are already marked as
3357 * SECTION_POST_DATA, and some other stats entries (e.g., index stats)
3358 * will also be marked as SECTION_POST_DATA. Additionally, our lookahead
3359 * code in fetchAttributeStats() assumes that we dump all statistics data
3360 * entries in TOC order. To ensure this assumption holds, we move all
3361 * statistics data entries in SECTION_POST_DATA to RESTORE_PASS_POST_ACL.
3362 */
3363 if (strcmp(te->desc, "STATISTICS DATA") == 0 &&
 3364  te->section == SECTION_POST_DATA)
 3365   return RESTORE_PASS_POST_ACL;
3366
3367 /* All else can be handled in the main pass. */
3368 return RESTORE_PASS_MAIN;
3369}
3370
3371/*
3372 * Identify TOC entries that are ACLs.
3373 *
3374 * Note: it seems worth duplicating some code here to avoid a hard-wired
3375 * assumption that these are exactly the same entries that we restore during
3376 * the RESTORE_PASS_ACL phase.
3377 */
3378static bool
3379_tocEntryIsACL(TocEntry *te)
3380{
3381 /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
3382 if (strcmp(te->desc, "ACL") == 0 ||
3383 strcmp(te->desc, "ACL LANGUAGE") == 0 ||
3384 strcmp(te->desc, "DEFAULT ACL") == 0)
3385 return true;
3386 return false;
3387}
3388
3389/*
3390 * Issue SET commands for parameters that we want to have set the same way
3391 * at all times during execution of a restore script.
3392 */
3393static void
3394_doSetFixedOutputState(ArchiveHandle *AH)
3395{
3396 RestoreOptions *ropt = AH->public.ropt;
3397
3398 /*
3399 * Disable timeouts to allow for slow commands, idle parallel workers, etc
3400 */
3401 ahprintf(AH, "SET statement_timeout = 0;\n");
3402 ahprintf(AH, "SET lock_timeout = 0;\n");
3403 ahprintf(AH, "SET idle_in_transaction_session_timeout = 0;\n");
3404 ahprintf(AH, "SET transaction_timeout = 0;\n");
3405
3406 /* Select the correct character set encoding */
3407 ahprintf(AH, "SET client_encoding = '%s';\n",
 3408     pg_encoding_to_char(AH->public.encoding));
 3409
3410 /* Select the correct string literal syntax */
3411 ahprintf(AH, "SET standard_conforming_strings = %s;\n",
3412 AH->public.std_strings ? "on" : "off");
3413
3414 /* Select the role to be used during restore */
3415 if (ropt && ropt->use_role)
3416 ahprintf(AH, "SET ROLE %s;\n", fmtId(ropt->use_role));
3417
3418 /* Select the dump-time search_path */
3419 if (AH->public.searchpath)
3420 ahprintf(AH, "%s", AH->public.searchpath);
3421
3422 /* Make sure function checking is disabled */
3423 ahprintf(AH, "SET check_function_bodies = false;\n");
3424
3425 /* Ensure that all valid XML data will be accepted */
3426 ahprintf(AH, "SET xmloption = content;\n");
3427
3428 /* Avoid annoying notices etc */
3429 ahprintf(AH, "SET client_min_messages = warning;\n");
3430 if (!AH->public.std_strings)
3431 ahprintf(AH, "SET escape_string_warning = off;\n");
3432
3433 /* Adjust row-security state */
3434 if (ropt && ropt->enable_row_security)
3435 ahprintf(AH, "SET row_security = on;\n");
3436 else
3437 ahprintf(AH, "SET row_security = off;\n");
3438
3439 /*
3440 * In --transaction-size mode, we should always be in a transaction when
3441 * we begin to restore objects.
3442 */
3443 if (ropt && ropt->txn_size > 0)
3444 {
3445 if (AH->connection)
 3446   StartTransaction(&AH->public);
 3447  else
3448 ahprintf(AH, "\nBEGIN;\n");
3449 AH->txnCount = 0;
3450 }
3451
3452 ahprintf(AH, "\n");
3453}
3454
3455/*
3456 * Issue a SET SESSION AUTHORIZATION command. Caller is responsible
3457 * for updating state if appropriate. If user is NULL or an empty string,
3458 * the specification DEFAULT will be used.
3459 */
3460static void
3461_doSetSessionAuth(ArchiveHandle *AH, const char *user)
 3462{
 3463 PQExpBuffer cmd = createPQExpBuffer();
 3464
3465 appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
3466
3467 /*
3468 * SQL requires a string literal here. Might as well be correct.
3469 */
3470 if (user && *user)
3471 appendStringLiteralAHX(cmd, user, AH);
3472 else
3473 appendPQExpBufferStr(cmd, "DEFAULT");
3474 appendPQExpBufferChar(cmd, ';');
3475
3476 if (RestoringToDB(AH))
3477 {
3478 PGresult *res;
3479
3480 res = PQexec(AH->connection, cmd->data);
3481
3482 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
3483 /* NOT warn_or_exit_horribly... use -O instead to skip this. */
3484 pg_fatal("could not set session user to \"%s\": %s",
 3485      user, PQerrorMessage(AH->connection));
 3486
3487 PQclear(res);
3488 }
3489 else
3490 ahprintf(AH, "%s\n\n", cmd->data);
3491
3492 destroyPQExpBuffer(cmd);
3493}
3494
3495
3496/*
3497 * Issue the commands to connect to the specified database.
3498 *
3499 * If we're currently restoring right into a database, this will
3500 * actually establish a connection. Otherwise it puts a \connect into
3501 * the script output.
3502 */
3503static void
3504_reconnectToDB(ArchiveHandle *AH, const char *dbname)
3505{
3506 if (RestoringToDB(AH))
 3507  ReconnectToServer(AH, dbname);
 3508 else
3509 {
3510 PQExpBufferData connectbuf;
3511 RestoreOptions *ropt = AH->public.ropt;
3512
3513 /*
3514 * We must temporarily exit restricted mode for \connect, etc.
3515 * Anything added between this line and the following \restrict must
3516 * be careful to avoid any possible meta-command injection vectors.
3517 */
3518 ahprintf(AH, "\\unrestrict %s\n", ropt->restrict_key);
3519
3520 initPQExpBuffer(&connectbuf);
3521 appendPsqlMetaConnect(&connectbuf, dbname);
3522 ahprintf(AH, "%s", connectbuf.data);
3523 termPQExpBuffer(&connectbuf);
3524
3525 ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
3526 }
3527
3528 /*
3529 * NOTE: currUser keeps track of what the imaginary session user in our
3530 * script is. It's now effectively reset to the original userID.
3531 */
3532 free(AH->currUser);
3533 AH->currUser = NULL;
3534
3535 /* don't assume we still know the output schema, tablespace, etc either */
3536 free(AH->currSchema);
3537 AH->currSchema = NULL;
3538
3539 free(AH->currTableAm);
3540 AH->currTableAm = NULL;
3541
3542 free(AH->currTablespace);
3543 AH->currTablespace = NULL;
3544
3545 /* re-establish fixed state */
 3546 _doSetFixedOutputState(AH);
 3547}
3548
3549/*
3550 * Become the specified user, and update state to avoid redundant commands
3551 *
3552 * NULL or empty argument is taken to mean restoring the session default
3553 */
3554static void
3555_becomeUser(ArchiveHandle *AH, const char *user)
3556{
3557 if (!user)
3558 user = ""; /* avoid null pointers */
3559
3560 if (AH->currUser && strcmp(AH->currUser, user) == 0)
3561 return; /* no need to do anything */
3562
 3563 _doSetSessionAuth(AH, user);
 3564
3565 /*
3566 * NOTE: currUser keeps track of what the imaginary session user in our
3567 * script is
3568 */
3569 free(AH->currUser);
3570 AH->currUser = pg_strdup(user);
3571}
3572
3573/*
3574 * Become the owner of the given TOC entry object. If
3575 * changes in ownership are not allowed, this doesn't do anything.
3576 */
3577static void
3578_becomeOwner(ArchiveHandle *AH, TocEntry *te)
3579{
3580 RestoreOptions *ropt = AH->public.ropt;
3581
3582 if (ropt && (ropt->noOwner || !ropt->use_setsessauth))
3583 return;
3584
3585 _becomeUser(AH, te->owner);
3586}
3587
3588
3589/*
3590 * Issue the commands to select the specified schema as the current schema
3591 * in the target database.
3592 */
3593static void
3594_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
3595{
3596 PQExpBuffer qry;
3597
3598 /*
3599 * If there was a SEARCHPATH TOC entry, we're supposed to just stay with
3600 * that search_path rather than switching to entry-specific paths.
3601 * Otherwise, it's an old archive that will not restore correctly unless
3602 * we set the search_path as it's expecting.
3603 */
3604 if (AH->public.searchpath)
3605 return;
3606
3607 if (!schemaName || *schemaName == '\0' ||
3608 (AH->currSchema && strcmp(AH->currSchema, schemaName) == 0))
3609 return; /* no need to do anything */
3610
3611 qry = createPQExpBuffer();
3612
3613 appendPQExpBuffer(qry, "SET search_path = %s",
3614 fmtId(schemaName));
3615 if (strcmp(schemaName, "pg_catalog") != 0)
3616 appendPQExpBufferStr(qry, ", pg_catalog");
3617
3618 if (RestoringToDB(AH))
3619 {
3620 PGresult *res;
3621
3622 res = PQexec(AH->connection, qry->data);
3623
3624 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
 3625   warn_or_exit_horribly(AH,
 3626      "could not set \"search_path\" to \"%s\": %s",
3627 schemaName, PQerrorMessage(AH->connection));
3628
3629 PQclear(res);
3630 }
3631 else
3632 ahprintf(AH, "%s;\n\n", qry->data);
3633
3634 free(AH->currSchema);
3635 AH->currSchema = pg_strdup(schemaName);
3636
3637 destroyPQExpBuffer(qry);
3638}
3639
3640/*
3641 * Issue the commands to select the specified tablespace as the current one
3642 * in the target database.
3643 */
3644static void
3645_selectTablespace(ArchiveHandle *AH, const char *tablespace)
3646{
3647 RestoreOptions *ropt = AH->public.ropt;
3648 PQExpBuffer qry;
3649 const char *want,
3650 *have;
3651
3652 /* do nothing in --no-tablespaces mode */
3653 if (ropt->noTablespace)
3654 return;
3655
3656 have = AH->currTablespace;
3657 want = tablespace;
3658
3659 /* no need to do anything for non-tablespace object */
3660 if (!want)
3661 return;
3662
3663 if (have && strcmp(want, have) == 0)
3664 return; /* no need to do anything */
3665
3666 qry = createPQExpBuffer();
3667
3668 if (strcmp(want, "") == 0)
3669 {
3670 /* We want the tablespace to be the database's default */
3671 appendPQExpBufferStr(qry, "SET default_tablespace = ''");
3672 }
3673 else
3674 {
3675 /* We want an explicit tablespace */
3676 appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want));
3677 }
3678
3679 if (RestoringToDB(AH))
3680 {
3681 PGresult *res;
3682
3683 res = PQexec(AH->connection, qry->data);
3684
3685 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
 3686   warn_or_exit_horribly(AH,
 3687      "could not set \"default_tablespace\" to %s: %s",
3688 fmtId(want), PQerrorMessage(AH->connection));
3689
3690 PQclear(res);
3691 }
3692 else
3693 ahprintf(AH, "%s;\n\n", qry->data);
3694
3695 free(AH->currTablespace);
3696 AH->currTablespace = pg_strdup(want);
3697
3698 destroyPQExpBuffer(qry);
3699}
3700
3701/*
3702 * Set the proper default_table_access_method value for the table.
3703 */
3704static void
3705_selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
3706{
3707 RestoreOptions *ropt = AH->public.ropt;
3708 PQExpBuffer cmd;
3709 const char *want,
3710 *have;
3711
3712 /* do nothing in --no-table-access-method mode */
3713 if (ropt->noTableAm)
3714 return;
3715
3716 have = AH->currTableAm;
3717 want = tableam;
3718
3719 if (!want)
3720 return;
3721
3722 if (have && strcmp(want, have) == 0)
3723 return;
3724
3725 cmd = createPQExpBuffer();
3726 appendPQExpBuffer(cmd, "SET default_table_access_method = %s;", fmtId(want));
3727
3728 if (RestoringToDB(AH))
3729 {
3730 PGresult *res;
3731
3732 res = PQexec(AH->connection, cmd->data);
3733
3734 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
 3735   warn_or_exit_horribly(AH,
 3736      "could not set \"default_table_access_method\": %s",
 3737      PQerrorMessage(AH->connection));
 3738
3739 PQclear(res);
3740 }
3741 else
3742 ahprintf(AH, "%s\n\n", cmd->data);
3743
3744 destroyPQExpBuffer(cmd);
3745
3746 free(AH->currTableAm);
3747 AH->currTableAm = pg_strdup(want);
3748}
3749
3750/*
3751 * Set the proper default table access method for a table without storage.
3752 * Currently, this is required only for partitioned tables with a table AM.
3753 */
3754static void
3755_printTableAccessMethodNoStorage(ArchiveHandle *AH, TocEntry *te)
3756{
3757 RestoreOptions *ropt = AH->public.ropt;
3758 const char *tableam = te->tableam;
3759 PQExpBuffer cmd;
3760
3761 /* do nothing in --no-table-access-method mode */
3762 if (ropt->noTableAm)
3763 return;
3764
3765 if (!tableam)
3766 return;
3767
3768 Assert(te->relkind == RELKIND_PARTITIONED_TABLE);
3769
3770 cmd = createPQExpBuffer();
3771
3772 appendPQExpBufferStr(cmd, "ALTER TABLE ");
3773 appendPQExpBuffer(cmd, "%s ", fmtQualifiedId(te->namespace, te->tag));
3774 appendPQExpBuffer(cmd, "SET ACCESS METHOD %s;",
3775 fmtId(tableam));
3776
3777 if (RestoringToDB(AH))
3778 {
3779 PGresult *res;
3780
3781 res = PQexec(AH->connection, cmd->data);
3782
3783 if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
 3784   warn_or_exit_horribly(AH,
 3785      "could not alter table access method: %s",
 3786      PQerrorMessage(AH->connection));
 3787   PQclear(res);
3788 }
3789 else
3790 ahprintf(AH, "%s\n\n", cmd->data);
3791
3792 destroyPQExpBuffer(cmd);
3793}
3794
3795/*
3796 * Extract an object description for a TOC entry, and append it to buf.
3797 *
3798 * This is used for ALTER ... OWNER TO.
3799 *
3800 * If the object type has no owner, do nothing.
3801 */
3802static void
3803_getObjectDescription(PQExpBuffer buf, const TocEntry *te)
3804{
3805 const char *type = te->desc;
3806
3807 /* objects that don't require special decoration */
3808 if (strcmp(type, "COLLATION") == 0 ||
3809 strcmp(type, "CONVERSION") == 0 ||
3810 strcmp(type, "DOMAIN") == 0 ||
3811 strcmp(type, "FOREIGN TABLE") == 0 ||
3812 strcmp(type, "MATERIALIZED VIEW") == 0 ||
3813 strcmp(type, "SEQUENCE") == 0 ||
3814 strcmp(type, "STATISTICS") == 0 ||
3815 strcmp(type, "TABLE") == 0 ||
3816 strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
3817 strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
3818 strcmp(type, "TYPE") == 0 ||
3819 strcmp(type, "VIEW") == 0 ||
3820 /* non-schema-specified objects */
3821 strcmp(type, "DATABASE") == 0 ||
3822 strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
3823 strcmp(type, "SCHEMA") == 0 ||
3824 strcmp(type, "EVENT TRIGGER") == 0 ||
3825 strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
3826 strcmp(type, "SERVER") == 0 ||
3827 strcmp(type, "PUBLICATION") == 0 ||
3828 strcmp(type, "SUBSCRIPTION") == 0)
3829 {
3830 appendPQExpBuffer(buf, "%s ", type);
3831 if (te->namespace && *te->namespace)
3832 appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
 3833   appendPQExpBufferStr(buf, fmtId(te->tag));
 3834  }
3835 /* LOs just have a name, but it's numeric so must not use fmtId */
3836 else if (strcmp(type, "BLOB") == 0)
3837 {
3838 appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
3839 }
3840
3841 /*
3842 * These object types require additional decoration. Fortunately, the
3843 * information needed is exactly what's in the DROP command.
3844 */
3845 else if (strcmp(type, "AGGREGATE") == 0 ||
3846 strcmp(type, "FUNCTION") == 0 ||
3847 strcmp(type, "OPERATOR") == 0 ||
3848 strcmp(type, "OPERATOR CLASS") == 0 ||
3849 strcmp(type, "OPERATOR FAMILY") == 0 ||
3850 strcmp(type, "PROCEDURE") == 0)
3851 {
3852 /* Chop "DROP " off the front and make a modifiable copy */
3853 char *first = pg_strdup(te->dropStmt + 5);
3854 char *last;
3855
3856 /* point to last character in string */
3857 last = first + strlen(first) - 1;
3858
3859 /* Strip off any ';' or '\n' at the end */
3860 while (last >= first && (*last == '\n' || *last == ';'))
3861 last--;
3862 *(last + 1) = '\0';
3863
3864 appendPQExpBufferStr(buf, first);
3865
3866 free(first);
3867 return;
3868 }
3869 /* these object types don't have separate owners */
3870 else if (strcmp(type, "CAST") == 0 ||
3871 strcmp(type, "CHECK CONSTRAINT") == 0 ||
3872 strcmp(type, "CONSTRAINT") == 0 ||
3873 strcmp(type, "DATABASE PROPERTIES") == 0 ||
3874 strcmp(type, "DEFAULT") == 0 ||
3875 strcmp(type, "FK CONSTRAINT") == 0 ||
3876 strcmp(type, "INDEX") == 0 ||
3877 strcmp(type, "RULE") == 0 ||
3878 strcmp(type, "TRIGGER") == 0 ||
3879 strcmp(type, "ROW SECURITY") == 0 ||
3880 strcmp(type, "POLICY") == 0 ||
3881 strcmp(type, "USER MAPPING") == 0)
3882 {
3883 /* do nothing */
3884 }
3885 else
3886 pg_fatal("don't know how to set owner for object type \"%s\"", type);
3887}
3888
3889/*
3890 * Emit the SQL commands to create the object represented by a TOC entry
3891 *
3892 * This now also includes issuing an ALTER OWNER command to restore the
3893 * object's ownership, if wanted. But note that the object's permissions
3894 * will remain at default, until the matching ACL TOC entry is restored.
3895 */
3896static void
3897_printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
3898{
3899 RestoreOptions *ropt = AH->public.ropt;
3900
3901 /*
3902 * Select owner, schema, tablespace and default AM as necessary. The
3903 * default access method for partitioned tables is handled after
3904 * generating the object definition, as it requires an ALTER command
3905 * rather than SET.
3906 */
3907 _becomeOwner(AH, te);
3908 _selectOutputSchema(AH, te->namespace);
 3909 _selectTablespace(AH, te->tablespace);
 3910 if (te->relkind != RELKIND_PARTITIONED_TABLE)
 3911  _selectTableAccessMethod(AH, te->tableam);
 3912
3913 /* Emit header comment for item */
3914 if (!AH->noTocComments)
3915 {
3916 char *sanitized_name;
3917 char *sanitized_schema;
3918 char *sanitized_owner;
3919
3920 ahprintf(AH, "--\n");
3921 if (AH->public.verbose)
3922 {
3923 ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
3924 te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
3925 if (te->nDeps > 0)
3926 {
3927 int i;
3928
3929 ahprintf(AH, "-- Dependencies:");
3930 for (i = 0; i < te->nDeps; i++)
3931 ahprintf(AH, " %d", te->dependencies[i]);
3932 ahprintf(AH, "\n");
3933 }
3934 }
3935
3936 sanitized_name = sanitize_line(te->tag, false);
3937 sanitized_schema = sanitize_line(te->namespace, true);
3938 sanitized_owner = sanitize_line(ropt->noOwner ? NULL : te->owner, true);
3939
3940 ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s",
3941 pfx, sanitized_name, te->desc, sanitized_schema,
3942 sanitized_owner);
3943
3944 free(sanitized_name);
3945 free(sanitized_schema);
3946 free(sanitized_owner);
3947
3948 if (te->tablespace && strlen(te->tablespace) > 0 && !ropt->noTablespace)
3949 {
3950 char *sanitized_tablespace;
3951
3952 sanitized_tablespace = sanitize_line(te->tablespace, false);
3953 ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
3954 free(sanitized_tablespace);
3955 }
3956 ahprintf(AH, "\n");
3957
3958 if (AH->PrintExtraTocPtr != NULL)
3959 AH->PrintExtraTocPtr(AH, te);
3960 ahprintf(AH, "--\n\n");
3961 }
3962
3963 /*
3964 * Actually print the definition. Normally we can just print the defn
3965 * string if any, but we have four special cases:
3966 *
 3967  * 1. A crude hack for suppressing the AUTHORIZATION clause that old pg_dump
3968 * versions put into CREATE SCHEMA. Don't mutate the variant for schema
3969 * "public" that is a comment. We have to do this when --no-owner mode is
3970 * selected. This is ugly, but I see no other good way ...
3971 *
3972 * 2. BLOB METADATA entries need special processing since their defn
3973 * strings are just lists of OIDs, not complete SQL commands.
3974 *
3975 * 3. ACL LARGE OBJECTS entries need special processing because they
3976 * contain only one copy of the ACL GRANT/REVOKE commands, which we must
3977 * apply to each large object listed in the associated BLOB METADATA.
3978 *
3979 * 4. Entries with a defnDumper need to call it to generate the
3980 * definition. This is primarily intended to provide a way to save memory
3981 * for objects that would otherwise need a lot of it (e.g., statistics
3982 * data).
3983 */
3984 if (ropt->noOwner &&
3985 strcmp(te->desc, "SCHEMA") == 0 && strncmp(te->defn, "--", 2) != 0)
3986 {
3987 ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag));
3988 }
3989 else if (strcmp(te->desc, "BLOB METADATA") == 0)
3990 {
3991 IssueCommandPerBlob(AH, te, "SELECT pg_catalog.lo_create('", "')");
3992 }
3993 else if (strcmp(te->desc, "ACL") == 0 &&
3994 strncmp(te->tag, "LARGE OBJECTS", 13) == 0)
3995 {
3996 IssueACLPerBlob(AH, te);
3997 }
3998 else if (te->defnLen && AH->format != archTar)
3999 {
4000 /*
4001 * If defnLen is set, the defnDumper has already been called for this
4002 * TOC entry. We don't normally expect a defnDumper to be called for
4003 * a TOC entry a second time in _printTocEntry(), but there's an
4004 * exception. The tar format first calls WriteToc(), which scans the
4005 * entire TOC, and then it later calls RestoreArchive() to generate
4006 * restore.sql, which scans the TOC again. There doesn't appear to be
4007 * a good way to prevent a second defnDumper call in this case without
4008 * storing the definition in memory, which defeats the purpose. This
4009 * second defnDumper invocation should generate the same output as the
4010 * first, but even if it doesn't, the worst-case scenario is that
4011 * restore.sql might have different statistics data than the archive.
4012 *
4013 * In all other cases, encountering a TOC entry a second time in
4014 * _printTocEntry() is unexpected, so we fail because one of our
4015 * assumptions must no longer hold true.
4016 *
4017 * XXX This is a layering violation, but the alternative is an awkward
4018 * and complicated callback infrastructure for this special case. This
4019 * might be worth revisiting in the future.
4020 */
4021 pg_fatal("unexpected TOC entry in _printTocEntry(): %d %s %s",
4022 te->dumpId, te->desc, te->tag);
4023 }
4024 else if (te->defnDumper)
4025 {
4026 char *defn = te->defnDumper((Archive *) AH, te->defnDumperArg, te);
4027
4028 te->defnLen = ahprintf(AH, "%s\n\n", defn);
4029 pg_free(defn);
4030 }
4031 else if (te->defn && strlen(te->defn) > 0)
4032 {
4033 ahprintf(AH, "%s\n\n", te->defn);
4034
4035 /*
4036 * If the defn string contains multiple SQL commands, txn_size mode
4037 * should count it as N actions not one. But rather than build a full
4038 * SQL parser, approximate this by counting semicolons. One case
4039 * where that tends to be badly fooled is function definitions, so
4040 * ignore them. (restore_toc_entry will count one action anyway.)
4041 */
4042 if (ropt->txn_size > 0 &&
4043 strcmp(te->desc, "FUNCTION") != 0 &&
4044 strcmp(te->desc, "PROCEDURE") != 0)
4045 {
4046 const char *p = te->defn;
4047 int nsemis = 0;
4048
4049 while ((p = strchr(p, ';')) != NULL)
4050 {
4051 nsemis++;
4052 p++;
4053 }
4054 if (nsemis > 1)
4055 AH->txnCount += nsemis - 1;
4056 }
4057 }
4058
4059 /*
4060 * If we aren't using SET SESSION AUTH to determine ownership, we must
4061 * instead issue an ALTER OWNER command. Schema "public" is special; when
4062 * a dump emits a comment in lieu of creating it, we use ALTER OWNER even
4063 * when using SET SESSION for all other objects. We assume that anything
4064 * without a DROP command is not a separately ownable object.
4065 */
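 /*
  * For example, for a table "mytab" in schema "public" owned by role
  * "alice", the block below emits: ALTER TABLE public.mytab OWNER TO alice;
  */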
4066 if (!ropt->noOwner &&
4067 (!ropt->use_setsessauth ||
4068 (strcmp(te->desc, "SCHEMA") == 0 &&
4069 strncmp(te->defn, "--", 2) == 0)) &&
4070 te->owner && strlen(te->owner) > 0 &&
4071 te->dropStmt && strlen(te->dropStmt) > 0)
4072 {
4073 if (strcmp(te->desc, "BLOB METADATA") == 0)
4074 {
4075 /* BLOB METADATA needs special code to handle multiple LOs */
4076 char *cmdEnd = psprintf(" OWNER TO %s", fmtId(te->owner));
4077
4078 IssueCommandPerBlob(AH, te, "ALTER LARGE OBJECT ", cmdEnd);
4079 pg_free(cmdEnd);
4080 }
4081 else
4082 {
4083 /* For all other cases, we can use _getObjectDescription */
4084 PQExpBufferData temp;
4085
4086 initPQExpBuffer(&temp);
4087 _getObjectDescription(&temp, te);
4088
4089 /*
4090 * If _getObjectDescription() didn't fill the buffer, then there
4091 * is no owner.
4092 */
4093 if (temp.data[0])
4094 ahprintf(AH, "ALTER %s OWNER TO %s;\n\n",
4095 temp.data, fmtId(te->owner));
4096 termPQExpBuffer(&temp);
4097 }
4098 }
4099
4100 /*
4101 * Select a partitioned table's default AM, once the table definition has
4102 * been generated.
4103 */
4104 if (te->relkind == RELKIND_PARTITIONED_TABLE)
 4105  _printTableAccessMethodNoStorage(AH, te);
 4106
4107 /*
4108 * If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
4109 * commands, so we can no longer assume we know the current auth setting.
4110 */
4111 if (_tocEntryIsACL(te))
4112 {
4113 free(AH->currUser);
4114 AH->currUser = NULL;
4115 }
4116}
4117
4118/*
4119 * Write the file header for a custom-format archive
4120 */
4121void
4122WriteHead(ArchiveHandle *AH)
4123{
4124 struct tm crtm;
4125
4126 AH->WriteBufPtr(AH, "PGDMP", 5); /* Magic code */
4127 AH->WriteBytePtr(AH, ARCHIVE_MAJOR(AH->version));
4128 AH->WriteBytePtr(AH, ARCHIVE_MINOR(AH->version));
4129 AH->WriteBytePtr(AH, ARCHIVE_REV(AH->version));
4130 AH->WriteBytePtr(AH, AH->intSize);
4131 AH->WriteBytePtr(AH, AH->offSize);
4132 AH->WriteBytePtr(AH, AH->format);
 4133 AH->WriteBytePtr(AH, AH->compression_spec.algorithm);
 4134 crtm = *localtime(&AH->createDate);
4135 WriteInt(AH, crtm.tm_sec);
4136 WriteInt(AH, crtm.tm_min);
4137 WriteInt(AH, crtm.tm_hour);
4138 WriteInt(AH, crtm.tm_mday);
4139 WriteInt(AH, crtm.tm_mon);
4140 WriteInt(AH, crtm.tm_year);
4141 WriteInt(AH, crtm.tm_isdst);
4142 WriteStr(AH, PQdb(AH->connection));
 4143 WriteStr(AH, AH->public.remoteVersionStr);
 4144 WriteStr(AH, PG_VERSION);
4145}
4146
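/*
 * Read the header written by WriteHead above: the 5-byte "PGDMP" magic
 * (unless the format sniffer already consumed it), three version bytes, the
 * integer and offset widths, the format byte, the compression specification,
 * the creation timestamp, the database name, and the server and pg_dump
 * version strings.  Sanity-check what we can against the running build.
 */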
4147void
4148ReadHead(ArchiveHandle *AH)
4149{
4150 char *errmsg;
4151 char vmaj,
4152 vmin,
4153 vrev;
4154 int fmt;
4155
4156 /*
4157 * If we haven't already read the header, do so.
4158 *
4159 * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
4160 * way to unify the cases?
4161 */
4162 if (!AH->readHeader)
4163 {
4164 char tmpMag[7];
4165
4166 AH->ReadBufPtr(AH, tmpMag, 5);
4167
4168 if (strncmp(tmpMag, "PGDMP", 5) != 0)
4169 pg_fatal("did not find magic string in file header");
4170 }
4171
4172 vmaj = AH->ReadBytePtr(AH);
4173 vmin = AH->ReadBytePtr(AH);
4174
4175 if (vmaj > 1 || (vmaj == 1 && vmin > 0)) /* Version > 1.0 */
4176 vrev = AH->ReadBytePtr(AH);
4177 else
4178 vrev = 0;
4179
4180 AH->version = MAKE_ARCHIVE_VERSION(vmaj, vmin, vrev);
4181
4182 if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
4183 pg_fatal("unsupported version (%d.%d) in file header",
4184 vmaj, vmin);
4185
4186 AH->intSize = AH->ReadBytePtr(AH);
4187 if (AH->intSize > 32)
4188 pg_fatal("sanity check on integer size (%lu) failed",
4189 (unsigned long) AH->intSize);
4190
4191 if (AH->intSize > sizeof(int))
4192 pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
4193
4194 if (AH->version >= K_VERS_1_7)
4195 AH->offSize = AH->ReadBytePtr(AH);
4196 else
4197 AH->offSize = AH->intSize;
4198
4199 fmt = AH->ReadBytePtr(AH);
4200
4201 if (AH->format != fmt)
4202 pg_fatal("expected format (%d) differs from format found in file (%d)",
4203 AH->format, fmt);
4204
4205 if (AH->version >= K_VERS_1_15)
 4206  AH->compression_spec.algorithm = AH->ReadBytePtr(AH);
 4207 else if (AH->version >= K_VERS_1_2)
4208 {
4209 /* Guess the compression method based on the level */
4210 if (AH->version < K_VERS_1_4)
4211 AH->compression_spec.level = AH->ReadBytePtr(AH);
4212 else
4213 AH->compression_spec.level = ReadInt(AH);
4214
4215 if (AH->compression_spec.level != 0)
 4216   AH->compression_spec.algorithm = PG_COMPRESSION_GZIP;
 4217 }
4218 else
 4219  AH->compression_spec.algorithm = PG_COMPRESSION_NONE;
 4220
 4221 errmsg = supports_compression(AH->compression_spec);
 4222 if (errmsg)
4223 {
4224 pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
4225 errmsg);
4226 pg_free(errmsg);
4227 }
4228
4229 if (AH->version >= K_VERS_1_4)
4230 {
4231 struct tm crtm;
4232
4233 crtm.tm_sec = ReadInt(AH);
4234 crtm.tm_min = ReadInt(AH);
4235 crtm.tm_hour = ReadInt(AH);
4236 crtm.tm_mday = ReadInt(AH);
4237 crtm.tm_mon = ReadInt(AH);
4238 crtm.tm_year = ReadInt(AH);
4239 crtm.tm_isdst = ReadInt(AH);
4240
4241 /*
4242 * Newer versions of glibc have mktime() report failure if tm_isdst is
4243 * inconsistent with the prevailing timezone, e.g. tm_isdst = 1 when
4244 * TZ=UTC. This is problematic when restoring an archive under a
4245 * different timezone setting. If we get a failure, try again with
4246 * tm_isdst set to -1 ("don't know").
4247 *
4248 * XXX with or without this hack, we reconstruct createDate
4249 * incorrectly when the prevailing timezone is different from
4250 * pg_dump's. Next time we bump the archive version, we should flush
4251 * this representation and store a plain seconds-since-the-Epoch
4252 * timestamp instead.
4253 */
4254 AH->createDate = mktime(&crtm);
4255 if (AH->createDate == (time_t) -1)
4256 {
4257 crtm.tm_isdst = -1;
4258 AH->createDate = mktime(&crtm);
4259 if (AH->createDate == (time_t) -1)
4260 pg_log_warning("invalid creation date in header");
4261 }
4262 }
4263
4264 if (AH->version >= K_VERS_1_4)
4265 {
4266 AH->archdbname = ReadStr(AH);
4267 }
4268
4269 if (AH->version >= K_VERS_1_10)
4270 {
4271 AH->archiveRemoteVersion = ReadStr(AH);
4272 AH->archiveDumpVersion = ReadStr(AH);
4273 }
4274}
4275
4276
4277/*
4278 * checkSeek
4279 * check to see if ftell/fseek can be performed.
4280 */
4281bool
4282checkSeek(FILE *fp)
4283{
4284 pgoff_t tpos;
4285
4286 /* Check that ftello works on this file */
4287 tpos = ftello(fp);
4288 if (tpos < 0)
4289 return false;
4290
4291 /*
4292 * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
4293 * this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
4294 * successful no-op even on files that are otherwise unseekable.
4295 */
4296 if (fseeko(fp, tpos, SEEK_SET) != 0)
4297 return false;
4298
4299 return true;
4300}
4301
4302
4303/*
4304 * dumpTimestamp
4305 */
4306static void
4307dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
4308{
4309 char buf[64];
4310
4311 if (strftime(buf, sizeof(buf), PGDUMP_STRFTIME_FMT, localtime(&tim)) != 0)
4312 ahprintf(AH, "-- %s %s\n\n", msg, buf);
4313}
4314
4315/*
4316 * Main engine for parallel restore.
4317 *
4318 * Parallel restore is done in three phases. In this first phase,
4319 * we'll process all SECTION_PRE_DATA TOC entries that are allowed to be
4320 * processed in the RESTORE_PASS_MAIN pass. (In practice, that's all
4321 * PRE_DATA items other than ACLs.) Entries we can't process now are
4322 * added to the pending_list for later phases to deal with.
4323 */
4324static void
4325 restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
4326 {
4327 bool skipped_some;
4328 TocEntry *next_work_item;
4329
4330 pg_log_debug("entering restore_toc_entries_prefork");
4331
4332 /* Adjust dependency information */
4333 fix_dependencies(AH);
4334
4335 /*
4336 * Do all the early stuff in a single connection in the parent. There's no
4337 * great point in running it in parallel, in fact it will actually run
4338 * faster in a single connection because we avoid all the connection and
4339 * setup overhead. Also, pre-9.2 pg_dump versions were not very good
4340 * about showing all the dependencies of SECTION_PRE_DATA items, so we do
4341 * not risk trying to process them out-of-order.
4342 *
4343 * Stuff that we can't do immediately gets added to the pending_list.
4344 * Note: we don't yet filter out entries that aren't going to be restored.
4345 * They might participate in dependency chains connecting entries that
4346 * should be restored, so we treat them as live until we actually process
4347 * them.
4348 *
4349 * Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
4350 * before DATA items, and all DATA items before POST_DATA items. That is
4351 * not certain to be true in older archives, though, and in any case use
4352 * of a list file would destroy that ordering (cf. SortTocFromFile). So
4353 * this loop cannot assume that it holds.
4354 */
4355 AH->restorePass = RESTORE_PASS_MAIN;
4356 skipped_some = false;
4357 for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
4358 {
4359 bool do_now = true;
4360
4361 if (next_work_item->section != SECTION_PRE_DATA)
4362 {
4363 /* DATA and POST_DATA items are just ignored for now */
4364 if (next_work_item->section == SECTION_DATA ||
4365 next_work_item->section == SECTION_POST_DATA)
4366 {
4367 do_now = false;
4368 skipped_some = true;
4369 }
4370 else
4371 {
4372 /*
4373 * SECTION_NONE items, such as comments, can be processed now
4374 * if we are still in the PRE_DATA part of the archive. Once
4375 * we've skipped any items, we have to consider whether the
4376 * comment's dependencies are satisfied, so skip it for now.
4377 */
4378 if (skipped_some)
4379 do_now = false;
4380 }
4381 }
4382
4383 /*
4384 * Also skip items that need to be forced into later passes. We need
4385 * not set skipped_some in this case, since by assumption no main-pass
4386 * items could depend on these.
4387 */
4388 if (_tocEntryRestorePass(next_work_item) != RESTORE_PASS_MAIN)
4389 do_now = false;
4390
4391 if (do_now)
4392 {
4393 /* OK, restore the item and update its dependencies */
4394 pg_log_info("processing item %d %s %s",
4395 next_work_item->dumpId,
4396 next_work_item->desc, next_work_item->tag);
4397
4398 (void) restore_toc_entry(AH, next_work_item, false);
4399
4400 /* Reduce dependencies, but don't move anything to ready_heap */
4401 reduce_dependencies(AH, next_work_item, NULL);
4402 }
4403 else
4404 {
4405 /* Nope, so add it to pending_list */
4406 pending_list_append(pending_list, next_work_item);
4407 }
4408 }
4409
4410 /*
4411 * In --transaction-size mode, we must commit the open transaction before
4412 * dropping the database connection. This also ensures that child workers
4413 * can see the objects we've created so far.
4414 */
4415 if (AH->public.ropt->txn_size > 0)
4416 CommitTransaction(&AH->public);
4417
4418 /*
4419 * Now close parent connection in prep for parallel steps. We do this
4420 * mainly to ensure that we don't exceed the specified number of parallel
4421 * connections.
4422 */
4423 DisconnectDatabase(&AH->public);
4424 
4425 /* blow away any transient state from the old connection */
4426 free(AH->currUser);
4427 AH->currUser = NULL;
4428 free(AH->currSchema);
4429 AH->currSchema = NULL;
4430 free(AH->currTablespace);
4431 AH->currTablespace = NULL;
4432 free(AH->currTableAm);
4433 AH->currTableAm = NULL;
4434}
4435
4436/*
4437 * Main engine for parallel restore.
4438 *
4439 * Parallel restore is done in three phases. In this second phase,
4440 * we process entries by dispatching them to parallel worker children
4441 * (processes on Unix, threads on Windows), each of which connects
4442 * separately to the database. Inter-entry dependencies are respected,
4443 * and so is the RestorePass multi-pass structure. When we can no longer
4444 * make any entries ready to process, we exit. Normally, there will be
4445 * nothing left to do; but if there is, the third phase will mop up.
4446 */
4447static void
4448 restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
4449 TocEntry *pending_list)
4450{
4451 binaryheap *ready_heap;
4452 TocEntry *next_work_item;
4453
4454 pg_log_debug("entering restore_toc_entries_parallel");
4455
4456 /* Set up ready_heap with enough room for all known TocEntrys */
4457 ready_heap = binaryheap_allocate(AH->tocCount,
4458 TocEntrySizeCompareBinaryheap,
4459 NULL);
4460
4461 /*
4462 * The pending_list contains all items that we need to restore. Move all
4463 * items that are available to process immediately into the ready_heap.
4464 * After this setup, the pending list is everything that needs to be done
4465 * but is blocked by one or more dependencies, while the ready heap
4466 * contains items that have no remaining dependencies and are OK to
4467 * process in the current restore pass.
4468 */
4469 AH->restorePass = RESTORE_PASS_MAIN;
4470 move_to_ready_heap(pending_list, ready_heap, AH->restorePass);
4471
4472 /*
4473 * main parent loop
4474 *
4475 * Keep going until there is no worker still running AND there is no work
4476 * left to be done. Note invariant: at top of loop, there should always
4477 * be at least one worker available to dispatch a job to.
4478 */
4479 pg_log_info("entering main parallel loop");
4480
4481 for (;;)
4482 {
4483 /* Look for an item ready to be dispatched to a worker */
4484 next_work_item = pop_next_work_item(ready_heap, pstate);
4485 if (next_work_item != NULL)
4486 {
4487 /* If not to be restored, don't waste time launching a worker */
4488 if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA | REQ_STATS)) == 0)
4489 {
4490 pg_log_info("skipping item %d %s %s",
4491 next_work_item->dumpId,
4492 next_work_item->desc, next_work_item->tag);
4493 /* Update its dependencies as though we'd completed it */
4494 reduce_dependencies(AH, next_work_item, ready_heap);
4495 /* Loop around to see if anything else can be dispatched */
4496 continue;
4497 }
4498
4499 pg_log_info("launching item %d %s %s",
4500 next_work_item->dumpId,
4501 next_work_item->desc, next_work_item->tag);
4502
4503 /* Dispatch to some worker */
4504 DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
4505 mark_restore_job_done, ready_heap);
4506 }
4507 else if (IsEveryWorkerIdle(pstate))
4508 {
4509 /*
4510 * Nothing is ready and no worker is running, so we're done with
4511 * the current pass or maybe with the whole process.
4512 */
4513 if (AH->restorePass == RESTORE_PASS_LAST)
4514 break; /* No more parallel processing is possible */
4515
4516 /* Advance to next restore pass */
4517 AH->restorePass++;
4518 /* That probably allows some stuff to be made ready */
4519 move_to_ready_heap(pending_list, ready_heap, AH->restorePass);
4520 /* Loop around to see if anything's now ready */
4521 continue;
4522 }
4523 else
4524 {
4525 /*
4526 * We have nothing ready, but at least one child is working, so
4527 * wait for some subjob to finish.
4528 */
4529 }
4530
4531 /*
4532 * Before dispatching another job, check to see if anything has
4533 * finished. We should check every time through the loop so as to
4534 * reduce dependencies as soon as possible. If we were unable to
4535 * dispatch any job this time through, wait until some worker finishes
4536 * (and, hopefully, unblocks some pending item). If we did dispatch
4537 * something, continue as soon as there's at least one idle worker.
4538 * Note that in either case, there's guaranteed to be at least one
4539 * idle worker when we return to the top of the loop. This ensures we
4540 * won't block inside DispatchJobForTocEntry, which would be
4541 * undesirable: we'd rather postpone dispatching until we see what's
4542 * been unblocked by finished jobs.
4543 */
4544 WaitForWorkers(AH, pstate,
4545 next_work_item ? WFW_ONE_IDLE : WFW_GOT_STATUS);
4546 }
4547
4548 /* There should now be nothing in ready_heap. */
4549 Assert(binaryheap_empty(ready_heap));
4550
4551 binaryheap_free(ready_heap);
4552
4553 pg_log_info("finished main parallel loop");
4554}
4555
4556/*
4557 * Main engine for parallel restore.
4558 *
4559 * Parallel restore is done in three phases. In this third phase,
4560 * we mop up any remaining TOC entries by processing them serially.
4561 * This phase normally should have nothing to do, but if we've somehow
4562 * gotten stuck due to circular dependencies or some such, this provides
4563 * at least some chance of completing the restore successfully.
4564 */
4565static void
4566 restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
4567 {
4568 RestoreOptions *ropt = AH->public.ropt;
4569 TocEntry *te;
4570
4571 pg_log_debug("entering restore_toc_entries_postfork");
4572
4573 /*
4574 * Now reconnect the single parent connection.
4575 */
4576 ConnectDatabaseAhx((Archive *) AH, &ropt->cparams, true);
4577
4578 /* re-establish fixed state */
4579 _doSetFixedOutputState(AH);
4580
4581 /*
4582 * Make sure there is no work left due to, say, circular dependencies, or
4583 * some other pathological condition. If so, do it in the single parent
4584 * connection. We don't sweat about RestorePass ordering; it's likely we
4585 * already violated that.
4586 */
4587 for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
4588 {
4589 pg_log_info("processing missed item %d %s %s",
4590 te->dumpId, te->desc, te->tag);
4591 (void) restore_toc_entry(AH, te, false);
4592 }
4593}
4594
4595/*
4596 * Check if te1 has an exclusive lock requirement for an item that te2 also
4597 * requires, whether or not te2's requirement is for an exclusive lock.
4598 */
4599static bool
4600 has_lock_conflicts(TocEntry *te1, TocEntry *te2)
4601 {
4602 int j,
4603 k;
4604
4605 for (j = 0; j < te1->nLockDeps; j++)
4606 {
4607 for (k = 0; k < te2->nDeps; k++)
4608 {
4609 if (te1->lockDeps[j] == te2->dependencies[k])
4610 return true;
4611 }
4612 }
4613 return false;
4614}
4615
4616
4617/*
4618 * Initialize the header of the pending-items list.
4619 *
4620 * This is a circular list with a dummy TocEntry as header, just like the
4621 * main TOC list; but we use separate list links so that an entry can be in
4622 * the main TOC list as well as in the pending list.
4623 */
4624static void
4625 pending_list_header_init(TocEntry *l)
4626 {
4627 l->pending_prev = l->pending_next = l;
4628}
4629
4630/* Append te to the end of the pending-list headed by l */
4631static void
4632 pending_list_append(TocEntry *l, TocEntry *te)
4633 {
4634 te->pending_prev = l->pending_prev;
4635 l->pending_prev->pending_next = te;
4636 l->pending_prev = te;
4637 te->pending_next = l;
4638}
4639
4640/* Remove te from the pending-list */
4641static void
4642 pending_list_remove(TocEntry *te)
4643 {
4644 te->pending_prev->pending_next = te->pending_next;
4645 te->pending_next->pending_prev = te->pending_prev;
4646 te->pending_prev = NULL;
4647 te->pending_next = NULL;
4648}
4649
4650
4651/* qsort comparator for sorting TocEntries by dataLength */
4652static int
4653TocEntrySizeCompareQsort(const void *p1, const void *p2)
4654{
4655 const TocEntry *te1 = *(const TocEntry *const *) p1;
4656 const TocEntry *te2 = *(const TocEntry *const *) p2;
4657
4658 /* Sort by decreasing dataLength */
4659 if (te1->dataLength > te2->dataLength)
4660 return -1;
4661 if (te1->dataLength < te2->dataLength)
4662 return 1;
4663
4664 /* For equal dataLengths, sort by dumpId, just to be stable */
4665 if (te1->dumpId < te2->dumpId)
4666 return -1;
4667 if (te1->dumpId > te2->dumpId)
4668 return 1;
4669
4670 return 0;
4671}
4672
4673/* binaryheap comparator for sorting TocEntries by dataLength */
4674static int
4675TocEntrySizeCompareBinaryheap(void *p1, void *p2, void *arg)
4676{
4677 /* return opposite of qsort comparator for max-heap */
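/* p1/p2 are the TocEntry pointers themselves here, so pass their addresses to match the qsort comparator's pointer-to-pointer signature */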
4678 return -TocEntrySizeCompareQsort(&p1, &p2);
4679}
4680
4681
4682/*
4683 * Move all immediately-ready items from pending_list to ready_heap.
4684 *
4685 * Items are considered ready if they have no remaining dependencies and
4686 * they belong in the current restore pass. (See also reduce_dependencies,
4687 * which applies the same logic one-at-a-time.)
4688 */
4689static void
4690 move_to_ready_heap(TocEntry *pending_list,
4691 binaryheap *ready_heap,
4692 RestorePass pass)
4693{
4694 TocEntry *te;
4695 TocEntry *next_te;
4696
4697 for (te = pending_list->pending_next; te != pending_list; te = next_te)
4698 {
4699 /* must save list link before possibly removing te from list */
4700 next_te = te->pending_next;
4701
4702 if (te->depCount == 0 &&
4703 _tocEntryRestorePass(te) == pass)
4704 {
4705 /* Remove it from pending_list ... */
4706 pending_list_remove(te);
4707 /* ... and add to ready_heap */
4708 binaryheap_add(ready_heap, te);
4709 }
4710 }
4711}
4712
4713/*
4714 * Find the next work item (if any) that is capable of being run now,
4715 * and remove it from the ready_heap.
4716 *
4717 * Returns the item, or NULL if nothing is runnable.
4718 *
4719 * To qualify, the item must have no remaining dependencies
4720 * and no requirements for locks that are incompatible with
4721 * items currently running. Items in the ready_heap are known to have
4722 * no remaining dependencies, but we have to check for lock conflicts.
4723 */
4724static TocEntry *
4725 pop_next_work_item(binaryheap *ready_heap,
4726 ParallelState *pstate)
4727{
4728 /*
4729 * Search the ready_heap until we find a suitable item. Note that we do a
4730 * sequential scan through the heap nodes, so even though we will first
4731 * try to choose the highest-priority item, we might end up picking
4732 * something with a much lower priority. However, we expect that we will
4733 * typically be able to pick one of the first few items, which should
4734 * usually have a relatively high priority.
4735 */
4736 for (int i = 0; i < binaryheap_size(ready_heap); i++)
4737 {
4738 TocEntry *te = (TocEntry *) binaryheap_get_node(ready_heap, i);
4739 bool conflicts = false;
4740
4741 /*
4742 * Check to see if the item would need exclusive lock on something
4743 * that a currently running item also needs lock on, or vice versa. If
4744 * so, we don't want to schedule them together.
4745 */
4746 for (int k = 0; k < pstate->numWorkers; k++)
4747 {
4748 TocEntry *running_te = pstate->te[k];
4749
4750 if (running_te == NULL)
4751 continue;
4752 if (has_lock_conflicts(te, running_te) ||
4753 has_lock_conflicts(running_te, te))
4754 {
4755 conflicts = true;
4756 break;
4757 }
4758 }
4759
4760 if (conflicts)
4761 continue;
4762
4763 /* passed all tests, so this item can run */
4764 binaryheap_remove_node(ready_heap, i);
4765 return te;
4766 }
4767
4768 pg_log_debug("no item ready");
4769 return NULL;
4770}
4771
4772
4773/*
4774 * Restore a single TOC item in parallel with others
4775 *
4776 * this is run in the worker, i.e. in a thread (Windows) or a separate process
4777 * (everything else). A worker process executes several such work items during
4778 * a parallel backup or restore. Once we terminate here and report back that
4779 * our work is finished, the leader process will assign us a new work item.
4780 */
4781int
4782 parallel_restore(ArchiveHandle *AH, TocEntry *te)
4783 {
4784 int status;
4785
4786 Assert(AH->connection != NULL);
4787
4788 /* Count only errors associated with this TOC entry */
4789 AH->public.n_errors = 0;
4790
4791 /* Restore the TOC item */
4792 status = restore_toc_entry(AH, te, true);
4793
4794 return status;
4795}
4796
4797
4798/*
4799 * Callback function that's invoked in the leader process after a step has
4800 * been parallel restored.
4801 *
4802 * Update status and reduce the dependency count of any dependent items.
4803 */
4804static void
4805 mark_restore_job_done(ArchiveHandle *AH,
4806 TocEntry *te,
4807 int status,
4808 void *callback_data)
4809{
4810 binaryheap *ready_heap = (binaryheap *) callback_data;
4811
4812 pg_log_info("finished item %d %s %s",
4813 te->dumpId, te->desc, te->tag);
4814
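/* status is the result code the worker returned for this TOC entry; nonzero values other than the WORKER_* codes below indicate a hard failure */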
4815 if (status == WORKER_CREATE_DONE)
4816 mark_create_done(AH, te);
4817 else if (status == WORKER_INHIBIT_DATA)
4818 {
4819 inhibit_data_for_failed_table(AH, te);
4820 AH->public.n_errors++;
4821 }
4822 else if (status == WORKER_IGNORED_ERRORS)
4823 AH->public.n_errors++;
4824 else if (status != 0)
4825 pg_fatal("worker process failed: exit code %d",
4826 status);
4827
4828 reduce_dependencies(AH, te, ready_heap);
4829}
4830
4831
4832/*
4833 * Process the dependency information into a form useful for parallel restore.
4834 *
4835 * This function takes care of fixing up some missing or badly designed
4836 * dependencies, and then prepares subsidiary data structures that will be
4837 * used in the main parallel-restore logic, including:
4838 * 1. We build the revDeps[] arrays of incoming dependency dumpIds.
4839 * 2. We set up depCount fields that are the number of as-yet-unprocessed
4840 * dependencies for each TOC entry.
4841 *
4842 * We also identify locking dependencies so that we can avoid trying to
4843 * schedule conflicting items at the same time.
4844 */
4845static void
4846 fix_dependencies(ArchiveHandle *AH)
4847 {
4848 TocEntry *te;
4849 int i;
4850
4851 /*
4852 * Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC
4853 * items are marked as not being in any parallel-processing list.
4854 */
4855 for (te = AH->toc->next; te != AH->toc; te = te->next)
4856 {
4857 te->depCount = te->nDeps;
4858 te->revDeps = NULL;
4859 te->nRevDeps = 0;
4860 te->pending_prev = NULL;
4861 te->pending_next = NULL;
4862 }
4863
4864 /*
4865 * POST_DATA items that are shown as depending on a table need to be
4866 * re-pointed to depend on that table's data, instead. This ensures they
4867 * won't get scheduled until the data has been loaded.
4868 */
4869 repoint_table_dependencies(AH);
4870 
4871 /*
4872 * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
4873 * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
4874 * one BLOB COMMENTS in such files.)
4875 */
4876 if (AH->version < K_VERS_1_11)
4877 {
4878 for (te = AH->toc->next; te != AH->toc; te = te->next)
4879 {
4880 if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
4881 {
4882 TocEntry *te2;
4883
4884 for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
4885 {
4886 if (strcmp(te2->desc, "BLOBS") == 0)
4887 {
4888 te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4889 te->dependencies[0] = te2->dumpId;
4890 te->nDeps++;
4891 te->depCount++;
4892 break;
4893 }
4894 }
4895 break;
4896 }
4897 }
4898 }
4899
4900 /*
4901 * At this point we start to build the revDeps reverse-dependency arrays,
4902 * so all changes of dependencies must be complete.
4903 */
4904
4905 /*
4906 * Count the incoming dependencies for each item. Also, it is possible
4907 * that the dependencies list items that are not in the archive at all
4908 * (that should not happen in 9.2 and later, but is highly likely in older
4909 * archives). Subtract such items from the depCounts.
4910 */
4911 for (te = AH->toc->next; te != AH->toc; te = te->next)
4912 {
4913 for (i = 0; i < te->nDeps; i++)
4914 {
4915 DumpId depid = te->dependencies[i];
4916
4917 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
4918 AH->tocsByDumpId[depid]->nRevDeps++;
4919 else
4920 te->depCount--;
4921 }
4922 }
4923
4924 /*
4925 * Allocate space for revDeps[] arrays, and reset nRevDeps so we can use
4926 * it as a counter below.
4927 */
4928 for (te = AH->toc->next; te != AH->toc; te = te->next)
4929 {
4930 if (te->nRevDeps > 0)
4931 te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId));
4932 te->nRevDeps = 0;
4933 }
4934
4935 /*
4936 * Build the revDeps[] arrays of incoming-dependency dumpIds. This had
4937 * better agree with the loops above.
4938 */
4939 for (te = AH->toc->next; te != AH->toc; te = te->next)
4940 {
4941 for (i = 0; i < te->nDeps; i++)
4942 {
4943 DumpId depid = te->dependencies[i];
4944
4945 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
4946 {
4947 TocEntry *otherte = AH->tocsByDumpId[depid];
4948
4949 otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
4950 }
4951 }
4952 }
4953
4954 /*
4955 * Lastly, work out the locking dependencies.
4956 */
4957 for (te = AH->toc->next; te != AH->toc; te = te->next)
4958 {
4959 te->lockDeps = NULL;
4960 te->nLockDeps = 0;
4961 identify_locking_dependencies(AH, te);
4962 }
4963}
4964
4965/*
4966 * Change dependencies on table items to depend on table data items instead,
4967 * but only in POST_DATA items.
4968 *
4969 * Also, for any item having such dependency(s), set its dataLength to the
4970 * largest dataLength of the table data items it depends on. This ensures
4971 * that parallel restore will prioritize larger jobs (index builds, FK
4972 * constraint checks, etc) over smaller ones, avoiding situations where we
4973 * end a restore with only one active job working on a large table.
4974 */
4975static void
4976 repoint_table_dependencies(ArchiveHandle *AH)
4977 {
4978 TocEntry *te;
4979 int i;
4980 DumpId olddep;
4981
4982 for (te = AH->toc->next; te != AH->toc; te = te->next)
4983 {
4984 if (te->section != SECTION_POST_DATA)
4985 continue;
4986 for (i = 0; i < te->nDeps; i++)
4987 {
4988 olddep = te->dependencies[i];
4989 if (olddep <= AH->maxDumpId &&
4990 AH->tableDataId[olddep] != 0)
4991 {
4992 DumpId tabledataid = AH->tableDataId[olddep];
4993 TocEntry *tabledatate = AH->tocsByDumpId[tabledataid];
4994
4995 te->dependencies[i] = tabledataid;
4996 te->dataLength = Max(te->dataLength, tabledatate->dataLength);
4997 pg_log_debug("transferring dependency %d -> %d to %d",
4998 te->dumpId, olddep, tabledataid);
4999 }
5000 }
5001 }
5002}
5003
5004/*
5005 * Identify which objects we'll need exclusive lock on in order to restore
5006 * the given TOC entry (*other* than the one identified by the TOC entry
5007 * itself). Record their dump IDs in the entry's lockDeps[] array.
5008 */
5009static void
5010 identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te)
5011 {
5012 DumpId *lockids;
5013 int nlockids;
5014 int i;
5015
5016 /*
5017 * We only care about this for POST_DATA items. PRE_DATA items are not
5018 * run in parallel, and DATA items are all independent by assumption.
5019 */
5020 if (te->section != SECTION_POST_DATA)
5021 return;
5022
5023 /* Quick exit if no dependencies at all */
5024 if (te->nDeps == 0)
5025 return;
5026
5027 /*
5028 * Most POST_DATA items are ALTER TABLEs or some moral equivalent of that,
5029 * and hence require exclusive lock. However, we know that CREATE INDEX
5030 * does not. (Maybe someday index-creating CONSTRAINTs will fall in that
5031 * category too ... but today is not that day.)
5032 */
5033 if (strcmp(te->desc, "INDEX") == 0)
5034 return;
5035
5036 /*
5037 * We assume the entry requires exclusive lock on each TABLE or TABLE DATA
5038 * item listed among its dependencies. Originally all of these would have
5039 * been TABLE items, but repoint_table_dependencies would have repointed
5040 * them to the TABLE DATA items if those are present (which they might not
5041 * be, eg in a schema-only dump). Note that all of the entries we are
5042 * processing here are POST_DATA; otherwise there might be a significant
5043 * difference between a dependency on a table and a dependency on its
5044 * data, so that closer analysis would be needed here.
5045 */
5046 lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId));
5047 nlockids = 0;
5048 for (i = 0; i < te->nDeps; i++)
5049 {
5050 DumpId depid = te->dependencies[i];
5051
5052 if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL &&
5053 ((strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0) ||
5054 strcmp(AH->tocsByDumpId[depid]->desc, "TABLE") == 0))
5055 lockids[nlockids++] = depid;
5056 }
5057
5058 if (nlockids == 0)
5059 {
5060 free(lockids);
5061 return;
5062 }
5063
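/* Shrink the scratch array down to the number of lock dependencies actually found */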
5064 te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId));
5065 te->nLockDeps = nlockids;
5066}
5067
5068/*
5069 * Remove the specified TOC entry from the depCounts of items that depend on
5070 * it, thereby possibly making them ready-to-run. Any pending item that
5071 * becomes ready should be moved to the ready_heap, if that's provided.
5072 */
5073static void
5074 reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
5075 binaryheap *ready_heap)
5076{
5077 int i;
5078
5079 pg_log_debug("reducing dependencies for %d", te->dumpId);
5080
5081 for (i = 0; i < te->nRevDeps; i++)
5082 {
5083 TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]];
5084
5085 Assert(otherte->depCount > 0);
5086 otherte->depCount--;
5087
5088 /*
5089 * It's ready if it has no remaining dependencies, and it belongs in
5090 * the current restore pass, and it is currently a member of the
5091 * pending list (that check is needed to prevent double restore in
5092 * some cases where a list-file forces out-of-order restoring).
5093 * However, if ready_heap == NULL then caller doesn't want any list
5094 * memberships changed.
5095 */
5096 if (otherte->depCount == 0 &&
5097 _tocEntryRestorePass(otherte) == AH->restorePass &&
5098 otherte->pending_prev != NULL &&
5099 ready_heap != NULL)
5100 {
5101 /* Remove it from pending list ... */
5102 pending_list_remove(otherte);
5103 /* ... and add to ready_heap */
5104 binaryheap_add(ready_heap, otherte);
5105 }
5106 }
5107}
5108
5109/*
5110 * Set the created flag on the DATA member corresponding to the given
5111 * TABLE member
5112 */
5113static void
5114 mark_create_done(ArchiveHandle *AH, TocEntry *te)
5115 {
5116 if (AH->tableDataId[te->dumpId] != 0)
5117 {
5118 TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
5119
5120 ted->created = true;
5121 }
5122}
5123
5124/*
5125 * Mark the DATA member corresponding to the given TABLE member
5126 * as not wanted
5127 */
5128static void
5129 inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
5130 {
5131 pg_log_info("table \"%s\" could not be created, will not restore its data",
5132 te->tag);
5133
5134 if (AH->tableDataId[te->dumpId] != 0)
5135 {
5136 TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
5137
5138 ted->reqs = 0;
5139 }
5140}
5141
5142/*
5143 * Clone and de-clone routines used in parallel restoration.
5144 *
5145 * Enough of the structure is cloned to ensure that there is no
5146 * conflict between different threads each with their own clone.
5147 */
5148 ArchiveHandle *
5149 CloneArchive(ArchiveHandle *AH)
5150 {
5151 ArchiveHandle *clone;
5152
5153 /* Make a "flat" copy */
5154 clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
5155 memcpy(clone, AH, sizeof(ArchiveHandle));
5156
5157 /* Likewise flat-copy the RestoreOptions, so we can alter them locally */
5158 clone->public.ropt = (RestoreOptions *) pg_malloc(sizeof(RestoreOptions));
5159 memcpy(clone->public.ropt, AH->public.ropt, sizeof(RestoreOptions));
5160
5161 /* Handle format-independent fields */
5162 memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));
5163
5164 /* The clone will have its own connection, so disregard connection state */
5165 clone->connection = NULL;
5166 clone->connCancel = NULL;
5167 clone->currUser = NULL;
5168 clone->currSchema = NULL;
5169 clone->currTableAm = NULL;
5170 clone->currTablespace = NULL;
5171
5172 /* savedPassword must be local in case we change it while connecting */
5173 if (clone->savedPassword)
5174 clone->savedPassword = pg_strdup(clone->savedPassword);
5175
5176 /* clone has its own error count, too */
5177 clone->public.n_errors = 0;
5178
5179 /* clones should not share lo_buf */
5180 clone->lo_buf = NULL;
5181
5182 /*
5183 * Clone connections disregard --transaction-size; they must commit after
5184 * each command so that the results are immediately visible to other
5185 * workers.
5186 */
5187 clone->public.ropt->txn_size = 0;
5188
5189 /*
5190 * Connect our new clone object to the database, using the same connection
5191 * parameters used for the original connection.
5192 */
5193 ConnectDatabaseAhx((Archive *) clone, &clone->public.ropt->cparams, true);
5194
5195 /* re-establish fixed state */
5196 if (AH->mode == archModeRead)
5197 _doSetFixedOutputState(clone);
5198 /* in write case, setupDumpWorker will fix up connection state */
5199
5200 /* Let the format-specific code have a chance too */
5201 clone->ClonePtr(clone);
5202
5203 Assert(clone->connection != NULL);
5204 return clone;
5205}
5206
5207/*
5208 * Release clone-local storage.
5209 *
5210 * Note: we assume any clone-local connection was already closed.
5211 */
5212void
5213 DeCloneArchive(ArchiveHandle *AH)
5214 {
5215 /* Should not have an open database connection */
5216 Assert(AH->connection == NULL);
5217
5218 /* Clear format-specific state */
5219 AH->DeClonePtr(AH);
5220
5221 /* Clear state allocated by CloneArchive */
5222 if (AH->sqlparse.curCmd)
5223 destroyPQExpBuffer(AH->sqlparse.curCmd);
5224 
5225 /* Clear any connection-local state */
5226 free(AH->currUser);
5227 free(AH->currSchema);
5228 free(AH->currTablespace);
5229 free(AH->currTableAm);
5230 free(AH->savedPassword);
5231
5232 free(AH);
5233}