From c11535220dbcbbb1e7c3b59c63e66002e5cfa629 Mon Sep 17 00:00:00 2001 From: Mahendra Singh Thalor Date: Thu, 17 Jul 2025 18:16:47 +0530 Subject: [PATCH] use appendShellString to append file names create global_drop.dat file for database/role/tablespace Note: we should keep only DROP DATABASE, but for testing I kept all 3. --- doc/src/sgml/ref/pg_restore.sgml | 6 +- src/bin/pg_dump/pg_dumpall.c | 94 ++++++++++++++++-------- src/bin/pg_dump/pg_restore.c | 122 +++++++++++++++++++++---------- 3 files changed, 154 insertions(+), 68 deletions(-) diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index b649bd3a5ae..f4eb31f2324 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -150,7 +150,9 @@ PostgreSQL documentation Access privileges for the database itself are also restored, unless is specified. is required when restoring multiple databases - from an archive created by pg_dumpall. + from an archive created by pg_dumpall; if a database already exists, it is restored in place without raising an error. @@ -621,6 +623,8 @@ PostgreSQL documentation This option is only relevant when restoring from an archive made using pg_dumpall. + If no database connection exists, each pattern is matched as a literal + database name only. 
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 100317b1aa9..e947255c52c 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -642,6 +642,25 @@ main(int argc, char *argv[]) */ if (output_clean) { + FILE *drop_OPF = NULL; + FILE *old_OPF = OPF; + + if (archDumpFormat != archNull) + { + char global_drop_path[MAXPGPATH]; + + snprintf(global_drop_path, MAXPGPATH, "%s/global_drop.dat", filename); + + drop_OPF = fopen(global_drop_path, PG_BINARY_W); + + if (!drop_OPF) + pg_fatal("could not open file \"%s\": %m", global_drop_path); + + } + + if (drop_OPF) + OPF = drop_OPF; + if (!globals_only && !roles_only && !tablespaces_only) dropDBs(conn); @@ -650,6 +669,12 @@ main(int argc, char *argv[]) if (!tablespaces_only) dropRoles(conn); + + if (drop_OPF) + { + fclose(drop_OPF); + OPF = old_OPF; + } } /* @@ -1622,8 +1647,8 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) { PGresult *res; int i; - char db_subdir[MAXPGPATH]; - char dbfilepath[MAXPGPATH]; + PQExpBufferData db_subdir; + PQExpBufferData dbfilepath; FILE *map_file = NULL; /* @@ -1653,20 +1678,28 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) */ if (archDumpFormat != archNull) { - char map_file_path[MAXPGPATH]; + PQExpBufferData map_file_path; - snprintf(db_subdir, MAXPGPATH, "%s/databases", filename); + initPQExpBuffer(&db_subdir); + initPQExpBuffer(&dbfilepath); + initPQExpBuffer(&map_file_path); + + appendShellString(&db_subdir, filename); + appendPQExpBufferChar(&db_subdir, '/'); + appendPQExpBufferStr(&db_subdir, "databases"); /* Create a subdirectory with 'databases' name under main directory. 
*/ - if (mkdir(db_subdir, pg_dir_create_mode) != 0) - pg_fatal("could not create directory \"%s\": %m", db_subdir); + if (mkdir(db_subdir.data, pg_dir_create_mode) != 0) + pg_fatal("could not create directory \"%s\": %m", db_subdir.data); - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename); + appendShellString(&map_file_path, filename); + appendPQExpBufferChar(&map_file_path, '/'); + appendPQExpBufferStr(&map_file_path, "map.dat"); /* Create a map file (to store dboid and dbname) */ - map_file = fopen(map_file_path, PG_BINARY_W); + map_file = fopen(map_file_path.data, PG_BINARY_W); if (!map_file) - pg_fatal("could not open file \"%s\": %m", map_file_path); + pg_fatal("could not open file \"%s\": %m", map_file_path.data); } for (i = 0; i < PQntuples(res); i++) @@ -1693,12 +1726,16 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) */ if (archDumpFormat != archNull) { + resetPQExpBuffer(&dbfilepath); + + appendShellString(&dbfilepath, db_subdir.data); + appendPQExpBufferChar(&dbfilepath, '/'); + appendShellString(&dbfilepath, oid); + if (archDumpFormat == archCustom) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid); + appendPQExpBufferStr(&dbfilepath, ".dmp"); else if (archDumpFormat == archTar) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid); - else - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid); + appendPQExpBufferStr(&dbfilepath, ".tar"); /* Put one line entry for dboid and dbname in map file. 
*/ fprintf(map_file, "%s %s\n", oid, dbname); @@ -1728,26 +1765,23 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) else create_opts = "--create"; - if (filename) + if (filename && archDumpFormat == archNull) fclose(OPF); - ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat); + ret = runPgDump(dbname, create_opts, dbfilepath.data, archDumpFormat); if (ret != 0) pg_fatal("pg_dump failed on database \"%s\", exiting", dbname); - if (filename) + /* + * For non-plain mode, no need to re-open file as only once we write + * data into file. + */ + if (filename && archDumpFormat == archNull) { - char global_path[MAXPGPATH]; - - if (archDumpFormat != archNull) - snprintf(global_path, MAXPGPATH, "%s/global.dat", filename); - else - snprintf(global_path, MAXPGPATH, "%s", filename); - - OPF = fopen(global_path, PG_BINARY_A); + OPF = fopen(filename, PG_BINARY_A); if (!OPF) pg_fatal("could not re-open the output file \"%s\": %m", - global_path); + filename); } } @@ -1774,14 +1808,17 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile, initPQExpBuffer(&connstrbuf); initPQExpBuffer(&cmd); + printfPQExpBuffer(&cmd, "\"%s\" %s %s ", pg_dump_bin, + pgdumpopts->data, create_opts); + /* * If this is not a plain format dump, then append file name and dump * format to the pg_dump command to get archive dump. */ if (archDumpFormat != archNull) { - printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin, - dbfile, create_opts); + appendPQExpBufferStr(&cmd, " -f "); + appendShellString(&cmd, dbfile); if (archDumpFormat == archDirectory) appendPQExpBufferStr(&cmd, " --format=directory "); @@ -1792,9 +1829,6 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile, } else { - printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin, - pgdumpopts->data, create_opts); - /* * If we have a filename, use the undocumented plain-append pg_dump * format. 
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index 6ef789cb06d..5046c00477d 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -64,8 +64,8 @@ static int read_one_statement(StringInfo inBuf, FILE *pfile); static int restore_all_databases(PGconn *conn, const char *dumpdirpath, SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers); static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath, - const char *outfile); -static void copy_or_print_global_file(const char *outfile, FILE *pfile); + const char *outfile, bool drop_commands); +static void copy_or_print_global_file(const char *outfile, FILE *pfile, FILE *dfile); static int get_dbnames_list_to_restore(PGconn *conn, SimplePtrList *dbname_oid_list, SimpleStringList db_exclude_patterns); @@ -552,7 +552,7 @@ main(int argc, char **argv) * commands. */ n_errors = process_global_sql_commands(conn, inputFileSpec, - opts->filename); + opts->filename, false); if (conn) PQfinish(conn); @@ -1038,7 +1038,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi { StringInfoData linebuf; FILE *pfile; - char map_file_path[MAXPGPATH]; + PQExpBufferData map_file_path; int count = 0; @@ -1052,13 +1052,16 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi return 0; } - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath); + initPQExpBuffer(&map_file_path); + appendShellString(&map_file_path, dumpdirpath); + appendPQExpBufferChar(&map_file_path, '/'); + appendPQExpBufferStr(&map_file_path, "map.dat"); /* Open map.dat file. 
*/ - pfile = fopen(map_file_path, PG_BINARY_R); + pfile = fopen(map_file_path.data, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open file \"%s\": %m", map_file_path); + pg_fatal("could not open file \"%s\": %m", map_file_path.data); initStringInfo(&linebuf); @@ -1086,11 +1089,11 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi /* Report error and exit if the file has any corrupted data. */ if (!OidIsValid(db_oid) || namelen <= 1) - pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path, + pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path.data, count + 1); pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"", - dbname, db_oid, map_file_path); + dbname, db_oid, map_file_path.data); dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1); dbidname->oid = db_oid; @@ -1140,7 +1143,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, /* If map.dat has no entries, return after processing global.dat */ if (dbname_oid_list.head == NULL) - return process_global_sql_commands(conn, dumpdirpath, opts->filename); + return process_global_sql_commands(conn, dumpdirpath, opts->filename, true); pg_log_info(ngettext("found %d database name in \"%s\"", "found %d database names in \"%s\"", @@ -1173,7 +1176,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, db_exclude_patterns); /* Open global.dat file and execute/append all the global sql commands. */ - n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename); + n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename, true); /* Close the db connection as we are done with globals and patterns. 
*/ if (conn) @@ -1304,22 +1307,25 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, * Returns the number of errors while processing global.dat */ static int -process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile) +process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile, bool drop_commands) { - char global_file_path[MAXPGPATH]; + PQExpBufferData global_file_path; PGresult *result; StringInfoData sqlstatement, user_create; FILE *pfile; int n_errors = 0; - snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath); + initPQExpBuffer(&global_file_path); + appendShellString(&global_file_path, dumpdirpath); + appendPQExpBufferChar(&global_file_path, '/'); + appendPQExpBufferStr(&global_file_path, "global.dat"); /* Open global.dat file. */ - pfile = fopen(global_file_path, PG_BINARY_R); + pfile = fopen(global_file_path.data, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open file \"%s\": %m", global_file_path); + pg_fatal("could not open file \"%s\": %m", global_file_path.data); /* * If outfile is given, then just copy all global.dat file data into @@ -1327,7 +1333,20 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o */ if (outfile) { - copy_or_print_global_file(outfile, pfile); + FILE *dfile = NULL; + + if (drop_commands) + { + resetPQExpBuffer(&global_file_path); + appendShellString(&global_file_path, dumpdirpath); + appendPQExpBufferChar(&global_file_path, '/'); + appendPQExpBufferStr(&global_file_path, "global_drop.dat"); + + /* Open global_drop.dat file. */ + dfile = fopen(global_file_path.data, PG_BINARY_R); + } + + copy_or_print_global_file(outfile, pfile, dfile); return 0; } @@ -1341,36 +1360,56 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o appendStringInfoString(&user_create, PQuser(conn)); appendStringInfoChar(&user_create, ';'); - /* Process file till EOF and execute sql statements. 
*/ - while (read_one_statement(&sqlstatement, pfile) != EOF) + while(true) { - /* don't try to create the role we are connected as */ - if (strstr(sqlstatement.data, user_create.data)) - continue; + /* Process file till EOF and execute sql statements. */ + while (read_one_statement(&sqlstatement, pfile) != EOF) + { + /* don't try to create the role we are connected as */ + if (strstr(sqlstatement.data, user_create.data)) + continue; + + pg_log_info("executing query: %s", sqlstatement.data); + result = PQexec(conn, sqlstatement.data); + + switch (PQresultStatus(result)) + { + case PGRES_COMMAND_OK: + case PGRES_TUPLES_OK: + case PGRES_EMPTY_QUERY: + break; + default: + n_errors++; + pg_log_error("could not execute query: %s", PQerrorMessage(conn)); + pg_log_error_detail("Command was: %s", sqlstatement.data); + } + PQclear(result); + } - pg_log_info("executing query: %s", sqlstatement.data); - result = PQexec(conn, sqlstatement.data); + fclose(pfile); - switch (PQresultStatus(result)) + if (drop_commands) { - case PGRES_COMMAND_OK: - case PGRES_TUPLES_OK: - case PGRES_EMPTY_QUERY: - break; - default: - n_errors++; - pg_log_error("could not execute query: %s", PQerrorMessage(conn)); - pg_log_error_detail("Command was: %s", sqlstatement.data); + drop_commands = false; + resetPQExpBuffer(&global_file_path); + appendShellString(&global_file_path, dumpdirpath); + appendPQExpBufferChar(&global_file_path, '/'); + appendPQExpBufferStr(&global_file_path, "global_drop.dat"); + + /* Open global_drop.dat file. */ + pfile = fopen(global_file_path.data, PG_BINARY_R); + + if (pfile) + continue; } - PQclear(result); + break; } /* Print a summary of ignored errors during global.dat. 
*/ if (n_errors) pg_log_warning(ngettext("ignored %d error in file \"%s\"", "ignored %d errors in file \"%s\"", n_errors), - n_errors, global_file_path); - fclose(pfile); + n_errors, global_file_path.data); return n_errors; } @@ -1382,7 +1421,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o * then print commands to stdout. */ static void -copy_or_print_global_file(const char *outfile, FILE *pfile) +copy_or_print_global_file(const char *outfile, FILE *pfile, FILE *dfile) { char out_file_path[MAXPGPATH]; FILE *OPF; @@ -1407,6 +1446,15 @@ copy_or_print_global_file(const char *outfile, FILE *pfile) while ((c = fgetc(pfile)) != EOF) fputc(c, OPF); + /* Append drop database commands. */ + if (dfile) + { + while ((c = fgetc(dfile)) != EOF) + fputc(c, OPF); + + fclose(dfile); + } + fclose(pfile); /* Close output file. */ -- 2.39.3