*** a/doc/src/sgml/ref/vacuumdb.sgml --- b/doc/src/sgml/ref/vacuumdb.sgml *************** *** 224,229 **** PostgreSQL documentation --- 224,239 ---- + + + + + Number of parallel processes to perform the operation. + + + + + *** a/src/bin/scripts/Makefile --- b/src/bin/scripts/Makefile *************** *** 32,38 **** dropdb: dropdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq subm droplang: droplang.o common.o print.o mbprint.o | submake-libpq submake-libpgport dropuser: dropuser.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport clusterdb: clusterdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport ! vacuumdb: vacuumdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport reindexdb: reindexdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport pg_isready: pg_isready.o common.o | submake-libpq submake-libpgport --- 32,38 ---- droplang: droplang.o common.o print.o mbprint.o | submake-libpq submake-libpgport dropuser: dropuser.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport clusterdb: clusterdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport ! vacuumdb: vacuumdb.o vac_parallel.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport reindexdb: reindexdb.o common.o dumputils.o kwlookup.o keywords.o | submake-libpq submake-libpgport pg_isready: pg_isready.o common.o | submake-libpq submake-libpgport *************** *** 65,71 **** uninstall: clean distclean maintainer-clean: rm -f $(addsuffix $(X), $(PROGRAMS)) $(addsuffix .o, $(PROGRAMS)) ! rm -f common.o dumputils.o kwlookup.o keywords.o print.o mbprint.o $(WIN32RES) rm -f dumputils.c print.c mbprint.c kwlookup.c keywords.c rm -rf tmp_check --- 65,71 ---- clean distclean maintainer-clean: rm -f $(addsuffix $(X), $(PROGRAMS)) $(addsuffix .o, $(PROGRAMS)) ! 
rm -f common.o dumputils.o kwlookup.o keywords.o print.o mbprint.o vac_parallel.o $(WIN32RES) rm -f dumputils.c print.c mbprint.c kwlookup.c keywords.c rm -rf tmp_check *** /dev/null --- b/src/bin/scripts/vac_parallel.c *************** *** 0 **** --- 1,498 ---- + /*------------------------------------------------------------------------- + * + * vac_parallel.c + * + * Parallel support for the vacuumdb + * + * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * The author is not responsible for loss or damages that may + * result from its use. + * + * IDENTIFICATION + * src/bin/scripts/vac_parallel.c + * + *------------------------------------------------------------------------- + */ + + #include "vac_parallel.h" + + #ifndef WIN32 + #include + #include + #include "signal.h" + #include + #include + #endif + #include "common.h" + + /* file-scope variables */ + #ifdef WIN32 + static unsigned int tMasterThreadId = 0; + + /* + * Structure to hold info passed by _beginthreadex() to the function it calls + * via its single allowed argument. 
+ */ + typedef struct + { + VacOpt *vopt; + int worker; + int pipeRead; + int pipeWrite; + } WorkerInfo; + #endif + + static const char *modulename = gettext_noop("parallel vacuum"); + + static void + SetupWorker(PGconn *connection, int pipefd[2], int worker, int vacStage); + + static void + WaitForCommands(PGconn * connection, int pipefd[2]); + + static void + vacuum_close_connection(int code, void *arg); + + #define messageStartsWith(msg, prefix) \ + (strncmp(msg, prefix, strlen(prefix)) == 0) + #define messageEquals(msg, pattern) \ + (strcmp(msg, pattern) == 0) + + + #ifdef WIN32 + static unsigned __stdcall + init_spawned_worker_win32(WorkerInfo *wi) + { + PGconn *conn; + int pipefd[2] = {wi->pipeRead, wi->pipeWrite}; + int worker = wi->worker; + VacOpt *vopt = wi->vopt; + ParallelSlot *mySlot = &shutdown_info.pstate->parallelSlot[worker]; + + conn = connectDatabase(vopt->dbname, vopt->pghost, vopt->pgport, + vopt->username, vopt->promptPassword, vopt->progname, + false); + + ((ParallelArgs*)mySlot->args)->connection = conn; + + free(wi); + SetupWorker(conn, pipefd, worker, vopt->analyze_stage); + _endthreadex(0); + return 0; + } + #endif + + + + ParallelState * ParallelVacuumStart(VacOpt *vopt, int numWorkers) + { + ParallelState *pstate; + int i; + const size_t slotSize = numWorkers * sizeof(ParallelSlot); + + Assert(numWorkers > 0); + + /* Ensure stdio state is quiesced before forking */ + fflush(NULL); + + pstate = (ParallelState *) pg_malloc(sizeof(ParallelState)); + + pstate->numWorkers = numWorkers; + pstate->parallelSlot = NULL; + + if (numWorkers == 1) + return pstate; + + pstate->parallelSlot = (ParallelSlot *) pg_malloc(slotSize); + memset((void *) pstate->parallelSlot, 0, slotSize); + + shutdown_info.pstate = pstate; + + #ifdef WIN32 + tMasterThreadId = GetCurrentThreadId(); + termEvent = CreateEvent(NULL, true, false, "Terminate"); + #else + signal(SIGTERM, sigTermHandler); + signal(SIGINT, sigTermHandler); + signal(SIGQUIT, sigTermHandler); + 
#endif + + + for (i = 0; i < pstate->numWorkers; i++) + { + #ifdef WIN32 + WorkerInfo *wi; + uintptr_t handle; + #else + pid_t pid; + #endif + int pipeMW[2], + pipeWM[2]; + + if (pgpipe(pipeMW) < 0 || pgpipe(pipeWM) < 0) + exit_horribly(modulename, + "could not create communication channels: %s\n", + strerror(errno)); + + pstate->parallelSlot[i].workerStatus = WRKR_IDLE; + pstate->parallelSlot[i].args = (ParallelArgs *) pg_malloc(sizeof(ParallelArgs)); + ((ParallelArgs*)pstate->parallelSlot[i].args)->vopt = vopt; + #ifdef WIN32 + /* Allocate a new structure for every worker */ + wi = (WorkerInfo *) pg_malloc(sizeof(WorkerInfo)); + wi->worker = i; + wi->pipeRead = pstate->parallelSlot[i].pipeRevRead = pipeMW[PIPE_READ]; + wi->pipeWrite = pstate->parallelSlot[i].pipeRevWrite = pipeWM[PIPE_WRITE]; + wi->vopt = vopt; + handle = _beginthreadex(NULL, 0, (void *) &init_spawned_worker_win32, + wi, 0, &(pstate->parallelSlot[i].threadId)); + pstate->parallelSlot[i].hThread = handle; + #else + pid = fork(); + if (pid == 0) + { + /* we are the worker */ + int j; + int pipefd[2] = {pipeMW[PIPE_READ], pipeWM[PIPE_WRITE]}; + + /* + * Store the fds for the reverse communication in pstate. Actually + * we only use this in case of an error and don't use pstate + * otherwise in the worker process. On Windows we write to the + * global pstate, in Unix we write to our process-local copy but + * that's also where we'd retrieve this information back from. 
+ */ + pstate->parallelSlot[i].pipeRevRead = pipefd[PIPE_READ]; + pstate->parallelSlot[i].pipeRevWrite = pipefd[PIPE_WRITE]; + pstate->parallelSlot[i].pid = getpid(); + + ((ParallelArgs*)pstate->parallelSlot[i].args)->connection + = connectDatabase(vopt->dbname, vopt->pghost, vopt->pgport, + vopt->username, vopt->promptPassword, + vopt->progname, false); + + /* close read end of Worker -> Master */ + closesocket(pipeWM[PIPE_READ]); + + /* close write end of Master -> Worker */ + closesocket(pipeMW[PIPE_WRITE]); + + /* + * Close all inherited fds for communication of the master with + * the other workers. + */ + for (j = 0; j < i; j++) + { + closesocket(pstate->parallelSlot[j].pipeRead); + closesocket(pstate->parallelSlot[j].pipeWrite); + } + + SetupWorker(((ParallelArgs*)pstate->parallelSlot[i].args)->connection, + pipefd, i, vopt->analyze_stage); + exit(0); + } + else if (pid < 0) + /* fork failed */ + exit_horribly(modulename, + "could not create worker process: %s\n", + strerror(errno)); + + /* we are the Master, pid > 0 here */ + Assert(pid > 0); + + /* close read end of Master -> Worker */ + closesocket(pipeMW[PIPE_READ]); + + /* close write end of Worker -> Master */ + closesocket(pipeWM[PIPE_WRITE]); + + pstate->parallelSlot[i].pid = pid; + #endif + + pstate->parallelSlot[i].pipeRead = pipeWM[PIPE_READ]; + pstate->parallelSlot[i].pipeWrite = pipeMW[PIPE_WRITE]; + } + + return pstate; + } + + /* + * Tell all of our workers to terminate. + * + * Pretty straightforward routine, first we tell everyone to terminate, then + * we listen to the workers' replies and finally close the sockets that we + * have used for communication. 
+ */ + void + ParallelVacuumEnd(ParallelState *pstate) + { + int i; + + Assert(IsEveryWorkerIdle(pstate)); + + /* close the sockets so that the workers know they can exit */ + for (i = 0; i < pstate->numWorkers; i++) + { + closesocket(pstate->parallelSlot[i].pipeRead); + closesocket(pstate->parallelSlot[i].pipeWrite); + } + + WaitForTerminatingWorkers(pstate); + + /* + * Remove the pstate again, so the exit handler in the parent will now + * again fall back to closing AH->connection (if connected). + */ + shutdown_info.pstate = NULL; + + free(pstate->parallelSlot); + free(pstate); + } + + /* + * This function is called by both UNIX and Windows variants to set up a + * worker process. + */ + static void + SetupWorker(PGconn *connection, int pipefd[2], int worker, int vacStage) + { + + const char *stage_commands[] = { + "SET default_statistics_target=1; SET vacuum_cost_delay=0;", + "SET default_statistics_target=10; RESET vacuum_cost_delay;", + "RESET default_statistics_target;"}; + + if (vacStage >= 0) + executeMaintenanceCommand(connection, stage_commands[vacStage], false); + + /* + * Call the setup worker function that's defined in the ArchiveHandle. + * + * We get the raw connection only for the reason that we can close it + * properly when we shut down. This happens only that way when it is + * brought down because of an error. + */ + WaitForCommands(connection, pipefd); + closesocket(pipefd[PIPE_READ]); + closesocket(pipefd[PIPE_WRITE]); + } + + + + /* + * That's the main routine for the worker. + * When it starts up it enters this routine and waits for commands from the + * master process. After having processed a command it comes back to here to + * wait for the next command. Finally it will receive a TERMINATE command and + * exit. 
+ */ + static void + WaitForCommands(PGconn * connection, int pipefd[2]) + { + char *command; + PQExpBufferData sql; + + for (;;) + { + if (!(command = getMessageFromMaster(pipefd))) + { + PQfinish(connection); + connection = NULL; + return; + } + + + /* check if master has set the terminate event*/ + checkAborting(); + + if (executeMaintenanceCommand(connection, command, false)) + sendMessageToMaster(pipefd, "OK"); + else + { + initPQExpBuffer(&sql); + appendPQExpBuffer(&sql, "ERROR : %s", + PQerrorMessage(connection)); + sendMessageToMaster(pipefd, sql.data); + termPQExpBuffer(&sql); + } + + /* command was pg_malloc'd and we are responsible for free()ing it. */ + free(command); + } + } + + /* + * --------------------------------------------------------------------- + * Note the status change: + * + * DispatchJobForTocEntry WRKR_IDLE -> WRKR_WORKING + * ListenToWorkers WRKR_WORKING -> WRKR_FINISHED / WRKR_TERMINATED + * ReapWorkerStatus WRKR_FINISHED -> WRKR_IDLE + * --------------------------------------------------------------------- + * + * Just calling ReapWorkerStatus() when all workers are working might or might + * not give you an idle worker because you need to call ListenToWorkers() in + * between and only thereafter ReapWorkerStatus(). This is necessary in order + * to get and deal with the status (=result) of the worker's execution. 
+ */ + void + ListenToWorkers(ParallelState *pstate, bool do_wait) + { + int worker; + char *msg; + + msg = getMessageFromWorker(pstate, do_wait, &worker); + + if (!msg) + { + if (do_wait) + exit_horribly(modulename, "a worker process died unexpectedly\n"); + return; + } + + if (messageStartsWith(msg, "OK")) + { + pstate->parallelSlot[worker].workerStatus = WRKR_FINISHED; + } + else if (messageStartsWith(msg, "ERROR ")) + { + ParallelSlot *mySlot = &pstate->parallelSlot[worker]; + + mySlot->workerStatus = WRKR_TERMINATED; + exit_horribly(modulename, + "vacuuming of database \"%s\" failed %s", + ((ParallelArgs*)mySlot->args)->vopt->dbname, msg + strlen("ERROR ")); + } + else + { + exit_horribly(modulename, + "invalid message received from worker: %s\n", msg); + } + + /* both Unix and Win32 return pg_malloc()ed space, so we free it */ + free(msg); + } + + /* + * This function is executed in the master process. + * + * It looks for an idle worker process and only returns if there is one. + */ + void + EnsureIdleWorker(ParallelState *pstate) + { + int ret_worker; + int work_status; + + for (;;) + { + int nTerm = 0; + + while ((ret_worker = ReapWorkerStatus(pstate, &work_status)) != NO_SLOT) + { + if (work_status != 0) + exit_horribly(modulename, "error processing a parallel work item\n"); + + nTerm++; + } + + /* + * We need to make sure that we have an idle worker before dispatching + * the next item. If nTerm > 0 we already have that (quick check). + */ + if (nTerm > 0) + return; + + /* explicit check for an idle worker */ + if (GetIdleWorker(pstate) != NO_SLOT) + return; + + /* + * If we have no idle worker, read the result of one or more workers + * and loop the loop to call ReapWorkerStatus() on them + */ + ListenToWorkers(pstate, true); + } + } + + /* + * This function is executed in the master process. + * + * It waits for all workers to terminate. 
+ */ + void + EnsureWorkersFinished(ParallelState *pstate) + { + int work_status; + + if (!pstate || pstate->numWorkers == 1) + return; + + /* Waiting for the remaining worker processes to finish */ + while (!IsEveryWorkerIdle(pstate)) + { + if (ReapWorkerStatus(pstate, &work_status) == NO_SLOT) + ListenToWorkers(pstate, true); + else if (work_status != 0) + exit_horribly(modulename, + "error processing a parallel work item\n"); + } + } + + void + DispatchJob(ParallelState *pstate, char * command) + { + int worker; + + /* our caller makes sure that at least one worker is idle */ + Assert(GetIdleWorker(pstate) != NO_SLOT); + worker = GetIdleWorker(pstate); + Assert(worker != NO_SLOT); + + sendMessageToWorker(pstate, worker, command); + pstate->parallelSlot[worker].workerStatus = WRKR_WORKING; + } + + void + on_exit_close_vacuum(PGconn *conn) + { + shutdown_info.handle = (void*)conn; + on_exit_nicely(vacuum_close_connection, &shutdown_info); + } + + /* + * This function can close archives in both the parallel and non-parallel + * case. + */ + static void + vacuum_close_connection(int code, void *arg) + { + ShutdownInformation *si = (ShutdownInformation *) arg; + + if (si->pstate) + { + ParallelSlot *slot = GetMyPSlot(si->pstate); + + if (!slot) + { + PQfinish((PGconn*)si->handle); + #ifndef WIN32 + + /* + * Setting aborting to true switches to best-effort-mode + * (send/receive but ignore errors) in communicating with our + * workers. 
+ */ + aborting = true; + #endif + ShutdownWorkersHard(si->pstate); + } + else if (((ParallelArgs*)slot->args)->connection) + PQfinish((((ParallelArgs*)slot->args)->connection)); + } + else if ((PGconn*)si->handle) + PQfinish((PGconn*)si->handle); + } + *** /dev/null --- b/src/bin/scripts/vac_parallel.h *************** *** 0 **** --- 1,58 ---- + /*------------------------------------------------------------------------- + * + * vac_parallel.h + * + * Parallel support header file for the vacuumdb + * + * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * The author is not responsible for loss or damages that may + * result from its use. + * + * IDENTIFICATION + * src/bin/scripts/vac_parallel.h + * + *------------------------------------------------------------------------- + */ + + #ifndef VAC_PARALLEL_H + #define VAC_PARALLEL_H + + #include "postgres_fe.h" + #include + #include "libpq-fe.h" + #include "common.h" + #include "parallel_utils.h" + + + typedef struct VacOpt + { + char *dbname; + char *pgport; + char *pghost; + char *username; + char *progname; + enum trivalue promptPassword; + int analyze_stage; + }VacOpt; + + /* Arguments needed for a worker process */ + typedef struct ParallelArgs + { + PGconn *connection; + VacOpt *vopt; + } ParallelArgs; + + + extern ParallelState * ParallelVacuumStart(VacOpt *vopt, int numWorkers); + extern bool IsEveryWorkerIdle(ParallelState *pstate); + extern void ListenToWorkers(ParallelState *pstate, bool do_wait); + extern void EnsureIdleWorker(ParallelState *pstate); + extern void EnsureWorkersFinished(ParallelState *pstate); + + extern void DispatchJob(ParallelState *pstate, char * command); + extern void on_exit_close_vacuum(PGconn *conn); + extern void ParallelVacuumEnd(ParallelState *pstate); + + #endif /* VAC_PARALLEL_H */ *** a/src/bin/scripts/vacuumdb.c --- b/src/bin/scripts/vacuumdb.c *************** *** 13,34 **** 
#include "postgres_fe.h" #include "common.h" #include "dumputils.h" static void vacuum_one_database(const char *dbname, bool full, bool verbose, ! bool and_analyze, bool analyze_only, bool analyze_in_stages, bool freeze, ! const char *table, const char *host, const char *port, ! const char *username, enum trivalue prompt_password, ! const char *progname, bool echo); static void vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_only, bool analyze_in_stages, bool freeze, const char *maintenance_db, const char *host, const char *port, const char *username, enum trivalue prompt_password, ! const char *progname, bool echo, bool quiet); static void help(const char *progname); int main(int argc, char *argv[]) --- 13,54 ---- #include "postgres_fe.h" #include "common.h" #include "dumputils.h" + #include "vac_parallel.h" static void vacuum_one_database(const char *dbname, bool full, bool verbose, ! bool and_analyze, bool analyze_only, bool analyze_in_stages, ! bool freeze, const char *table, const char *host, ! const char *port, const char *username, ! enum trivalue prompt_password, bool echo); static void vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_only, bool analyze_in_stages, bool freeze, const char *maintenance_db, const char *host, const char *port, const char *username, enum trivalue prompt_password, ! 
bool echo, bool quiet, int parallel); static void help(const char *progname); + void vacuum_parallel(const char *dbname, bool full, bool verbose, + bool and_analyze, bool analyze_only, bool analyze_in_stages, + bool freeze, const char *host, + const char *port, const char *username, + enum trivalue prompt_password, + bool echo, int parallel, SimpleStringList *tables); + + void run_command(ParallelState *pstate, char *command); + + void prepare_command(PGconn *conn, bool full, bool verbose, bool and_analyze, + bool analyze_only, bool freeze, PQExpBuffer sql); + static void + run_parallel_vacuum(PGconn *conn, bool echo, + const char *dbname, SimpleStringList *tables, + bool full, bool verbose, bool and_analyze, + bool analyze_only, bool freeze, int parallel, + VacOpt *vopt); + + const char *progname = NULL; int main(int argc, char *argv[]) *************** *** 49,60 **** main(int argc, char *argv[]) {"table", required_argument, NULL, 't'}, {"full", no_argument, NULL, 'f'}, {"verbose", no_argument, NULL, 'v'}, {"maintenance-db", required_argument, NULL, 2}, {"analyze-in-stages", no_argument, NULL, 3}, {NULL, 0, NULL, 0} }; - const char *progname; int optindex; int c; --- 69,80 ---- {"table", required_argument, NULL, 't'}, {"full", no_argument, NULL, 'f'}, {"verbose", no_argument, NULL, 'v'}, + {"jobs", required_argument, NULL, 'j'}, {"maintenance-db", required_argument, NULL, 2}, {"analyze-in-stages", no_argument, NULL, 3}, {NULL, 0, NULL, 0} }; int optindex; int c; *************** *** 74,86 **** main(int argc, char *argv[]) bool full = false; bool verbose = false; SimpleStringList tables = {NULL, NULL}; progname = get_progname(argv[0]); set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pgscripts")); handle_help_version_opts(argc, argv, "vacuumdb", help); ! 
while ((c = getopt_long(argc, argv, "h:p:U:wWeqd:zZFat:fv", long_options, &optindex)) != -1) { switch (c) { --- 94,107 ---- bool full = false; bool verbose = false; SimpleStringList tables = {NULL, NULL}; + int parallel = 0; progname = get_progname(argv[0]); set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pgscripts")); handle_help_version_opts(argc, argv, "vacuumdb", help); ! while ((c = getopt_long(argc, argv, "h:p:U:wWeqd:zZFat:fvj:", long_options, &optindex)) != -1) { switch (c) { *************** *** 129,134 **** main(int argc, char *argv[]) --- 150,165 ---- case 'v': verbose = true; break; + case 'j': + parallel = atoi(optarg); + if (parallel <= 0) + { + fprintf(stderr, _("%s: Number of parallel \"jobs\" should be at least 1\n"), + progname); + exit(1); + } + + break; case 2: maintenance_db = pg_strdup(optarg); break; *************** *** 141,146 **** main(int argc, char *argv[]) --- 172,178 ---- } } + /* * Non-option argument specifies database name as long as it wasn't *************** *** 196,202 **** main(int argc, char *argv[]) vacuum_all_databases(full, verbose, and_analyze, analyze_only, analyze_in_stages, freeze, maintenance_db, host, port, username, ! prompt_password, progname, echo, quiet); } else { --- 228,234 ---- vacuum_all_databases(full, verbose, and_analyze, analyze_only, analyze_in_stages, freeze, maintenance_db, host, port, username, ! prompt_password, echo, quiet, parallel); } else { *************** *** 210,234 **** main(int argc, char *argv[]) vacuum_one_database(dbname, full, verbose, and_analyze, analyze_only, analyze_in_stages, freeze, cell->val, host, port, username, prompt_password, ! 
progname, echo); } } - else - vacuum_one_database(dbname, full, verbose, and_analyze, - analyze_only, analyze_in_stages, - freeze, NULL, - host, port, username, prompt_password, - progname, echo); } exit(0); --- 242,280 ---- dbname = get_user_name_or_exit(progname); } ! if (parallel > 1) { ! vacuum_parallel(dbname, full, verbose, and_analyze, ! analyze_only, analyze_in_stages, ! freeze, host, port, username, prompt_password, ! echo, parallel, &tables); ! } ! else ! { ! if (tables.head != NULL) { ! SimpleStringListCell *cell; ! ! for (cell = tables.head; cell; cell = cell->next) ! { ! vacuum_one_database(dbname, full, verbose, and_analyze, ! analyze_only, analyze_in_stages, ! freeze, cell->val, ! host, port, username, prompt_password, ! echo); ! } ! } ! else ! { ! vacuum_one_database(dbname, full, verbose, and_analyze, ! analyze_only, analyze_in_stages, ! freeze, NULL, ! host, port, username, prompt_password, ! echo); ! } } } exit(0); *************** *** 253,263 **** run_vacuum_command(PGconn *conn, const char *sql, bool echo, const char *dbname, static void ! vacuum_one_database(const char *dbname, bool full, bool verbose, bool and_analyze, ! bool analyze_only, bool analyze_in_stages, bool freeze, const char *table, ! const char *host, const char *port, ! const char *username, enum trivalue prompt_password, ! const char *progname, bool echo) { PQExpBufferData sql; --- 299,309 ---- static void ! vacuum_one_database(const char *dbname, bool full, bool verbose, ! bool and_analyze, bool analyze_only, bool analyze_in_stages, ! bool freeze, const char *table, const char *host, ! const char *port, const char *username, ! enum trivalue prompt_password, bool echo) { PQExpBufferData sql; *************** *** 352,362 **** vacuum_one_database(const char *dbname, bool full, bool verbose, bool and_analyz static void ! vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_only, ! bool analyze_in_stages, bool freeze, const char *maintenance_db, ! 
const char *host, const char *port, ! const char *username, enum trivalue prompt_password, ! const char *progname, bool echo, bool quiet) { PGconn *conn; PGresult *result; --- 398,409 ---- static void ! vacuum_all_databases(bool full, bool verbose, bool and_analyze, ! bool analyze_only, bool analyze_in_stages, bool freeze, ! const char *maintenance_db, const char *host, ! const char *port, const char *username, ! enum trivalue prompt_password, ! bool echo, bool quiet, int parallel) { PGconn *conn; PGresult *result; *************** *** 377,391 **** vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_onl fflush(stdout); } ! vacuum_one_database(dbname, full, verbose, and_analyze, analyze_only, ! analyze_in_stages, ! freeze, NULL, host, port, username, prompt_password, ! progname, echo); } PQclear(result); } static void help(const char *progname) --- 424,668 ---- fflush(stdout); } ! ! if (parallel > 1) ! { ! vacuum_parallel(dbname, full, verbose, and_analyze, ! analyze_only, analyze_in_stages, ! freeze, host, port, username, prompt_password, ! echo, parallel, NULL); ! ! } ! else ! { ! vacuum_one_database(dbname, full, verbose, and_analyze, ! analyze_only, analyze_in_stages, ! freeze, NULL, host, port, username, prompt_password, ! echo); ! 
} } PQclear(result); } + static void + run_parallel_vacuum(PGconn *conn, bool echo, + const char *dbname, SimpleStringList *tables, + bool full, bool verbose, bool and_analyze, + bool analyze_only, bool freeze, int parallel, + VacOpt *vopt) + { + ParallelState *pstate; + PQExpBufferData sql; + SimpleStringListCell *cell; + + initPQExpBuffer(&sql); + + pstate = ParallelVacuumStart(vopt, parallel); + + for (cell = tables->head; cell; cell = cell->next) + { + prepare_command(conn, full, verbose, and_analyze, + analyze_only, freeze, &sql); + appendPQExpBuffer(&sql, " %s", cell->val); + run_command(pstate, sql.data); + termPQExpBuffer(&sql); + } + + EnsureWorkersFinished(pstate); + ParallelVacuumEnd(pstate); + termPQExpBuffer(&sql); + } + + + void + vacuum_parallel(const char *dbname, bool full, bool verbose, + bool and_analyze, bool analyze_only, bool analyze_in_stages, + bool freeze, const char *host, const char *port, + const char *username, enum trivalue prompt_password, + bool echo, int parallel, SimpleStringList *tables) + { + + PGconn *conn; + VacOpt vopt = {0}; + + init_parallel_dump_utils(); + + conn = connectDatabase(dbname, host, port, username, prompt_password, + progname, false); + + if (dbname) + vopt.dbname = pg_strdup(dbname); + + if (host) + vopt.pghost = pg_strdup(host); + + if (port) + vopt.pgport = pg_strdup(port); + + if (username) + vopt.username = pg_strdup(username); + + if (progname) + vopt.progname = pg_strdup(progname); + + vopt.promptPassword = prompt_password; + + on_exit_close_vacuum(conn); + + /* if table list is not provided then we need to do vacuum for whole DB + get the list of all tables and prepare the list*/ + if (!tables || !tables->head) + { + SimpleStringList dbtables = {NULL, NULL}; + PGresult *res; + int ntuple; + int i; + char *relName; + char *nspace; + PQExpBufferData sql; + + initPQExpBuffer(&sql); + + res = executeQuery(conn, + "select relname, nspname from pg_class c, pg_namespace ns" + " where relkind= \'r\' and 
c.relnamespace = ns.oid" + " order by relpages desc", + progname, echo); + + ntuple = PQntuples(res); + for (i = 0; i < ntuple; i++) + { + relName = PQgetvalue(res, i, 0); + nspace = PQgetvalue(res, i, 1); + + appendPQExpBuffer(&sql, " \"%s\".\"%s\"", nspace, relName); + simple_string_list_append(&dbtables, sql.data); + resetPQExpBuffer(&sql); + } + + termPQExpBuffer(&sql); + + tables = &dbtables; + + } + + if (analyze_in_stages) + { + int i; + + for (i = 0; i < 3; i++) + { + const char *stage_messages[] = { + gettext_noop("Generating minimal optimizer statistics (1 target)"), + gettext_noop("Generating medium optimizer statistics (10 targets)"), + gettext_noop("Generating default (full) optimizer statistics")}; + + puts(gettext(stage_messages[i])); + vopt.analyze_stage = i; + run_parallel_vacuum(conn, echo, dbname, tables, full, verbose, + and_analyze, analyze_only, freeze, parallel, &vopt); + } + } + else + { + vopt.analyze_stage = -1; + run_parallel_vacuum(conn, echo, dbname, tables, full, verbose, + and_analyze, analyze_only, freeze, parallel, &vopt); + } + + PQfinish(conn); + } + + void run_command(ParallelState *pstate, char *command) + { + int work_status; + int ret_child; + + DispatchJob(pstate, command); + + /*Listen for worker and get message*/ + for (;;) + { + int nTerm = 0; + + ListenToWorkers(pstate, false); + while ((ret_child = ReapWorkerStatus(pstate, &work_status)) != NO_SLOT) + { + nTerm++; + } + + /* + * We need to make sure that we have an idle worker before + * re-running the loop. If nTerm > 0 we already have that (quick + * check). 
+ */ + if (nTerm > 0) + break; + + /* if nobody terminated, explicitly check for an idle worker */ + if (GetIdleWorker(pstate) != NO_SLOT) + break; + } + } + + void prepare_command(PGconn *conn, bool full, bool verbose, bool and_analyze, + bool analyze_only, bool freeze, PQExpBuffer sql) + { + initPQExpBuffer(sql); + + if (analyze_only) + { + appendPQExpBuffer(sql, "ANALYZE"); + if (verbose) + appendPQExpBuffer(sql, " VERBOSE"); + } + else + { + appendPQExpBuffer(sql, "VACUUM"); + if (PQserverVersion(conn) >= 90000) + { + const char *paren = " ("; + const char *comma = ", "; + const char *sep = paren; + + if (full) + { + appendPQExpBuffer(sql, "%sFULL", sep); + sep = comma; + } + if (freeze) + { + appendPQExpBuffer(sql, "%sFREEZE", sep); + sep = comma; + } + if (verbose) + { + appendPQExpBuffer(sql, "%sVERBOSE", sep); + sep = comma; + } + if (and_analyze) + { + appendPQExpBuffer(sql, "%sANALYZE", sep); + sep = comma; + } + if (sep != paren) + appendPQExpBuffer(sql, ")"); + } + else + { + if (full) + appendPQExpBuffer(sql, " FULL"); + if (freeze) + appendPQExpBuffer(sql, " FREEZE"); + if (verbose) + appendPQExpBuffer(sql, " VERBOSE"); + if (and_analyze) + appendPQExpBuffer(sql, " ANALYZE"); + } + } + } + static void help(const char *progname) *************** *** 405,410 **** help(const char *progname) --- 682,688 ---- printf(_(" -V, --version output version information, then exit\n")); printf(_(" -z, --analyze update optimizer statistics\n")); printf(_(" -Z, --analyze-only only update optimizer statistics\n")); + printf(_(" -j, --jobs=NUM use this many parallel jobs to vacuum\n")); printf(_(" --analyze-in-stages only update optimizer statistics, in multiple\n" " stages for faster results\n")); printf(_(" -?, --help show this help, then exit\n"));