From db03efc9f21fa8cd9597cff146bbcce0becdf86a Mon Sep 17 00:00:00 2001 From: "Imseih (AWS)" Date: Mon, 28 Nov 2022 17:47:04 -0600 Subject: [PATCH 1/1] Add 2 new columns to pg_stat_progress_vacuum. The columns are indexes_total as the total indexes to be vacuumed or cleaned and indexes_completed as the number of indexes vacuumed or cleaned up so far. Author: Sami Imseih, based on suggestions by Nathan Bossart, Peter Geoghegan and Masahiko Sawada Reviewed by: Nathan Bossart, Masahiko Sawada Discussion: https://www.postgresql.org/message-id/flat/5478DFCD-2333-401A-B2F0-0D186AB09228@amazon.com --- contrib/bloom/blvacuum.c | 7 ++ doc/src/sgml/monitoring.sgml | 26 ++++++ src/backend/access/gin/ginvacuum.c | 8 ++ src/backend/access/gist/gistvacuum.c | 6 ++ src/backend/access/hash/hash.c | 6 ++ src/backend/access/heap/vacuumlazy.c | 33 ++++++- src/backend/access/nbtree/nbtree.c | 2 + src/backend/access/spgist/spgvacuum.c | 5 ++ src/backend/catalog/index.c | 1 + src/backend/catalog/system_views.sql | 3 +- src/backend/commands/vacuum.c | 2 + src/backend/commands/vacuumparallel.c | 110 +++++++++++++++++++++++- src/backend/utils/activity/wait_event.c | 3 + src/include/access/genam.h | 1 + src/include/commands/progress.h | 2 + src/include/commands/vacuum.h | 9 ++ src/include/utils/wait_event.h | 3 +- src/test/regress/expected/rules.out | 4 +- 18 files changed, 223 insertions(+), 8 deletions(-) diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 91fae5b..023fc7e 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -15,12 +15,14 @@ #include "access/genam.h" #include "bloom.h" #include "catalog/storage.h" +#include "commands/progress.h" #include "commands/vacuum.h" #include "miscadmin.h" #include "postmaster/autovacuum.h" #include "storage/bufmgr.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "utils/backend_progress.h" /* @@ -62,6 +64,9 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, *itupEnd; 
vacuum_delay_point(); + if (info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); @@ -192,6 +197,8 @@ blvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) Page page; vacuum_delay_point(); + if (info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 5579b8b..477dfe2 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -1759,6 +1759,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser ParallelFinish Waiting for parallel workers to finish computing. + + ParallelVacuumFinish + Waiting for parallel vacuum workers to finish computing. + ProcArrayGroupUpdate Waiting for the group leader to clear the transaction ID at @@ -6815,6 +6819,28 @@ FROM pg_stat_get_backend_idset() AS backendid; Number of dead tuples collected since the last index vacuum cycle. + + + + indexes_total bigint + + + Number of indexes that will be vacuumed. This value will be + 0 if there are no indexes to vacuum, INDEX_CLEANUP + is set to OFF, or vacuum failsafe is triggered. + See + for more on vacuum failsafe. + + + + + + indexes_completed bigint + + + Number of indexes vacuumed in the current vacuum cycle. 
+ + diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index b4fa5f6..e681164 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -17,12 +17,14 @@ #include "access/gin_private.h" #include "access/ginxlog.h" #include "access/xloginsert.h" +#include "commands/progress.h" #include "commands/vacuum.h" #include "miscadmin.h" #include "postmaster/autovacuum.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" #include "storage/predicate.h" +#include "utils/backend_progress.h" #include "utils/memutils.h" struct GinVacuumState @@ -665,6 +667,9 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, vacuum_delay_point(); + if (info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); + for (i = 0; i < nRoot; i++) { ginVacuumPostingTree(&gvs, rootOfPostingTree[i]); @@ -751,6 +756,9 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) vacuum_delay_point(); + if (info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); LockBuffer(buffer, GIN_SHARE); diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 0aa6e58..cb3deb1 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -17,11 +17,13 @@ #include "access/genam.h" #include "access/gist_private.h" #include "access/transam.h" +#include "commands/progress.h" #include "commands/vacuum.h" #include "lib/integerset.h" #include "miscadmin.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "utils/backend_progress.h" #include "utils/memutils.h" /* Working state needed by gistbulkdelete */ @@ -223,7 +225,11 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, break; /* Iterate over pages, then 
loop back to recheck length */ for (; blkno < num_pages; blkno++) + { gistvacuumpage(&vstate, blkno, blkno); + if (info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); + } } /* diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 77fd147..d807c9f 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -505,6 +505,12 @@ loop_top: blkno = bucket_blkno; + /* + * For hash indexes, we report parallel vacuum progress + * for every bucket. + */ + if (info->report_parallel_progress) + parallel_vacuum_update_progress(); /* * We need to acquire a cleanup lock on the primary bucket page to out * wait concurrent scans before deleting the dead tuples. diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index d59711b..f11a1b1 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -415,6 +415,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, vacrel->rel = rel; vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes, &vacrel->indrels); + if (instrument && vacrel->nindexes > 0) { /* Copy index names used by instrumentation (not error reporting) */ @@ -459,6 +460,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, Assert(params->index_cleanup == VACOPTVALUE_AUTO); } + /* report number of indexes to vacuum, if we are told to cleanup indexes */ + if (vacrel->do_index_cleanup) + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_TOTAL, vacrel->nindexes); + vacrel->bstrategy = bstrategy; vacrel->relfrozenxid = rel->rd_rel->relfrozenxid; vacrel->relminmxid = rel->rd_rel->relminmxid; @@ -2301,6 +2306,12 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples, vacrel); + /* + * Done vacuuming an index. 
Increment the indexes completed + */ + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED, + idx + 1); + if (lazy_check_wraparound_failsafe(vacrel)) { /* Wraparound emergency -- end current index scan */ @@ -2335,11 +2346,14 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) Assert(allindexes || vacrel->failsafe_active); /* - * Increase and report the number of index scans. + * Reset and report the number of indexes scanned. + * Also, increase and report the number of index + * scans. * * We deliberately include the case where we started a round of bulk * deletes that we weren't able to finish due to the failsafe triggering. */ + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED, 0); vacrel->num_index_scans++; pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS, vacrel->num_index_scans); @@ -2593,10 +2607,17 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel) { vacrel->failsafe_active = true; - /* Disable index vacuuming, index cleanup, and heap rel truncation */ + /* + * Disable index vacuuming, index cleanup, and heap rel truncation + * + * Also, report to progress.h that we are no longer tracking + * index vacuum/cleanup. + */ vacrel->do_index_vacuuming = false; vacrel->do_index_cleanup = false; vacrel->do_rel_truncate = false; + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_TOTAL, 0); + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED, 0); ereport(WARNING, (errmsg("bypassing nonessential maintenance of table \"%s.%s.%s\" as a failsafe after %d index scans", @@ -2644,6 +2665,12 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) vacrel->indstats[idx] = lazy_cleanup_one_index(indrel, istat, reltuples, estimated_count, vacrel); + + /* + * Done cleaning an index. 
Increment the indexes completed + */ + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED, + idx + 1); } } else @@ -2678,6 +2705,7 @@ lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, ivinfo.index = indrel; ivinfo.analyze_only = false; ivinfo.report_progress = false; + ivinfo.report_parallel_progress = false; ivinfo.estimated_count = true; ivinfo.message_level = DEBUG2; ivinfo.num_heap_tuples = reltuples; @@ -2726,6 +2754,7 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat, ivinfo.index = indrel; ivinfo.analyze_only = false; ivinfo.report_progress = false; + ivinfo.report_parallel_progress = false; ivinfo.estimated_count = estimated_count; ivinfo.message_level = DEBUG2; diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index b52eca8..39349bb 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -998,6 +998,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, if (info->report_progress) pgstat_progress_update_param(PROGRESS_SCAN_BLOCKS_DONE, scanblkno); + if (info->report_parallel_progress && (scanblkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); } } diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index ad90b21..0589683 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -21,11 +21,13 @@ #include "access/transam.h" #include "access/xloginsert.h" #include "catalog/storage_xlog.h" +#include "commands/progress.h" #include "commands/vacuum.h" #include "miscadmin.h" #include "storage/bufmgr.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "utils/backend_progress.h" #include "utils/snapmgr.h" @@ -843,6 +845,9 @@ spgvacuumscan(spgBulkDeleteState *bds) /* empty the pending-list after each page */ if (bds->pendingList != NULL) spgprocesspending(bds); + /* report parallel vacuum progress */ + 
if (bds->info->report_parallel_progress && (blkno % REPORT_PARALLEL_VACUUM_EVERY_PAGES) == 0) + parallel_vacuum_update_progress(); } } diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 61f1d39..11b3212 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3348,6 +3348,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) ivinfo.index = indexRelation; ivinfo.analyze_only = false; ivinfo.report_progress = true; + ivinfo.report_parallel_progress = false; ivinfo.estimated_count = true; ivinfo.message_level = DEBUG2; ivinfo.num_heap_tuples = heapRelation->rd_rel->reltuples; diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 2d8104b..c37b20b 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -1165,7 +1165,8 @@ CREATE VIEW pg_stat_progress_vacuum AS END AS phase, S.param2 AS heap_blks_total, S.param3 AS heap_blks_scanned, S.param4 AS heap_blks_vacuumed, S.param5 AS index_vacuum_count, - S.param6 AS max_dead_tuples, S.param7 AS num_dead_tuples + S.param6 AS max_dead_tuples, S.param7 AS num_dead_tuples, + S.param8 AS indexes_total, S.param9 AS indexes_completed FROM pg_stat_get_progress_info('VACUUM') AS S LEFT JOIN pg_database D ON S.datid = D.oid; diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index a6d5ed1..2da956f 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -75,6 +75,8 @@ int vacuum_multixact_failsafe_age; static MemoryContext vac_context = NULL; static BufferAccessStrategy vac_strategy; +/* Shared parameter to track vacuum parallel progress */ +pg_atomic_uint32 *ParallelVacuumProgress = NULL; /* * Variables for cost-based parallel vacuum. 
See comments atop diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index f26d796..b5b80c9 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -30,6 +30,7 @@ #include "access/table.h" #include "access/xact.h" #include "catalog/index.h" +#include "commands/progress.h" #include "commands/vacuum.h" #include "optimizer/paths.h" #include "pgstat.h" @@ -50,6 +51,8 @@ #define PARALLEL_VACUUM_KEY_WAL_USAGE 5 #define PARALLEL_VACUUM_KEY_INDEX_STATS 6 +#define PARALLEL_VACUUM_PROGRESS_TIMEOUT 1000 + /* * Shared information among parallel workers. So this is allocated in the DSM * segment. @@ -103,6 +106,17 @@ typedef struct PVShared /* Counter for vacuuming and cleanup */ pg_atomic_uint32 idx; + + /* + * Counter for vacuuming and cleanup progress reporting. + * This value is used to report index vacuum/cleanup progress + * in parallel_vacuum_progress_report. We keep this + * counter to avoid having to loop through + * ParallelVacuumState->indstats to determine the number + * of indexes completed. + */ + pg_atomic_uint32 idx_completed_progress; + } PVShared; /* Status used during parallel index vacuum or cleanup */ @@ -213,6 +227,7 @@ static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation static bool parallel_vacuum_index_is_parallel_safe(Relation indrel, int num_index_scans, bool vacuum); static void parallel_vacuum_error_callback(void *arg); +static void parallel_wait_for_workers_to_finish(ParallelVacuumState *pvs); /* * Try to enter parallel mode and create a parallel context. 
Then initialize @@ -364,6 +379,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, pg_atomic_init_u32(&(shared->cost_balance), 0); pg_atomic_init_u32(&(shared->active_nworkers), 0); pg_atomic_init_u32(&(shared->idx), 0); + pg_atomic_init_u32(&(shared->idx_completed_progress), 0); shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared); pvs->shared = shared; @@ -618,8 +634,9 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan vacuum)); } - /* Reset the parallel index processing counter */ + /* Reset the parallel index processing counter ( index progress counter also ) */ pg_atomic_write_u32(&(pvs->shared->idx), 0); + pg_atomic_write_u32(&(pvs->shared->idx_completed_progress), 0); /* Setup the shared cost-based vacuum delay and launch workers */ if (nworkers > 0) @@ -645,6 +662,14 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan LaunchParallelWorkers(pvs->pcxt); + /* + * Set the shared parallel vacuum progress. This will be used + * to periodically update progress.h with completed indexes + * in a parallel vacuum. See comments in parallel_vacuum_update_progress + * for more details. + */ + ParallelVacuumProgress = &(pvs->shared->idx_completed_progress); + if (pvs->pcxt->nworkers_launched > 0) { /* @@ -688,7 +713,21 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan */ if (nworkers > 0) { - /* Wait for all vacuum workers to finish */ + /* + * To wait for parallel workers to finish, + * first call parallel_wait_for_workers_to_finish + * which is responsible for reporting the + * number of indexes completed. + * + * Afterwards, WaitForParallelWorkersToFinish is called + * to do the real work of waiting for parallel workers + * to finish. + * + * Note: Both routines will acquire a WaitLatch in their + * respective loops. 
+ */ + parallel_wait_for_workers_to_finish(pvs); + WaitForParallelWorkersToFinish(pvs->pcxt); for (int i = 0; i < pvs->pcxt->nworkers_launched; i++) @@ -710,6 +749,9 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan indstats->status = PARALLEL_INDVAC_STATUS_INITIAL; } + /* Reset parallel progress */ + ParallelVacuumProgress = NULL; + /* * Carry the shared balance value to heap scan and disable shared costing */ @@ -838,7 +880,8 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, ivinfo.estimated_count = pvs->shared->estimated_count; ivinfo.num_heap_tuples = pvs->shared->reltuples; ivinfo.strategy = pvs->bstrategy; - + /* Only the leader should report parallel vacuum progress */ + ivinfo.report_parallel_progress = !IsParallelWorker(); /* Update error traceback information */ pvs->indname = pstrdup(RelationGetRelationName(indrel)); pvs->status = indstats->status; @@ -857,6 +900,9 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, RelationGetRelationName(indrel)); } + if (ivinfo.report_parallel_progress) + parallel_vacuum_update_progress(); + /* * Copy the index bulk-deletion result returned from ambulkdelete and * amvacuumcleanup to the DSM segment if it's the first cycle because they @@ -888,6 +934,9 @@ parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, pvs->status = PARALLEL_INDVAC_STATUS_COMPLETED; pfree(pvs->indname); pvs->indname = NULL; + + /* Update the number of indexes completed. */ + pg_atomic_add_fetch_u32(&(pvs->shared->idx_completed_progress), 1); } /* @@ -1072,3 +1121,58 @@ parallel_vacuum_error_callback(void *arg) return; } } + +/* + * Read the shared ParallelVacuumProgress and update progress.h + * with indexes vacuumed so far. This function is called periodically + * by index AMs as well as parallel_vacuum_process_one_index. 
+ * + * To avoid unnecessarily updating progress, we check the progress + * values from the backend entry and only update if the value + * of completed indexes increases. + * + * Note: This function should be used by the leader process only, + * and it's up to the caller to ensure this. + */ +void +parallel_vacuum_update_progress(void) +{ + volatile PgBackendStatus *beentry = MyBEEntry; + + Assert(!IsParallelWorker()); + + if (beentry && ParallelVacuumProgress) + { + int parallel_vacuum_current_value = beentry->st_progress_param[PROGRESS_VACUUM_INDEX_COMPLETED]; + int parallel_vacuum_new_value = pg_atomic_read_u32(ParallelVacuumProgress); + + if (parallel_vacuum_new_value > parallel_vacuum_current_value) + pgstat_progress_update_param(PROGRESS_VACUUM_INDEX_COMPLETED, parallel_vacuum_new_value); + } +} + +/* + * Check if we are done vacuuming indexes and report + * progress. + * + * We nap using a WaitLatch to avoid a busy loop. + * + * Note: This function should be used by the leader process only, + * and it's up to the caller to ensure this. 
+ */ +static void +parallel_wait_for_workers_to_finish(ParallelVacuumState *pvs) +{ + Assert(!IsParallelWorker()); + + while (pg_atomic_read_u32(ParallelVacuumProgress) < pvs->nindexes) + { + CHECK_FOR_INTERRUPTS(); + + parallel_vacuum_update_progress(); + + (void) WaitLatch(MyLatch, WL_TIMEOUT | WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, PARALLEL_VACUUM_PROGRESS_TIMEOUT, + WAIT_EVENT_PARALLEL_VACUUM_FINISH); + ResetLatch(MyLatch); + } +} diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c index b2abd75..eeba2be 100644 --- a/src/backend/utils/activity/wait_event.c +++ b/src/backend/utils/activity/wait_event.c @@ -460,6 +460,9 @@ pgstat_get_wait_ipc(WaitEventIPC w) case WAIT_EVENT_XACT_GROUP_UPDATE: event_name = "XactGroupUpdate"; break; + case WAIT_EVENT_PARALLEL_VACUUM_FINISH: + event_name = "ParallelVacuumFinish"; + break; /* no default case, so that compiler will warn */ } diff --git a/src/include/access/genam.h b/src/include/access/genam.h index e1c4fdb..7474734 100644 --- a/src/include/access/genam.h +++ b/src/include/access/genam.h @@ -46,6 +46,7 @@ typedef struct IndexVacuumInfo Relation index; /* the index being vacuumed */ bool analyze_only; /* ANALYZE (without any actual vacuum) */ bool report_progress; /* emit progress.h status reports */ + bool report_parallel_progress; /* emit progress.h status reports for parallel vacuum */ bool estimated_count; /* num_heap_tuples is an estimate */ int message_level; /* ereport level for progress messages */ double num_heap_tuples; /* tuples remaining in heap */ diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h index a28938c..0e97c6d 100644 --- a/src/include/commands/progress.h +++ b/src/include/commands/progress.h @@ -25,6 +25,8 @@ #define PROGRESS_VACUUM_NUM_INDEX_VACUUMS 4 #define PROGRESS_VACUUM_MAX_DEAD_TUPLES 5 #define PROGRESS_VACUUM_NUM_DEAD_TUPLES 6 +#define PROGRESS_VACUUM_INDEX_TOTAL 7 +#define PROGRESS_VACUUM_INDEX_COMPLETED 8 /* Phases of vacuum (as 
advertised via PROGRESS_VACUUM_PHASE) */ #define PROGRESS_VACUUM_PHASE_SCAN_HEAP 1 diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index 4e4bc26..5a6b454 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -64,6 +64,12 @@ /* value for checking vacuum flags */ #define VACUUM_OPTION_MAX_VALID_VALUE ((1 << 3) - 1) +/* + * Parallel Index vacuum progress is reported every 1GB of blocks + * scanned. + */ +#define REPORT_PARALLEL_VACUUM_EVERY_PAGES ((BlockNumber) (((uint64) 1024 * 1024 * 1024) / BLCKSZ)) + /* Abstract type for parallel vacuum state */ typedef struct ParallelVacuumState ParallelVacuumState; @@ -259,6 +265,8 @@ extern PGDLLIMPORT int vacuum_multixact_freeze_table_age; extern PGDLLIMPORT int vacuum_failsafe_age; extern PGDLLIMPORT int vacuum_multixact_failsafe_age; +extern PGDLLIMPORT pg_atomic_uint32 *ParallelVacuumProgress; + /* Variables for cost-based parallel vacuum */ extern PGDLLIMPORT pg_atomic_uint32 *VacuumSharedCostBalance; extern PGDLLIMPORT pg_atomic_uint32 *VacuumActiveNWorkers; @@ -333,5 +341,6 @@ extern bool std_typanalyze(VacAttrStats *stats); extern double anl_random_fract(void); extern double anl_init_selection_state(int n); extern double anl_get_next_S(double t, int n, double *stateptr); +extern void parallel_vacuum_update_progress(void); #endif /* VACUUM_H */ diff --git a/src/include/utils/wait_event.h b/src/include/utils/wait_event.h index 0b2100b..95e9fef 100644 --- a/src/include/utils/wait_event.h +++ b/src/include/utils/wait_event.h @@ -128,7 +128,8 @@ typedef enum WAIT_EVENT_SYNC_REP, WAIT_EVENT_WAL_RECEIVER_EXIT, WAIT_EVENT_WAL_RECEIVER_WAIT_START, - WAIT_EVENT_XACT_GROUP_UPDATE + WAIT_EVENT_XACT_GROUP_UPDATE, + WAIT_EVENT_PARALLEL_VACUUM_FINISH } WaitEventIPC; /* ---------- diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 37c1c86..896043e 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ 
-2021,7 +2021,9 @@ pg_stat_progress_vacuum| SELECT s.pid, s.param4 AS heap_blks_vacuumed, s.param5 AS index_vacuum_count, s.param6 AS max_dead_tuples, - s.param7 AS num_dead_tuples + s.param7 AS num_dead_tuples, + s.param8 AS indexes_total, + s.param9 AS indexes_completed FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) LEFT JOIN pg_database d ON ((s.datid = d.oid))); pg_stat_recovery_prefetch| SELECT s.stats_reset, -- 2.32.1 (Apple Git-133)