From 895131049b2416566781ff585a577bc0342f5f71 Mon Sep 17 00:00:00 2001 From: Daniil Davidov Date: Sun, 23 Nov 2025 01:07:47 +0700 Subject: [PATCH v19 2/5] Logging for parallel autovacuum --- src/backend/access/heap/vacuumlazy.c | 27 +++++++++++++++++++++++++-- src/backend/commands/vacuumparallel.c | 21 +++++++++++++++------ src/include/commands/vacuum.h | 16 ++++++++++++++-- src/tools/pgindent/typedefs.list | 1 + 4 files changed, 55 insertions(+), 10 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 1fcb212ab3d..0be33cb84a6 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -347,6 +347,12 @@ typedef struct LVRelState int num_index_scans; int num_dead_items_resets; Size total_dead_items_bytes; + + /* + * Total number of planned and actually launched parallel workers for + * index scans. + */ + PVWorkersUsage workers_usage; /* Counters that follow are only for scanned_pages */ int64 tuples_deleted; /* # deleted from table */ int64 tuples_frozen; /* # newly frozen */ @@ -630,6 +636,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, LVRelState *vacrel; bool verbose, instrument, + log_workers_usage = false, /* for parallel [auto]vacuum only */ skipwithvm, frozenxid_updated, minmulti_updated; @@ -709,6 +716,12 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, indnames = palloc_array(char *, vacrel->nindexes); for (int i = 0; i < vacrel->nindexes; i++) indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i])); + + /* + * Worker usage statistics must be accumulated for parallel autovacuum + * and for VACUUM (PARALLEL, VERBOSE). 
+ */ + log_workers_usage = (params.nworkers > -1); } /* @@ -781,6 +794,9 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, vacrel->vm_new_visible_frozen_pages = 0; vacrel->vm_new_frozen_pages = 0; + vacrel->workers_usage.nlaunched = 0; + vacrel->workers_usage.nplanned = 0; + /* * Get cutoffs that determine which deleted tuples are considered DEAD, * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine @@ -1123,6 +1139,11 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, orig_rel_pages == 0 ? 100.0 : 100.0 * vacrel->lpdead_item_pages / orig_rel_pages, vacrel->lpdead_items); + if (log_workers_usage) + appendStringInfo(&buf, + _("parallel index vacuum/cleanup: %d workers were planned and %d workers were launched in total\n"), + vacrel->workers_usage.nplanned, + vacrel->workers_usage.nlaunched); for (int i = 0; i < vacrel->nindexes; i++) { IndexBulkDeleteResult *istat = vacrel->indstats[i]; @@ -2698,7 +2719,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) { /* Outsource everything to parallel variant */ parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples, - vacrel->num_index_scans); + vacrel->num_index_scans, + &vacrel->workers_usage); /* * Do a postcheck to consider applying wraparound failsafe now. 
Note @@ -3131,7 +3153,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) /* Outsource everything to parallel variant */ parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples, vacrel->num_index_scans, - estimated_count); + estimated_count, + &vacrel->workers_usage); } /* Reset the progress counters */ diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index cb42d4e572f..c32314f9731 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -227,7 +227,7 @@ struct ParallelVacuumState static int parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, bool *will_parallel_vacuum); static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum); + bool vacuum, PVWorkersUsage *wusage); static void parallel_vacuum_process_safe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_unsafe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, @@ -502,7 +502,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs) */ void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans) + int num_index_scans, PVWorkersUsage *wusage) { Assert(!IsParallelWorker()); @@ -513,7 +513,7 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = true; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, true); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, true, wusage); } /* @@ -521,7 +521,8 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup */ void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans, bool estimated_count) + int num_index_scans, bool estimated_count, + PVWorkersUsage *wusage) { 
Assert(!IsParallelWorker()); @@ -533,7 +534,7 @@ parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = estimated_count; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, false); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wusage); } /* @@ -618,7 +619,7 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, */ static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum) + bool vacuum, PVWorkersUsage *wusage) { int nworkers; PVIndVacStatus new_status; @@ -655,6 +656,10 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan */ nworkers = Min(nworkers, pvs->pcxt->nworkers); + /* Remember this value if we were asked to */ + if (wusage != NULL && nworkers > 0) + wusage->nplanned += nworkers; + /* * Reserve workers in autovacuum global state. Note that we may be given * fewer workers than we requested. */ @@ -725,6 +730,10 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan /* Enable shared cost balance for leader backend */ VacuumSharedCostBalance = &(pvs->shared->cost_balance); VacuumActiveNWorkers = &(pvs->shared->active_nworkers); + + /* Remember this value if we were asked to */ + if (wusage != NULL) + wusage->nlaunched += pvs->pcxt->nworkers_launched; } if (vacuum) diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index e885a4b9c77..ec5d70aacdc 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -300,6 +300,16 @@ typedef struct VacDeadItemsInfo int64 num_items; /* current # of entries */ } VacDeadItemsInfo; +/* + * PVWorkersUsage stores information about the total number of launched and planned + * workers during parallel vacuum. 
+ */ +typedef struct PVWorkersUsage +{ + int nlaunched; + int nplanned; +} PVWorkersUsage; + /* GUC parameters */ extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */ extern PGDLLIMPORT int vacuum_freeze_min_age; @@ -394,11 +404,13 @@ extern TidStore *parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, extern void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs); extern void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans); + int num_index_scans, + PVWorkersUsage *wusage); extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, - bool estimated_count); + bool estimated_count, + PVWorkersUsage *wusage); extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc); /* in commands/analyze.c */ diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 3f3a888fd0e..afebde72235 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2404,6 +2404,7 @@ PullFilterOps PushFilter PushFilterOps PushFunction +PVWorkersUsage PyCFunction PyMethodDef PyModuleDef -- 2.43.0