From af2040cb5408f3876748f95dd8ee055358314caa Mon Sep 17 00:00:00 2001 From: Daniil Davidov Date: Mon, 18 Aug 2025 15:14:25 +0700 Subject: [PATCH v11 2/3] Logging for parallel autovacuum --- src/backend/access/heap/vacuumlazy.c | 27 ++++++++++++++++++++++++-- src/backend/commands/vacuumparallel.c | 28 ++++++++++++++++++--------- src/include/commands/vacuum.h | 16 +++++++++++++-- src/tools/pgindent/typedefs.list | 1 + 4 files changed, 59 insertions(+), 13 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 981d9380a92..6fe84d8747a 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -347,6 +347,12 @@ typedef struct LVRelState /* Instrumentation counters */ int num_index_scans; + + /* + * Total numbers of planned and actually launched parallel workers across + * all index scans, or NULL if these statistics are not collected + */ + PVWorkersUsage *workers_usage; /* Counters that follow are only for scanned_pages */ int64 tuples_deleted; /* # deleted from table */ int64 tuples_frozen; /* # newly frozen */ @@ -687,6 +693,16 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, indnames = palloc(sizeof(char *) * vacrel->nindexes); for (int i = 0; i < vacrel->nindexes; i++) indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i])); + + /* + * Allocate space for workers usage statistics, thereby making it + * explicit that such statistics must be accumulated. For now, this is + * used only by the autovacuum leader, because it must log them at the + * end of table processing. + */ + vacrel->workers_usage = AmAutoVacuumWorkerProcess() ? 
+ (PVWorkersUsage *) palloc0(sizeof(PVWorkersUsage)) : + NULL; } /* @@ -1011,6 +1027,11 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, vacrel->relnamespace, vacrel->relname, vacrel->num_index_scans); + if (vacrel->workers_usage) + appendStringInfo(&buf, + _("workers usage statistics for all index scans: launched in total = %d, planned in total = %d\n"), + vacrel->workers_usage->nlaunched, + vacrel->workers_usage->nplanned); appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"), vacrel->removed_pages, new_rel_pages, @@ -2639,7 +2660,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) { /* Outsource everything to parallel variant */ parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples, - vacrel->num_index_scans); + vacrel->num_index_scans, + vacrel->workers_usage); /* * Do a postcheck to consider applying wraparound failsafe now. Note @@ -3052,7 +3074,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) /* Outsource everything to parallel variant */ parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples, vacrel->num_index_scans, - estimated_count); + estimated_count, + vacrel->workers_usage); } /* Reset the progress counters */ diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 4221e6084f5..cada1722b76 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -227,9 +227,10 @@ struct ParallelVacuumState static int parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, bool *will_parallel_vacuum); static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum); + bool vacuum, PVWorkersUsage *wusage); static void parallel_vacuum_process_all_indexes_internal(ParallelVacuumState *pvs, - int num_index_scans, bool vacuum); + int num_index_scans, bool vacuum, + PVWorkersUsage *wusage); static void 
parallel_vacuum_process_safe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_unsafe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, @@ -504,7 +505,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs) */ void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans) + int num_index_scans, PVWorkersUsage *wusage) { Assert(!IsParallelWorker()); @@ -515,7 +516,7 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = true; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, true); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, true, wusage); } /* @@ -523,7 +524,8 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup */ void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans, bool estimated_count) + int num_index_scans, bool estimated_count, + PVWorkersUsage *wusage) { Assert(!IsParallelWorker()); @@ -535,7 +537,7 @@ parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = estimated_count; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, false); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wusage); } /* @@ -620,7 +622,7 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, */ static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum) + bool vacuum, PVWorkersUsage *wusage) { /* * Parallel autovacuum can reserve parallel workers. 
Use try/catch block @@ -629,7 +631,7 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan PG_TRY(); { parallel_vacuum_process_all_indexes_internal(pvs, num_index_scans, - false); + vacuum, wusage); } PG_CATCH(); { @@ -644,7 +646,8 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan static void parallel_vacuum_process_all_indexes_internal(ParallelVacuumState *pvs, - int num_index_scans, bool vacuum) + int num_index_scans, bool vacuum, + PVWorkersUsage *wusage) { int nworkers; PVIndVacStatus new_status; @@ -768,6 +771,13 @@ parallel_vacuum_process_all_indexes_internal(ParallelVacuumState *pvs, "launched %d parallel vacuum workers for index cleanup (planned: %d)", pvs->pcxt->nworkers_launched), pvs->pcxt->nworkers_launched, nworkers))); + + /* Remember these values, if we were asked to. */ + if (wusage != NULL) + { + wusage->nlaunched += pvs->pcxt->nworkers_launched; + wusage->nplanned += nworkers; + } } /* Vacuum the indexes that can be processed by only leader process */ diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index 14eeccbd718..0829a9658f2 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -295,6 +295,16 @@ typedef struct VacDeadItemsInfo int64 num_items; /* current # of entries */ } VacDeadItemsInfo; +/* + * PVWorkersUsage stores information about the total number of launched and + * planned workers during a parallel vacuum. 
+ */ +typedef struct PVWorkersUsage +{ + int nlaunched; + int nplanned; +} PVWorkersUsage; + /* GUC parameters */ extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */ extern PGDLLIMPORT int vacuum_freeze_min_age; @@ -389,11 +399,13 @@ extern TidStore *parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, extern void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs); extern void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans); + int num_index_scans, + PVWorkersUsage *wusage); extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, - bool estimated_count); + bool estimated_count, + PVWorkersUsage *wusage); extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc); /* in commands/analyze.c */ diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index a13e8162890..6f9c418689c 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2366,6 +2366,7 @@ PullFilterOps PushFilter PushFilterOps PushFunction +PVWorkersUsage PyCFunction PyMethodDef PyModuleDef -- 2.39.5 (Apple Git-154)