From 054ab1e3ebf6f46877b394da6e5e86709f964abb Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Sun, 28 Mar 2021 20:55:55 -0700 Subject: [PATCH v10 5/5] Bypass index vacuuming in some cases. Bypass index vacuuming in two cases: the case where there are so few dead tuples that index vacuuming seems unnecessary, and the case where the relfrozenxid of the table being vacuumed is dangerously far in the past. This commit adds new GUC parameters vacuum_skip_index_age and vacuum_multixact_skip_index_age that specify the age at which VACUUM should skip index cleanup in order to finish quickly and advance relfrozenxid/relminmxid. After each round of index vacuuming (in the non-parallel vacuum case), we check the table's relfrozenxid/relminmxid against those new GUC parameters. If they are too old, we skip further index vacuuming within the vacuum operation. This behavior is intended to deal with the risk of XID wraparound; accordingly, the default values are very high (1.8 billion). Although users can set those parameters, VACUUM will silently adjust the effective value to no less than 105% of autovacuum_freeze_max_age/autovacuum_multixact_freeze_max_age, so that only anti-wraparound autovacuums and aggressive scans have a chance to skip index vacuuming. 
Author: Masahiko Sawada Author: Peter Geoghegan Discussion: https://postgr.es/m/CAD21AoD0SkE11fMw4jD4RENAwBMcw1wasVnwpJVw3tVqPOQgAw@mail.gmail.com Discussion: https://postgr.es/m/CAH2-WzmkebqPd4MVGuPTOS9bMFvp9MDs5cRTCOsv1rQJ3jCbXw@mail.gmail.com --- src/include/commands/vacuum.h | 4 + src/backend/access/heap/vacuumlazy.c | 300 ++++++++++++++++-- src/backend/commands/vacuum.c | 61 ++++ src/backend/utils/misc/guc.c | 25 +- src/backend/utils/misc/postgresql.conf.sample | 2 + doc/src/sgml/config.sgml | 51 +++ doc/src/sgml/maintenance.sgml | 10 +- 7 files changed, 431 insertions(+), 22 deletions(-) diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index d029da5ac0..f1815e7892 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -235,6 +235,8 @@ extern int vacuum_freeze_min_age; extern int vacuum_freeze_table_age; extern int vacuum_multixact_freeze_min_age; extern int vacuum_multixact_freeze_table_age; +extern int vacuum_skip_index_age; +extern int vacuum_multixact_skip_index_age; /* Variables for cost-based parallel vacuum */ extern pg_atomic_uint32 *VacuumSharedCostBalance; @@ -270,6 +272,8 @@ extern void vacuum_set_xid_limits(Relation rel, TransactionId *xidFullScanLimit, MultiXactId *multiXactCutoff, MultiXactId *mxactFullScanLimit); +extern bool vacuum_xid_limit_emergency(TransactionId relfrozenxid, + MultiXactId relminmxid); extern void vac_update_datfrozenxid(void); extern void vacuum_delay_point(void); extern bool vacuum_is_relation_owner(Oid relid, Form_pg_class reltuple, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index c7bb0b1f23..3eeecb954f 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -103,6 +103,17 @@ #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */ #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */ +/* + * Threshold that controls whether we bypass index vacuuming and heap + * vacuuming. 
When we're under the threshold they're deemed unnecessary. + * BYPASS_THRESHOLD_PAGES is applied as a multiplier on the table's rel_pages + * for those pages known to contain one or more LP_DEAD items. + */ +#define BYPASS_THRESHOLD_PAGES 0.02 /* i.e. 2% of rel_pages */ + +#define BYPASS_EMERGENCY_MIN_PAGES \ + ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ)) + /* * When a table has no indexes, vacuum the FSM after every 8GB, approximately * (it won't be exact because we only vacuum FSM after processing a heap page @@ -299,6 +310,7 @@ typedef struct LVRelState /* Do index vacuuming/cleanup? */ bool do_index_vacuuming; bool do_index_cleanup; + bool do_failsafe_speedup; /* Buffer access strategy and parallel state */ BufferAccessStrategy bstrategy; @@ -392,13 +404,14 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, GlobalVisState *vistest, LVPagePruneState *prunestate); -static void lazy_vacuum(LVRelState *vacrel); -static void lazy_vacuum_all_indexes(LVRelState *vacrel); +static void lazy_vacuum(LVRelState *vacrel, bool onecall); +static bool lazy_vacuum_all_indexes(LVRelState *vacrel); static void lazy_vacuum_heap_rel(LVRelState *vacrel); static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int tupindex, Buffer *vmbuffer); static bool lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel); +static bool should_speedup_failsafe(LVRelState *vacrel); static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel); static void do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel); static void do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers); @@ -544,6 +557,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, &vacrel->indrels); vacrel->do_index_vacuuming = true; vacrel->do_index_cleanup = true; + vacrel->do_failsafe_speedup = false; if (params->index_cleanup == VACOPT_TERNARY_DISABLED) { vacrel->do_index_vacuuming = false; @@ -743,6 +757,29 
@@ heap_vacuum_rel(Relation rel, VacuumParams *params, (long long) VacuumPageHit, (long long) VacuumPageMiss, (long long) VacuumPageDirty); + if (vacrel->rel_pages > 0) + { + msgfmt = _(" %u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n"); + + if (vacrel->nindexes == 0 || (vacrel->do_index_vacuuming && + vacrel->num_index_scans == 0)) + appendStringInfo(&buf, _("index scan not needed:")); + else if (vacrel->do_index_vacuuming && vacrel->num_index_scans > 0) + appendStringInfo(&buf, _("index scan needed:")); + else + { + msgfmt = _(" %u pages from table (%.2f%% of total) have %lld dead item identifiers\n"); + + if (!vacrel->do_failsafe_speedup) + appendStringInfo(&buf, _("index scan bypassed:")); + else + appendStringInfo(&buf, _("index scan bypassed due to emergency:")); + } + appendStringInfo(&buf, msgfmt, + vacrel->lpdead_item_pages, + 100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages, + (long long) vacrel->lpdead_items); + } for (int i = 0; i < vacrel->nindexes; i++) { IndexBulkDeleteResult *istat = vacrel->indstats[i]; @@ -833,7 +870,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) next_fsm_block_to_vacuum; PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; - bool skipping_blocks; + bool skipping_blocks, + have_vacuumed_indexes = false; StringInfoData buf; const int initprog_index[] = { PROGRESS_VACUUM_PHASE, @@ -1087,11 +1125,18 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* Remove the collected garbage tuples from table and indexes */ - lazy_vacuum(vacrel); + lazy_vacuum(vacrel, false); + have_vacuumed_indexes = true; /* * Vacuum the Free Space Map to make newly-freed space visible on * upper-level FSM pages. Note we have not yet processed blkno. + * + * Note also that it's possible that the call to lazy_vacuum() + * decided to end index vacuuming due to an emergency (though not + * for any other reason). 
When that happens we can miss out on + * some of the free space that we originally expected to be able + * to pick up within lazy_vacuum_heap_rel(). */ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); @@ -1306,15 +1351,18 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) /* * Periodically do incremental FSM vacuuming to make newly-freed - * space visible on upper FSM pages. Note: although we've cleaned - * the current block, we haven't yet updated its FSM entry (that - * happens further down), so passing end == blkno is correct. + * space visible on upper FSM pages. This is also a convenient + * point to check if we should do failsafe speedup to avoid + * wraparound failures. Note: although we've cleaned the current + * block, we haven't yet updated its FSM entry (that happens + * further down), so passing end == blkno is correct. */ if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) { FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); next_fsm_block_to_vacuum = blkno; + should_speedup_failsafe(vacrel); } Assert(dead_tuples->num_tuples == 0); @@ -1454,6 +1502,14 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * make available in cases where it's possible to truncate the * page's line pointer array. * + * Note: It's not in fact 100% certain that we really will call + * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip + * index vacuuming (and so must skip heap vacuuming). This is + * deemed okay because it only happens in emergencies, or when + * there is very little free space anyway. (Besides, we start + * recording free space in FSM once we know that index vacuuming + * was abandoned.) + * * Note that the one-pass (no indexes) case is only supposed to * make it this far when there were no LP_DEAD items during * pruning. 
@@ -1498,13 +1554,12 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* If any tuples need to be deleted, perform final vacuum cycle */ - /* XXX put a threshold on min number of tuples here? */ if (dead_tuples->num_tuples > 0) - lazy_vacuum(vacrel); + lazy_vacuum(vacrel, !have_vacuumed_indexes); /* * Vacuum the remainder of the Free Space Map. We must do this whether or - * not there were indexes. + * not there were indexes, and whether or not we bypassed index vacuuming. */ if (blkno > next_fsm_block_to_vacuum) FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); @@ -1531,6 +1586,16 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * If table has no indexes and at least one heap pages was vacuumed, make * log report that lazy_vacuum_heap_rel would've made had there been * indexes (having indexes implies using the two pass strategy). + * + * We deliberately don't do this in the case where there are indexes but + * index vacuuming was bypassed. We make a similar report at the point + * that index vacuuming is bypassed, but that's actually quite different + * in one important sense: it shows information about work we _haven't_ + * done. + * + * log_autovacuum output does things differently; it consistently presents + * information about LP_DEAD items for the VACUUM as a whole. We always + * report on each round of index and heap vacuuming separately, though. */ if (vacrel->nindexes == 0 && vacrel->lpdead_item_pages > 0) ereport(elevel, @@ -1974,10 +2039,19 @@ retry: /* * Remove the collected garbage tuples from the table and its indexes. + * + * We may choose to bypass index vacuuming at this point. + * + * In rare emergencies, the ongoing VACUUM operation can be made to skip both + * index vacuuming and index cleanup at the point we're called. This avoids + * having the whole system refuse to allocate further XIDs/MultiXactIds due to + * wraparound. 
*/ static void -lazy_vacuum(LVRelState *vacrel) +lazy_vacuum(LVRelState *vacrel, bool onecall) { + bool do_bypass_optimization; + /* Should not end up here with no indexes */ Assert(vacrel->nindexes > 0); Assert(!IsParallelWorker()); @@ -1990,11 +2064,100 @@ lazy_vacuum(LVRelState *vacrel) return; } - /* Okay, we're going to do index vacuuming */ - lazy_vacuum_all_indexes(vacrel); + /* + * Consider bypassing index vacuuming (and heap vacuuming) entirely. + * + * It's far from clear how we might assess the point at which bypassing + * index vacuuming starts to make sense. But it is at least clear that + * VACUUM should not go ahead with index vacuuming in certain extreme + * (though still fairly common) cases. These are the cases where we have + * _close to_ zero LP_DEAD items/TIDs to delete from indexes. It would be + * totally arbitrary to perform a round of full index scans in that case, + * while not also doing the same thing when we happen to have _precisely_ + * zero TIDs -- so we do neither. This avoids sharp discontinuities in + * the duration and overhead of successive VACUUM operations that run + * against the same table with the same workload. + * + * Our approach is to bypass index vacuuming only when there are very few + * heap pages with dead items. Even then, it must be the first and last + * call here for the VACUUM. We never apply the optimization when + * multiple index scans will be required -- we cannot accumulate "debt" + * without bound. + * + * This threshold we apply allows us to not give as much weight to items + * that are concentrated in relatively few heap pages. Concentrated + * build-up of LP_DEAD items tends to occur with workloads that have + * non-HOT updates that affect the same logical rows again and again. It + * is probably not possible for us to keep the visibility map bits for + * these pages set for a useful amount of time anyway. 
+ * + * We apply one further check: the space currently used to store the TIDs + * (the TIDs that tie back to the index tuples we're thinking about not + * deleting this time around) must not exceed 32MB. This limits the risk + * that we will bypass index vacuuming again and again until eventually + * there is a VACUUM whose dead_tuples space is not resident in L3 cache. + * + * We can be conservative about avoiding eventually reaching some kind of + * cliff edge while still avoiding almost all truly unnecessary index + * vacuuming. + */ + do_bypass_optimization = false; + if (onecall && vacrel->rel_pages > 0) + { + BlockNumber threshold; - /* Remove tuples from heap */ - lazy_vacuum_heap_rel(vacrel); + Assert(vacrel->num_index_scans == 0); + Assert(vacrel->lpdead_items == vacrel->dead_tuples->num_tuples); + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); + + threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES; + + do_bypass_optimization = + (vacrel->lpdead_item_pages < threshold && + vacrel->lpdead_items < MAXDEADTUPLES(32L * 1024L * 1024L)); + } + + if (do_bypass_optimization) + { + /* + * Bypass index vacuuming. + * + * Since VACUUM aims to behave as if there were precisely zero index + * tuples, even when there are actually slightly more than zero, we + * will still do index cleanup. This is expected to have practically + * no overhead with tables where bypassing index vacuuming helps. + */ + vacrel->do_index_vacuuming = false; + ereport(elevel, + (errmsg("\"%s\": index scan bypassed: %u pages from table (%.2f%% of total) have %lld dead item identifiers", + vacrel->relname, vacrel->rel_pages, + 100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages, + (long long) vacrel->lpdead_items))); + } + else if (lazy_vacuum_all_indexes(vacrel)) + { + /* + * We successfully completed a round of index vacuuming. Do related + * heap vacuuming now. + */ + lazy_vacuum_heap_rel(vacrel); + } + else + { + /* + * Emergency case. 
+ * + * we attempted index vacuuming, but didn't finish a full round/full + * index scan. This happens when relfrozenxid or relminmxid is too + * far in the past. + * + * From this point on the VACUUM operation will do no further index + * vacuuming or heap vacuuming. It will do any remaining pruning that + * may be required, plus other heap-related and relation-level + * maintenance tasks. But that's it. + */ + } /* * Forget the now-vacuumed tuples -- just press on @@ -2004,16 +2167,30 @@ lazy_vacuum(LVRelState *vacrel) /* * lazy_vacuum_all_indexes() -- Main entry for index vacuuming + * + * Returns true in the common case when all indexes were successfully + * vacuumed. Returns false in rare cases where we determined that the ongoing + * VACUUM operation is at risk of taking too long to finish, leading to + * wraparound failure. */ -static void +static bool lazy_vacuum_all_indexes(LVRelState *vacrel) { + bool allindexes = true; + Assert(vacrel->nindexes > 0); Assert(vacrel->do_index_vacuuming); Assert(vacrel->do_index_cleanup); Assert(TransactionIdIsNormal(vacrel->relfrozenxid)); Assert(MultiXactIdIsValid(vacrel->relminmxid)); + /* Precheck for XID wraparound emergencies */ + if (should_speedup_failsafe(vacrel)) + { + /* Wraparound emergency -- don't even start an index scan */ + return false; + } + /* Report that we are now vacuuming indexes */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX); @@ -2028,26 +2205,42 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples, vacrel); + + if (should_speedup_failsafe(vacrel)) + { + /* Wraparound emergency -- end current index scan */ + allindexes = false; + break; + } } } else { + /* Note: parallel VACUUM only gets the precheck */ + allindexes = true; + /* Outsource everything to parallel variant */ do_parallel_lazy_vacuum_all_indexes(vacrel); } /* * We delete all LP_DEAD items from the first heap pass in 
all indexes on - * each call here. This makes the next call to lazy_vacuum_heap_rel() - * safe. + * each call here (except calls where we don't finish all indexes). This + * makes the next call to lazy_vacuum_heap_rel() safe. */ Assert(vacrel->num_index_scans > 0 || vacrel->dead_tuples->num_tuples == vacrel->lpdead_items); - /* Increase and report the number of index scans */ + /* + * Increase and report the number of index scans. Note that we include + * the case where we started a round index scanning that we weren't able + * to finish. + */ vacrel->num_index_scans++; pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS, vacrel->num_index_scans); + + return allindexes; } /* @@ -2340,6 +2533,75 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) return (offnum <= maxoff); } +/* + * Determine if there is an unacceptable risk of wraparound failure due to the + * fact that the ongoing VACUUM is taking too long -- the table that is being + * vacuumed should not have a relfrozenxid or relminmxid that is too far in + * the past. + * + * Note that we deliberately don't vary our behavior based on factors like + * whether or not the ongoing VACUUM is aggressive. If it's not aggressive we + * probably won't be able to advance relfrozenxid during this VACUUM. If we + * can't, then an anti-wraparound VACUUM should take place immediately after + * we finish up. We should be able to bypass all index vacuuming for the + * later anti-wraparound VACUUM. + * + * If the user-configurable threshold has been crossed then hurry things up: + * Stop applying any VACUUM cost delay going forward, and remember to skip any + * further index vacuuming (and heap vacuuming too, in the common case where + * table has indexes but not in one-pass VACUUM case). Return true to inform + * caller of the emergency. Otherwise return false. 
+ * + * Caller is expected to call here before and after vacuuming each index in + * the case of two-pass VACUUM, or every BYPASS_EMERGENCY_MIN_PAGES blocks in + * the case of no-indexes/one-pass VACUUM. + */ +static bool +should_speedup_failsafe(LVRelState *vacrel) +{ + /* Avoid calling vacuum_xid_limit_emergency() very frequently */ + if (vacrel->num_index_scans == 0 && + vacrel->rel_pages <= BYPASS_EMERGENCY_MIN_PAGES) + return false; + + /* Don't warn more than once per VACUUM */ + if (vacrel->do_failsafe_speedup) + return true; + + if (unlikely(vacuum_xid_limit_emergency(vacrel->relfrozenxid, + vacrel->relminmxid))) + { + /* + * Wraparound emergency -- the table's relfrozenxid or relminmxid is + * too far in the past + */ + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); + + vacrel->do_index_vacuuming = false; + vacrel->do_index_cleanup = false; + vacrel->do_failsafe_speedup = true; + + ereport(WARNING, + (errmsg("abandoned index vacuuming of table \"%s.%s.%s\" as a fail safe after %d index scans", + get_database_name(MyDatabaseId), + vacrel->relnamespace, + vacrel->relname, + vacrel->num_index_scans), + errdetail("table's relfrozenxid or relminmxid is too far in the past"), + errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n" + "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs."))); + + /* Stop applying cost limits from this point on */ + VacuumCostActive = false; + VacuumCostBalance = 0; + + return true; + } + + return false; +} + static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel) { diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 25465b05dd..51e0f4db4d 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -62,6 +62,8 @@ int vacuum_freeze_min_age; int vacuum_freeze_table_age; int vacuum_multixact_freeze_min_age; int 
vacuum_multixact_freeze_table_age; +int vacuum_skip_index_age; +int vacuum_multixact_skip_index_age; /* A few variables that don't seem worth passing around as parameters */ @@ -1134,6 +1136,65 @@ vacuum_set_xid_limits(Relation rel, } } +/* + * vacuum_xid_limit_emergency() -- Handle wraparound emergencies + * + * Input parameters are the target relation's relfrozenxid and relminmxid. + */ +bool +vacuum_xid_limit_emergency(TransactionId relfrozenxid, MultiXactId relminmxid) +{ + TransactionId xid_skip_limit; + MultiXactId multi_skip_limit; + int skip_index_vacuum; + + Assert(TransactionIdIsNormal(relfrozenxid)); + Assert(MultiXactIdIsValid(relminmxid)); + + /* + * Determine the index skipping age to use. In any case not less than + * autovacuum_freeze_max_age * 1.05, so that VACUUM always does an + * aggressive scan. + */ + skip_index_vacuum = Max(vacuum_skip_index_age, autovacuum_freeze_max_age * 1.05); + + xid_skip_limit = ReadNextTransactionId() - skip_index_vacuum; + if (!TransactionIdIsNormal(xid_skip_limit)) + xid_skip_limit = FirstNormalTransactionId; + + if (TransactionIdIsNormal(relfrozenxid) && + TransactionIdPrecedes(relfrozenxid, xid_skip_limit)) + { + /* The table's relfrozenxid is too old */ + return true; + } + + /* + * Similar to above, determine the index skipping age to use for multixact. + * In any case not less than autovacuum_multixact_freeze_max_age * 1.05. + */ + skip_index_vacuum = Max(vacuum_multixact_skip_index_age, + autovacuum_multixact_freeze_max_age * 1.05); + + /* + * Compute the multixact age for which freezing is urgent. This is + * normally autovacuum_multixact_freeze_max_age, but may be less if we are + * short of multixact member space. 
+ */ + multi_skip_limit = ReadNextMultiXactId() - skip_index_vacuum; + if (multi_skip_limit < FirstMultiXactId) + multi_skip_limit = FirstMultiXactId; + + if (MultiXactIdIsValid(relminmxid) && + MultiXactIdPrecedes(relminmxid, multi_skip_limit)) + { + /* The table's relminmxid is too old */ + return true; + } + + return false; +} + /* * vac_estimate_reltuples() -- estimate the new value for pg_class.reltuples * diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 60a9c7a2a0..e5c6561c6c 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -2646,6 +2646,26 @@ static struct config_int ConfigureNamesInt[] = 0, 0, 1000000, /* see ComputeXidHorizons */ NULL, NULL, NULL }, + { + {"vacuum_skip_index_age", PGC_USERSET, CLIENT_CONN_STATEMENT, + gettext_noop("Age at which VACUUM should skip index vacuuming."), + NULL + }, + &vacuum_skip_index_age, + /* This upper-limit can be 1.05 of autovacuum_freeze_max_age */ + 1800000000, 0, 2100000000, + NULL, NULL, NULL + }, + { + {"vacuum_multixact_skip_index_age", PGC_USERSET, CLIENT_CONN_STATEMENT, + gettext_noop("Multixact age at which VACUUM should skip index vacuuming."), + NULL + }, + &vacuum_multixact_skip_index_age, + /* This upper-limit can be 1.05 of autovacuum_multixact_freeze_max_age */ + 1800000000, 0, 2100000000, + NULL, NULL, NULL + }, /* * See also CheckRequiredParameterValues() if this parameter changes @@ -3246,7 +3266,10 @@ static struct config_int ConfigureNamesInt[] = NULL }, &autovacuum_freeze_max_age, - /* see pg_resetwal if you change the upper-limit value */ + /* + * see pg_resetwal and vacuum_skip_index_age if you change the + * upper-limit value. 
+ */ 200000000, 100000, 2000000000, NULL, NULL, NULL }, diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 39da7cc942..79a6a47219 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -675,6 +675,8 @@ #vacuum_freeze_table_age = 150000000 #vacuum_multixact_freeze_min_age = 5000000 #vacuum_multixact_freeze_table_age = 150000000 +#vacuum_skip_index_age = 1800000000 +#vacuum_multixact_skip_index_age = 1800000000 #bytea_output = 'hex' # hex, escape #xmlbinary = 'base64' #xmloption = 'content' diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 0c9128a55d..8b2fe112e4 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -8601,6 +8601,31 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; + + vacuum_skip_index_age (integer) + + vacuum_skip_index_age configuration parameter + + + + + VACUUM skips index cleanup if the table's + pg_class.relfrozenxid field has reached + the age specified by this setting. A VACUUM that skips + index cleanup hurries to finish in order to advance + pg_class.relfrozenxid + as quickly as possible. This behavior is equivalent to setting + the INDEX_CLEANUP option to OFF, except that + this parameter skips index cleanup even in the middle of a vacuum operation. + The default is 1.8 billion transactions. Although users can set this value + anywhere from zero to 2.1 billion, VACUUM will silently + adjust the effective value to no less than 105% of + , so that only anti-wraparound + autovacuums and aggressive scans have a chance to skip index cleanup. 
+ + + + vacuum_multixact_freeze_table_age (integer) @@ -8647,6 +8672,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; + + vacuum_multixact_skip_index_age (integer) + + vacuum_multixact_skip_index_age configuration parameter + + + + + VACUUM skips index cleanup if the table's + pg_class.relminmxid field has reached + the age specified by this setting. A VACUUM that skips + index cleanup hurries to finish in order to advance + pg_class.relminmxid + as quickly as possible. This behavior is equivalent to setting + the INDEX_CLEANUP option to OFF, except that + this parameter skips index cleanup even in the middle of a vacuum operation. + The default is 1.8 billion multixacts. Although users can set this value + anywhere from zero to 2.1 billion, VACUUM will silently + adjust the effective value to no less than 105% of + , so that only + anti-wraparound autovacuums and aggressive scans have a chance to skip + index cleanup. + + + + bytea_output (enum) diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index 3bbae6dd91..a81d9ce839 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -609,8 +609,14 @@ SELECT datname, age(datfrozenxid) FROM pg_database; If for some reason autovacuum fails to clear old XIDs from a table, the - system will begin to emit warning messages like this when the database's - oldest XIDs reach forty million transactions from the wraparound point: + system will begin to skip index cleanup to hurry finishing the vacuum + operation. controls when + VACUUM and autovacuum do that. + + + + The system emits warning messages like this when the database's + oldest XIDs reach forty million transactions from the wraparound point: WARNING: database "mydb" must be vacuumed within 39985967 transactions -- 2.27.0