From fd380a199f38545a56d7fa11c45ec088d62389f4 Mon Sep 17 00:00:00 2001 From: Masahiko Sawada Date: Tue, 31 Jan 2023 22:44:40 +0900 Subject: [PATCH v24 9/9] Update vacuum integration patch from v23. --- src/backend/access/heap/vacuumlazy.c | 64 +++++++++++++-------------- src/backend/commands/vacuumparallel.c | 11 +++-- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 3537df16fd..b4e40423a8 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -3,18 +3,18 @@ * vacuumlazy.c * Concurrent ("lazy") vacuuming. * - * The major space usage for vacuuming is storage for the array of dead TIDs + * The major space usage for vacuuming is TidStore, a storage for dead TIDs * that are to be removed from indexes. We want to ensure we can vacuum even * the very largest relations with finite memory space usage. To do that, we - * set upper bounds on the number of TIDs we can keep track of at once. + * set upper bounds on the maximum memory that can be used for keeping track + * of dead TIDs at once. * * We are willing to use at most maintenance_work_mem (or perhaps * autovacuum_work_mem) memory space to keep track of dead TIDs. We initially - * allocate an array of TIDs of that size, with an upper limit that depends on - * table size (this limit ensures we don't allocate a huge area uselessly for - * vacuuming small tables). If the array threatens to overflow, we must call - * lazy_vacuum to vacuum indexes (and to vacuum the pages that we've pruned). - * This frees up the memory space dedicated to storing dead TIDs. + * create a TidStore with the maximum bytes that can be used by the TidStore. + * If the TidStore is full, we must call lazy_vacuum to vacuum indexes (and to + * vacuum the pages that we've pruned). This frees up the memory space dedicated + * to storing dead TIDs. 
* * In practice VACUUM will often complete its initial pass over the target * heap relation without ever running out of space to store TIDs. This means @@ -492,11 +492,11 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, } /* - * Allocate dead_items array memory using dead_items_alloc. This handles - * parallel VACUUM initialization as part of allocating shared memory - * space used for dead_items. (But do a failsafe precheck first, to - * ensure that parallel VACUUM won't be attempted at all when relfrozenxid - * is already dangerously old.) + * Allocate dead_items memory using dead_items_alloc. This handles parallel + * VACUUM initialization as part of allocating shared memory space used for + * dead_items. (But do a failsafe precheck first, to ensure that parallel + * VACUUM won't be attempted at all when relfrozenxid is already dangerously + * old.) */ lazy_check_wraparound_failsafe(vacrel); dead_items_alloc(vacrel, params->nworkers); @@ -802,7 +802,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * have collected the TIDs whose index tuples need to be removed. * * Finally, invokes lazy_vacuum_heap_rel to vacuum heap pages, which - * largely consists of marking LP_DEAD items (from collected TID array) + * largely consists of marking LP_DEAD items (from vacrel->dead_items) * as LP_UNUSED. This has to happen in a second, final pass over the * heap, to preserve a basic invariant that all index AMs rely on: no * extant index tuple can ever be allowed to contain a TID that points to @@ -973,7 +973,7 @@ lazy_scan_heap(LVRelState *vacrel) continue; } - /* Collect LP_DEAD items in dead_items array, count tuples */ + /* Collect LP_DEAD items in dead_items, count tuples */ if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup, &recordfreespace)) { @@ -1015,10 +1015,10 @@ lazy_scan_heap(LVRelState *vacrel) * Prune, freeze, and count tuples. * * Accumulates details of remaining LP_DEAD line pointers on page in - * dead_items array. 
This includes LP_DEAD line pointers that we - * pruned ourselves, as well as existing LP_DEAD line pointers that - * were pruned some time earlier. Also considers freezing XIDs in the - * tuple headers of remaining items with storage. + * dead_items. This includes LP_DEAD line pointers that we pruned + * ourselves, as well as existing LP_DEAD line pointers that were pruned + * some time earlier. Also considers freezing XIDs in the tuple headers + * of remaining items with storage. */ lazy_scan_prune(vacrel, buf, blkno, page, &prunestate); @@ -1084,7 +1084,7 @@ lazy_scan_heap(LVRelState *vacrel) } else if (prunestate.num_offsets > 0) { - /* Save details of the LP_DEAD items from the page */ + /* Save details of the LP_DEAD items from the page in dead_items */ tidstore_add_tids(dead_items, blkno, prunestate.deadoffsets, prunestate.num_offsets); @@ -1535,9 +1535,9 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, * The approach we take now is to restart pruning when the race condition is * detected. This allows heap_page_prune() to prune the tuples inserted by * the now-aborted transaction. This is a little crude, but it guarantees - * that any items that make it into the dead_items array are simple LP_DEAD - * line pointers, and that every remaining item with tuple storage is - * considered as a candidate for freezing. + * that any items that make it into dead_items are simple LP_DEAD line + * pointers, and that every remaining item with tuple storage is considered + * as a candidate for freezing. */ static void lazy_scan_prune(LVRelState *vacrel, @@ -1929,7 +1929,7 @@ retry: * lazy_scan_prune, which requires a full cleanup lock. While pruning isn't * performed here, it's quite possible that an earlier opportunistic pruning * operation left LP_DEAD items behind. We'll at least collect any such items - * in the dead_items array for removal from indexes. + * in dead_items for removal from indexes. 
* * For aggressive VACUUM callers, we may return false to indicate that a full * cleanup lock is required for processing by lazy_scan_prune. This is only @@ -2088,7 +2088,7 @@ lazy_scan_noprune(LVRelState *vacrel, vacrel->NewRelfrozenXid = NoFreezePageRelfrozenXid; vacrel->NewRelminMxid = NoFreezePageRelminMxid; - /* Save any LP_DEAD items found on the page in dead_items array */ + /* Save any LP_DEAD items found on the page in dead_items */ if (vacrel->nindexes == 0) { /* Using one-pass strategy (since table has no indexes) */ @@ -2373,9 +2373,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) /* * lazy_vacuum_heap_rel() -- second pass over the heap for two pass strategy * - * This routine marks LP_DEAD items in vacrel->dead_items array as LP_UNUSED. - * Pages that never had lazy_scan_prune record LP_DEAD items are not visited - * at all. + * This routine marks LP_DEAD items in vacrel->dead_items as LP_UNUSED. Pages + * that never had lazy_scan_prune record LP_DEAD items are not visited at all. * * We may also be able to truncate the line pointer array of the heap pages we * visit. If there is a contiguous group of LP_UNUSED items at the end of the @@ -2461,7 +2460,8 @@ lazy_vacuum_heap_rel(LVRelState *vacrel) ereport(DEBUG2, (errmsg("table \"%s\": removed " UINT64_FORMAT "dead item identifiers in %u pages", - vacrel->relname, tidstore_num_tids(vacrel->dead_items), vacuumed_pages))); + vacrel->relname, tidstore_num_tids(vacrel->dead_items), + vacuumed_pages))); /* Revert to the previous phase information for error traceback */ restore_vacuum_error_info(vacrel, &saved_err_info); @@ -2660,8 +2660,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) * lazy_vacuum_one_index() -- vacuum index relation. * * Delete all the index tuples containing a TID collected in - * vacrel->dead_items array. Also update running statistics. - * Exact details depend on index AM's ambulkdelete routine. + * vacrel->dead_items. Also update running statistics. 
Exact + * details depend on index AM's ambulkdelete routine. * * reltuples is the number of heap tuples to be passed to the * bulkdelete callback. It's always assumed to be estimated. @@ -3067,8 +3067,8 @@ count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected) } /* - * Allocate dead_items (either using palloc, or in dynamic shared memory). - * Sets dead_items in vacrel for caller. + * Allocate a (local or shared) TidStore for storing dead TIDs. Sets dead_items + * in vacrel for caller. * * Also handles parallel initialization as part of allocating dead_items in * DSM when required. diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 5c7e6ed99c..d653683693 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -9,12 +9,11 @@ * In a parallel vacuum, we perform both index bulk deletion and index cleanup * with parallel worker processes. Individual indexes are processed by one * vacuum process. ParalleVacuumState contains shared information as well as - * the memory space for storing dead items allocated in the DSM segment. We - * launch parallel worker processes at the start of parallel index - * bulk-deletion and index cleanup and once all indexes are processed, the - * parallel worker processes exit. Each time we process indexes in parallel, - * the parallel context is re-initialized so that the same DSM can be used for - * multiple passes of index bulk-deletion and index cleanup. + * the shared TidStore. We launch parallel worker processes at the start of + * parallel index bulk-deletion and index cleanup and once all indexes are + * processed, the parallel worker processes exit. Each time we process indexes + * in parallel, the parallel context is re-initialized so that the same DSM can + * be used for multiple passes of index bulk-deletion and index cleanup. 
* * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California -- 2.31.1