From 702de7c3cf081e860923644c6871b18741792aaa Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Sun, 28 Mar 2021 20:55:55 -0700 Subject: [PATCH v8 2/4] Break lazy_scan_heap() up into functions. Aside from being useful cleanup work in its own right, this is also preparation for an upcoming patch that removes the "tupgone" special case from vacuumlazy.c. The INDEX_CLEANUP=off case no longer uses the one-pass code path used when vacuuming a table with no indexes. It doesn't make sense to think of the two cases as equivalent because only the no-indexes case can do heap vacuuming. The INDEX_CLEANUP=off case is now structured as a two-pass VACUUM that opts to not do index vacuuming (and so naturally cannot safely perform heap vacuuming). --- src/backend/access/heap/vacuumlazy.c | 1403 +++++++++++++++---------- contrib/pg_visibility/pg_visibility.c | 8 +- contrib/pgstattuple/pgstatapprox.c | 9 +- 3 files changed, 835 insertions(+), 585 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 9c1cfe42e1..72cb066e0a 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -291,8 +291,9 @@ typedef struct LVRelState Relation onerel; Relation *indrels; int nindexes; - /* useindex = true means two-pass strategy; false means one-pass */ - bool useindex; + /* Do index and/or heap vacuuming (don't skip them)? */ + bool do_index_vacuuming; + bool do_index_cleanup; /* Buffer access strategy and parallel state */ BufferAccessStrategy bstrategy; @@ -351,6 +352,29 @@ typedef struct LVRelState int64 nunused; /* # existing unused line pointers */ } LVRelState; +/* + * State set up and maintained in lazy_scan_heap() (also maintained in + * lazy_scan_prune()) that represents VM bit status. + * + * Used by lazy_scan_setvmbit() when we're done pruning. 
+ */ +typedef struct LVPageVisMapState +{ + bool all_visible_according_to_vm; + TransactionId visibility_cutoff_xid; +} LVPageVisMapState; + +/* + * State output by lazy_scan_prune() + */ +typedef struct LVPagePruneState +{ + bool hastup; /* Page is truncatable? */ + bool has_lpdead_items; /* includes existing LP_DEAD items */ + bool all_visible; /* Every item visible to all? */ + bool all_frozen; /* provided all_visible is also true */ +} LVPagePruneState; + /* Struct for saving and restoring vacuum error information. */ typedef struct LVSavedErrInfo { @@ -366,8 +390,21 @@ static int elevel = -1; /* non-export function prototypes */ static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive); -static bool lazy_check_needs_freeze(Buffer buf, bool *hastup, - LVRelState *vacrel); +static bool lazy_scan_needs_freeze(Buffer buf, bool *hastup, + LVRelState *vacrel); +static void lazy_scan_new_page(LVRelState *vacrel, Buffer buf); +static void lazy_scan_empty_page(LVRelState *vacrel, Buffer buf, + Buffer vmbuffer); +static void lazy_scan_setvmbit(LVRelState *vacrel, Buffer buf, + Buffer vmbuffer, + LVPagePruneState *pageprunestate, + LVPageVisMapState *pagevmstate); +static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, + GlobalVisState *vistest, + LVPagePruneState *pageprunestate, + LVPageVisMapState *pagevmstate, + VacOptTernaryValue index_cleanup); +static void lazy_vacuum(LVRelState *vacrel); static void lazy_vacuum_all_indexes(LVRelState *vacrel); static IndexBulkDeleteResult *lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat, @@ -386,13 +423,11 @@ static void update_index_statistics(LVRelState *vacrel); static bool should_attempt_truncation(LVRelState *vacrel, VacuumParams *params); static void lazy_truncate_heap(LVRelState *vacrel); -static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples, - ItemPointer itemptr); static bool lazy_tid_reaped(ItemPointer itemptr, void *state); static int vac_cmp_itemptr(const 
void *left, const void *right); static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen); -static BlockNumber count_nondeletable_pages(LVRelState *vacrel); +static BlockNumber lazy_truncate_count_nondeletable(LVRelState *vacrel); static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex); static void lazy_space_alloc(LVRelState *vacrel, int nworkers, BlockNumber relblocks); @@ -517,8 +552,13 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params, vacrel->onerel = onerel; vac_open_indexes(vacrel->onerel, RowExclusiveLock, &vacrel->nindexes, &vacrel->indrels); - vacrel->useindex = (vacrel->nindexes > 0 && - params->index_cleanup == VACOPT_TERNARY_ENABLED); + vacrel->do_index_vacuuming = true; + vacrel->do_index_cleanup = true; + if (params->index_cleanup == VACOPT_TERNARY_DISABLED) + { + vacrel->do_index_vacuuming = false; + vacrel->do_index_cleanup = false; + } vacrel->bstrategy = bstrategy; vacrel->lps = NULL; /* for now */ vacrel->old_rel_pages = onerel->rd_rel->relpages; @@ -810,8 +850,8 @@ vacuum_log_cleanup_info(LVRelState *vacrel) * lists of dead tuples and pages with free space, calculates statistics * on the number of live tuples in the heap, and marks pages as * all-visible if appropriate. When done, or when we run low on space - * for dead-tuple TIDs, invoke vacuuming of indexes and reclaim dead line - * pointers. + * for dead-tuple TIDs, invoke lazy_vacuum to vacuum indexes and vacuum + * heap relation during its own second pass over the heap. 
* * If the table has at least two indexes, we execute both index vacuum * and index cleanup with parallel workers unless parallel vacuum is @@ -834,22 +874,12 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) { LVDeadTuples *dead_tuples; BlockNumber nblocks, - blkno; - HeapTupleData tuple; - BlockNumber empty_pages, - vacuumed_pages, + blkno, + next_unskippable_block, next_fsm_block_to_vacuum; - double num_tuples, /* total number of nonremovable tuples */ - live_tuples, /* live tuples (reltuples estimate) */ - tups_vacuumed, /* tuples cleaned up by current vacuum */ - nkeep, /* dead-but-not-removable tuples */ - nunused; /* # existing unused line pointers */ - int i; PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; - BlockNumber next_unskippable_block; bool skipping_blocks; - xl_heap_freeze_tuple *frozen; StringInfoData buf; const int initprog_index[] = { PROGRESS_VACUUM_PHASE, @@ -859,6 +889,10 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) int64 initprog_val[3]; GlobalVisState *vistest; + /* Counters of # blocks in onerel: */ + BlockNumber empty_pages, + vacuumed_pages; + pg_rusage_init(&ru0); if (aggressive) @@ -873,8 +907,6 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) vacrel->relname))); empty_pages = vacuumed_pages = 0; - next_fsm_block_to_vacuum = (BlockNumber) 0; - num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0; nblocks = RelationGetNumberOfBlocks(vacrel->onerel); next_unskippable_block = 0; @@ -909,7 +941,6 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) */ lazy_space_alloc(vacrel, params->nworkers, nblocks); dead_tuples = vacrel->dead_tuples; - frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage); /* Report that we're scanning the heap, advertising total # of blocks */ initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP; @@ -994,20 +1025,25 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool 
aggressive) { Buffer buf; Page page; - OffsetNumber offnum, - maxoff; - bool tupgone, - hastup; - int prev_dead_count; - int nfrozen; + LVPageVisMapState pagevmstate; + LVPagePruneState pageprunestate; + bool savefreespace; Size freespace; - bool all_visible_according_to_vm = false; - bool all_visible; - bool all_frozen = true; /* provided all_visible is also true */ - bool has_dead_items; /* includes existing LP_DEAD items */ - TransactionId visibility_cutoff_xid = InvalidTransactionId; - /* see note above about forcing scanning of last page */ + /* + * Initialize vm state for page + * + * Can't touch pageprunestate for page until we reach + * lazy_scan_prune(), though -- that's output state only + */ + pagevmstate.all_visible_according_to_vm = false; + pagevmstate.visibility_cutoff_xid = InvalidTransactionId; + + /* + * Step 1 for block: Consider need to skip blocks. + * + * See note above about forcing scanning of last page. + */ #define FORCE_CHECK_PAGE() \ (blkno == nblocks - 1 && should_attempt_truncation(vacrel, params)) @@ -1060,7 +1096,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) */ if (aggressive && VM_ALL_VISIBLE(vacrel->onerel, blkno, &vmbuffer)) - all_visible_according_to_vm = true; + pagevmstate.all_visible_according_to_vm = true; } else { @@ -1088,12 +1124,15 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) vacrel->frozenskipped_pages++; continue; } - all_visible_according_to_vm = true; + pagevmstate.all_visible_according_to_vm = true; } vacuum_delay_point(); /* + * Step 2 for block: Consider if we definitely have enough space to + * process TIDs on page already. + * * If we are close to overrunning the available space for dead-tuple * TIDs, pause and do a cycle of vacuuming before we tackle this page. 
*/ @@ -1112,24 +1151,18 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) vmbuffer = InvalidBuffer; } - /* Work on all the indexes, then the heap */ - lazy_vacuum_all_indexes(vacrel); - - /* Remove tuples from heap */ - lazy_vacuum_heap_rel(vacrel); - - /* - * Forget the now-vacuumed tuples, and press on, but be careful - * not to reset latestRemovedXid since we want that value to be - * valid. - */ - dead_tuples->num_tuples = 0; + /* Remove the collected garbage tuples from table and indexes */ + lazy_vacuum(vacrel); /* * Vacuum the Free Space Map to make newly-freed space visible on * upper-level FSM pages. Note we have not yet processed blkno. + * Even if we skipped heap vacuum, FSM vacuuming could be + * worthwhile since we could have updated the freespace of empty + * pages. */ - FreeSpaceMapVacuumRange(vacrel->onerel, next_fsm_block_to_vacuum, blkno); + FreeSpaceMapVacuumRange(vacrel->onerel, next_fsm_block_to_vacuum, + blkno); next_fsm_block_to_vacuum = blkno; /* Report that we are once again scanning the heap */ @@ -1138,6 +1171,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* + * Step 3 for block: Set up visibility map page as needed. + * * Pin the visibility map page in case we need to mark the page * all-visible. In most cases this will be very cheap, because we'll * already have the correct page pinned anyway. However, it's @@ -1150,9 +1185,15 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) buf = ReadBufferExtended(vacrel->onerel, MAIN_FORKNUM, blkno, RBM_NORMAL, vacrel->bstrategy); - /* We need buffer cleanup lock so that we can prune HOT chains. */ + /* + * Step 4 for block: Acquire super-exclusive lock for pruning. + * + * We need buffer cleanup lock so that we can prune HOT chains. 
+ */ if (!ConditionalLockBufferForCleanup(buf)) { + bool hastup; + /* * If we're not performing an aggressive scan to guard against XID * wraparound, and we don't want to forcibly check the page, then @@ -1183,7 +1224,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * to use lazy_check_needs_freeze() for both situations, though. */ LockBuffer(buf, BUFFER_LOCK_SHARE); - if (!lazy_check_needs_freeze(buf, &hastup, vacrel)) + if (!lazy_scan_needs_freeze(buf, &hastup, vacrel)) { UnlockReleaseBuffer(buf); vacrel->scanned_pages++; @@ -1209,6 +1250,12 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) /* drop through to normal processing */ } + /* + * Step 5 for block: Handle empty/new pages. + * + * By here we have a super-exclusive lock, and it's clear that this + * page is one that we consider scanned + */ vacrel->scanned_pages++; vacrel->tupcount_pages++; @@ -1216,396 +1263,81 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) if (PageIsNew(page)) { - /* - * All-zeroes pages can be left over if either a backend extends - * the relation by a single page, but crashes before the newly - * initialized page has been written out, or when bulk-extending - * the relation (which creates a number of empty pages at the tail - * end of the relation, but enters them into the FSM). - * - * Note we do not enter the page into the visibilitymap. That has - * the downside that we repeatedly visit this page in subsequent - * vacuums, but otherwise we'll never not discover the space on a - * promoted standby. The harm of repeated checking ought to - * normally not be too bad - the space usually should be used at - * some point, otherwise there wouldn't be any regular vacuums. - * - * Make sure these pages are in the FSM, to ensure they can be - * reused. Do that by testing if there's any space recorded for - * the page. If not, enter it. 
We do so after releasing the lock - * on the heap page, the FSM is approximate, after all. - */ - UnlockReleaseBuffer(buf); - empty_pages++; - - if (GetRecordedFreeSpace(vacrel->onerel, blkno) == 0) - { - Size freespace; - - freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData; - RecordPageWithFreeSpace(vacrel->onerel, blkno, freespace); - } + /* Releases lock on buf for us: */ + lazy_scan_new_page(vacrel, buf); continue; } - - if (PageIsEmpty(page)) + else if (PageIsEmpty(page)) { empty_pages++; - freespace = PageGetHeapFreeSpace(page); - - /* - * Empty pages are always all-visible and all-frozen (note that - * the same is currently not true for new pages, see above). - */ - if (!PageIsAllVisible(page)) - { - START_CRIT_SECTION(); - - /* mark buffer dirty before writing a WAL record */ - MarkBufferDirty(buf); - - /* - * It's possible that another backend has extended the heap, - * initialized the page, and then failed to WAL-log the page - * due to an ERROR. Since heap extension is not WAL-logged, - * recovery might try to replay our record setting the page - * all-visible and find that the page isn't initialized, which - * will cause a PANIC. To prevent that, check whether the - * page has been previously WAL-logged, and if not, do that - * now. - */ - if (RelationNeedsWAL(vacrel->onerel) && - PageGetLSN(page) == InvalidXLogRecPtr) - log_newpage_buffer(buf, true); - - PageSetAllVisible(page); - visibilitymap_set(vacrel->onerel, blkno, buf, InvalidXLogRecPtr, - vmbuffer, InvalidTransactionId, - VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN); - END_CRIT_SECTION(); - } - - UnlockReleaseBuffer(buf); - RecordPageWithFreeSpace(vacrel->onerel, blkno, freespace); + /* Releases lock on buf for us (though keeps vmbuffer pin): */ + lazy_scan_empty_page(vacrel, buf, vmbuffer); continue; } /* - * Prune all HOT-update chains in this page. + * Step 6 for block: Do pruning. 
* - * We count tuples removed by the pruning step as removed by VACUUM - * (existing LP_DEAD line pointers don't count). + * Also accumulates details of remaining LP_DEAD line pointers on page + * in dead tuple list. This includes LP_DEAD line pointers that we + * ourselves just pruned, as well as existing LP_DEAD line pointers + * pruned earlier. + * + * Also handles tuple freezing -- considers freezing XIDs from all + * tuple headers left behind following pruning. */ - tups_vacuumed += heap_page_prune(vacrel->onerel, buf, vistest, - InvalidTransactionId, 0, false, - &vacrel->latestRemovedXid, - &vacrel->offnum); + lazy_scan_prune(vacrel, buf, vistest, &pageprunestate, &pagevmstate, + params->index_cleanup); /* - * Now scan the page to collect vacuumable items and check for tuples - * requiring freezing. + * Step 7 for block: Set up details for saving free space in FSM at + * end of loop. (Also performs extra single pass strategy steps in + * "nindexes == 0" case.) + * + * If we have any LP_DEAD items on this page (i.e. any new dead_tuples + * entries compared to just before lazy_scan_prune()) then the page + * will be visited again by lazy_vacuum_heap_rel(), which will compute + * and record its post-compaction free space. If not, then we're done + * with this page, so remember its free space as-is. */ - all_visible = true; - has_dead_items = false; - nfrozen = 0; - hastup = false; - prev_dead_count = dead_tuples->num_tuples; - maxoff = PageGetMaxOffsetNumber(page); - - /* - * Note: If you change anything in the loop below, also look at - * heap_page_is_all_visible to see if that needs to be changed. 
- */ - for (offnum = FirstOffsetNumber; - offnum <= maxoff; - offnum = OffsetNumberNext(offnum)) + savefreespace = false; + freespace = 0; + if (vacrel->nindexes > 0 && pageprunestate.has_lpdead_items && + vacrel->do_index_vacuuming) { - ItemId itemid; - - /* - * Set the offset number so that we can display it along with any - * error that occurred while processing this tuple. - */ - vacrel->offnum = offnum; - itemid = PageGetItemId(page, offnum); - - /* Unused items require no processing, but we count 'em */ - if (!ItemIdIsUsed(itemid)) - { - nunused += 1; - continue; - } - - /* Redirect items mustn't be touched */ - if (ItemIdIsRedirected(itemid)) - { - hastup = true; /* this page won't be truncatable */ - continue; - } - - ItemPointerSet(&(tuple.t_self), blkno, offnum); - - /* - * LP_DEAD line pointers are to be vacuumed normally; but we don't - * count them in tups_vacuumed, else we'd be double-counting (at - * least in the common case where heap_page_prune() just freed up - * a non-HOT tuple). Note also that the final tups_vacuumed value - * might be very low for tables where opportunistic page pruning - * happens to occur very frequently (via heap_page_prune_opt() - * calls that free up non-HOT tuples). - */ - if (ItemIdIsDead(itemid)) - { - lazy_record_dead_tuple(dead_tuples, &(tuple.t_self)); - all_visible = false; - has_dead_items = true; - continue; - } - - Assert(ItemIdIsNormal(itemid)); - - tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); - tuple.t_len = ItemIdGetLength(itemid); - tuple.t_tableOid = RelationGetRelid(vacrel->onerel); - - tupgone = false; - - /* - * The criteria for counting a tuple as live in this block need to - * match what analyze.c's acquire_sample_rows() does, otherwise - * VACUUM and ANALYZE may produce wildly different reltuples - * values, e.g. when there are many recently-dead tuples. 
- * - * The logic here is a bit simpler than acquire_sample_rows(), as - * VACUUM can't run inside a transaction block, which makes some - * cases impossible (e.g. in-progress insert from the same - * transaction). - */ - switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf)) - { - case HEAPTUPLE_DEAD: - - /* - * Ordinarily, DEAD tuples would have been removed by - * heap_page_prune(), but it's possible that the tuple - * state changed since heap_page_prune() looked. In - * particular an INSERT_IN_PROGRESS tuple could have - * changed to DEAD if the inserter aborted. So this - * cannot be considered an error condition. - * - * If the tuple is HOT-updated then it must only be - * removed by a prune operation; so we keep it just as if - * it were RECENTLY_DEAD. Also, if it's a heap-only - * tuple, we choose to keep it, because it'll be a lot - * cheaper to get rid of it in the next pruning pass than - * to treat it like an indexed tuple. Finally, if index - * cleanup is disabled, the second heap pass will not - * execute, and the tuple will not get removed, so we must - * treat it like any other dead tuple that we choose to - * keep. - * - * If this were to happen for a tuple that actually needed - * to be deleted, we'd be in trouble, because it'd - * possibly leave a tuple below the relation's xmin - * horizon alive. heap_prepare_freeze_tuple() is prepared - * to detect that case and abort the transaction, - * preventing corruption. - */ - if (HeapTupleIsHotUpdated(&tuple) || - HeapTupleIsHeapOnly(&tuple) || - params->index_cleanup == VACOPT_TERNARY_DISABLED) - nkeep += 1; - else - tupgone = true; /* we can delete the tuple */ - all_visible = false; - break; - case HEAPTUPLE_LIVE: - - /* - * Count it as live. Not only is this natural, but it's - * also what acquire_sample_rows() does. - */ - live_tuples += 1; - - /* - * Is the tuple definitely visible to all transactions? 
- * - * NB: Like with per-tuple hint bits, we can't set the - * PD_ALL_VISIBLE flag if the inserter committed - * asynchronously. See SetHintBits for more info. Check - * that the tuple is hinted xmin-committed because of - * that. - */ - if (all_visible) - { - TransactionId xmin; - - if (!HeapTupleHeaderXminCommitted(tuple.t_data)) - { - all_visible = false; - break; - } - - /* - * The inserter definitely committed. But is it old - * enough that everyone sees it as committed? - */ - xmin = HeapTupleHeaderGetXmin(tuple.t_data); - if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin)) - { - all_visible = false; - break; - } - - /* Track newest xmin on page. */ - if (TransactionIdFollows(xmin, visibility_cutoff_xid)) - visibility_cutoff_xid = xmin; - } - break; - case HEAPTUPLE_RECENTLY_DEAD: - - /* - * If tuple is recently deleted then we must not remove it - * from relation. - */ - nkeep += 1; - all_visible = false; - break; - case HEAPTUPLE_INSERT_IN_PROGRESS: - - /* - * This is an expected case during concurrent vacuum. - * - * We do not count these rows as live, because we expect - * the inserting transaction to update the counters at - * commit, and we assume that will happen only after we - * report our results. This assumption is a bit shaky, - * but it is what acquire_sample_rows() does, so be - * consistent. - */ - all_visible = false; - break; - case HEAPTUPLE_DELETE_IN_PROGRESS: - /* This is an expected case during concurrent vacuum */ - all_visible = false; - - /* - * Count such rows as live. As above, we assume the - * deleting transaction will commit and update the - * counters after we report. 
- */ - live_tuples += 1; - break; - default: - elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); - break; - } - - if (tupgone) - { - lazy_record_dead_tuple(dead_tuples, &(tuple.t_self)); - HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data, - &vacrel->latestRemovedXid); - tups_vacuumed += 1; - has_dead_items = true; - } - else - { - bool tuple_totally_frozen; - - num_tuples += 1; - hastup = true; - - /* - * Each non-removable tuple must be checked to see if it needs - * freezing. Note we already have exclusive buffer lock. - */ - if (heap_prepare_freeze_tuple(tuple.t_data, - vacrel->relfrozenxid, - vacrel->relminmxid, - vacrel->FreezeLimit, - vacrel->MultiXactCutoff, - &frozen[nfrozen], - &tuple_totally_frozen)) - frozen[nfrozen++].offset = offnum; - - if (!tuple_totally_frozen) - all_frozen = false; - } - } /* scan along page */ - - /* - * Clear the offset information once we have processed all the tuples - * on the page. - */ - vacrel->offnum = InvalidOffsetNumber; - - /* - * If we froze any tuples, mark the buffer dirty, and write a WAL - * record recording the changes. We must log the changes to be - * crash-safe against future truncation of CLOG. 
- */ - if (nfrozen > 0) + /* Wait until lazy_vacuum_heap_rel() to save free space */ + } + else { - START_CRIT_SECTION(); - - MarkBufferDirty(buf); - - /* execute collected freezes */ - for (i = 0; i < nfrozen; i++) - { - ItemId itemid; - HeapTupleHeader htup; - - itemid = PageGetItemId(page, frozen[i].offset); - htup = (HeapTupleHeader) PageGetItem(page, itemid); - - heap_execute_freeze_tuple(htup, &frozen[i]); - } - - /* Now WAL-log freezing if necessary */ - if (RelationNeedsWAL(vacrel->onerel)) - { - XLogRecPtr recptr; - - recptr = log_heap_freeze(vacrel->onerel, buf, - vacrel->FreezeLimit, frozen, nfrozen); - PageSetLSN(page, recptr); - } - - END_CRIT_SECTION(); + /* Save space right away */ + savefreespace = true; + freespace = PageGetHeapFreeSpace(page); } - /* - * If there are no indexes we can vacuum the page right now instead of - * doing a second scan. Also we don't do that but forget dead tuples - * when index cleanup is disabled. - */ - if (!vacrel->useindex && dead_tuples->num_tuples > 0) + if (vacrel->nindexes == 0 && pageprunestate.has_lpdead_items) { - if (vacrel->nindexes == 0) - { - /* Remove tuples from heap if the table has no index */ - lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer); - vacuumed_pages++; - has_dead_items = false; - } - else - { - /* - * Here, we have indexes but index cleanup is disabled. - * Instead of vacuuming the dead tuples on the heap, we just - * forget them. - */ - Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED); - } + Assert(dead_tuples->num_tuples > 0); /* - * Forget the now-vacuumed tuples, and press on, but be careful - * not to reset latestRemovedXid since we want that value to be - * valid. + * One pass strategy (no indexes) case. + * + * Mark LP_DEAD item pointers as LP_UNUSED now, since there won't + * be a second pass in lazy_vacuum_heap_rel(). 
*/ + lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer); + vacuumed_pages++; + + /* This won't have changed: */ + Assert(savefreespace && freespace == PageGetHeapFreeSpace(page)); + + /* + * Make sure lazy_scan_setvmbit() won't stop setting VM due to + * now-vacuumed LP_DEAD items: + */ + pageprunestate.has_lpdead_items = false; + + /* Forget the now-vacuumed tuples */ dead_tuples->num_tuples = 0; /* @@ -1616,115 +1348,34 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) */ if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) { - FreeSpaceMapVacuumRange(vacrel->onerel, next_fsm_block_to_vacuum, - blkno); + FreeSpaceMapVacuumRange(vacrel->onerel, + next_fsm_block_to_vacuum, blkno); next_fsm_block_to_vacuum = blkno; } } - freespace = PageGetHeapFreeSpace(page); - - /* mark page all-visible, if appropriate */ - if (all_visible && !all_visible_according_to_vm) - { - uint8 flags = VISIBILITYMAP_ALL_VISIBLE; - - if (all_frozen) - flags |= VISIBILITYMAP_ALL_FROZEN; - - /* - * It should never be the case that the visibility map page is set - * while the page-level bit is clear, but the reverse is allowed - * (if checksums are not enabled). Regardless, set both bits so - * that we get back in sync. - * - * NB: If the heap page is all-visible but the VM bit is not set, - * we don't need to dirty the heap page. However, if checksums - * are enabled, we do need to make sure that the heap page is - * dirtied before passing it to visibilitymap_set(), because it - * may be logged. Given that this situation should only happen in - * rare cases after a crash, it is not worth optimizing. 
- */ - PageSetAllVisible(page); - MarkBufferDirty(buf); - visibilitymap_set(vacrel->onerel, blkno, buf, InvalidXLogRecPtr, - vmbuffer, visibility_cutoff_xid, flags); - } + /* One pass strategy had better have no dead tuples by now: */ + Assert(vacrel->nindexes > 0 || dead_tuples->num_tuples == 0); /* - * As of PostgreSQL 9.2, the visibility map bit should never be set if - * the page-level bit is clear. However, it's possible that the bit - * got cleared after we checked it and before we took the buffer - * content lock, so we must recheck before jumping to the conclusion - * that something bad has happened. + * Step 8 for block: Handle setting visibility map bit as appropriate */ - else if (all_visible_according_to_vm && !PageIsAllVisible(page) - && VM_ALL_VISIBLE(vacrel->onerel, blkno, &vmbuffer)) - { - elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", - vacrel->relname, blkno); - visibilitymap_clear(vacrel->onerel, blkno, vmbuffer, - VISIBILITYMAP_VALID_BITS); - } + lazy_scan_setvmbit(vacrel, buf, vmbuffer, &pageprunestate, + &pagevmstate); /* - * It's possible for the value returned by - * GetOldestNonRemovableTransactionId() to move backwards, so it's not - * wrong for us to see tuples that appear to not be visible to - * everyone yet, while PD_ALL_VISIBLE is already set. The real safe - * xmin value never moves backwards, but - * GetOldestNonRemovableTransactionId() is conservative and sometimes - * returns a value that's unnecessarily small, so if we see that - * contradiction it just means that the tuples that we think are not - * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag - * is correct. - * - * There should never be dead tuples on a page with PD_ALL_VISIBLE - * set, however. 
+ * Step 9 for block: drop super-exclusive lock, finalize page by + * recording its free space in the FSM as appropriate */ - else if (PageIsAllVisible(page) && has_dead_items) - { - elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u", - vacrel->relname, blkno); - PageClearAllVisible(page); - MarkBufferDirty(buf); - visibilitymap_clear(vacrel->onerel, blkno, vmbuffer, - VISIBILITYMAP_VALID_BITS); - } - - /* - * If the all-visible page is all-frozen but not marked as such yet, - * mark it as all-frozen. Note that all_frozen is only valid if - * all_visible is true, so we must check both. - */ - else if (all_visible_according_to_vm && all_visible && all_frozen && - !VM_ALL_FROZEN(vacrel->onerel, blkno, &vmbuffer)) - { - /* - * We can pass InvalidTransactionId as the cutoff XID here, - * because setting the all-frozen bit doesn't cause recovery - * conflicts. - */ - visibilitymap_set(vacrel->onerel, blkno, buf, InvalidXLogRecPtr, - vmbuffer, InvalidTransactionId, - VISIBILITYMAP_ALL_FROZEN); - } UnlockReleaseBuffer(buf); - /* Remember the location of the last page with nonremovable tuples */ - if (hastup) + if (pageprunestate.hastup) vacrel->nonempty_pages = blkno + 1; - - /* - * If we remembered any tuples for deletion, then the page will be - * visited again by lazy_vacuum_heap_rel, which will compute and record - * its post-compaction free space. If not, then we're done with this - * page, so remember its free space as-is. (This path will always be - * taken if there are no indexes.) 
- */ - if (dead_tuples->num_tuples == prev_dead_count) + if (savefreespace) RecordPageWithFreeSpace(vacrel->onerel, blkno, freespace); + + /* Finished all steps for block by here (at the latest) */ } /* report that everything is scanned and vacuumed */ @@ -1733,16 +1384,10 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) /* Clear the block number information */ vacrel->blkno = InvalidBlockNumber; - pfree(frozen); - - /* save stats for use later */ - vacrel->tuples_deleted = tups_vacuumed; - vacrel->new_dead_tuples = nkeep; - /* now we can compute the new value for pg_class.reltuples */ vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->onerel, nblocks, vacrel->tupcount_pages, - live_tuples); + vacrel->live_tuples); /* * Also compute the total number of surviving heap entries. In the @@ -1761,19 +1406,13 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* If any tuples need to be deleted, perform final vacuum cycle */ - /* XXX put a threshold on min number of tuples here? */ + Assert(vacrel->nindexes > 0 || dead_tuples->num_tuples == 0); if (dead_tuples->num_tuples > 0) - { - /* Work on all the indexes, and then the heap */ - lazy_vacuum_all_indexes(vacrel); - - /* Remove tuples from heap */ - lazy_vacuum_heap_rel(vacrel); - } + lazy_vacuum(vacrel); /* * Vacuum the remainder of the Free Space Map. We must do this whether or - * not there were indexes. + * not there were indexes, and whether or not we skipped index vacuuming. 
*/ if (blkno > next_fsm_block_to_vacuum) FreeSpaceMapVacuumRange(vacrel->onerel, next_fsm_block_to_vacuum, @@ -1783,29 +1422,34 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno); /* Do post-vacuum cleanup */ - if (vacrel->useindex) + if (vacrel->nindexes > 0 && vacrel->do_index_cleanup) lazy_cleanup_all_indexes(vacrel); /* Free resources managed by lazy_space_alloc() */ lazy_space_free(vacrel); /* Update index statistics */ - if (vacrel->useindex) + if (vacrel->nindexes > 0 && vacrel->do_index_cleanup) update_index_statistics(vacrel); - /* If no indexes, make log report that lazy_vacuum_heap_rel would've made */ - if (vacuumed_pages) + /* + * If table has no indexes and at least one heap page was vacuumed, make + * log report that lazy_vacuum_heap_rel would've made had there been + * indexes (having indexes implies using the two pass strategy). + */ + Assert(vacrel->nindexes == 0 || vacuumed_pages == 0); + if (vacuumed_pages > 0) ereport(elevel, - (errmsg("\"%s\": removed %.0f row versions in %u pages", - vacrel->relname, - tups_vacuumed, vacuumed_pages))); + (errmsg("\"%s\": removed %lld dead item identifiers in %u pages", + vacrel->relname, (long long) vacrel->lpdead_items, + vacuumed_pages))); initStringInfo(&buf); appendStringInfo(&buf, - _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"), - nkeep, vacrel->OldestXmin); - appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"), - nunused); + _("%lld dead row versions cannot be removed yet, oldest xmin: %u\n"), + (long long) vacrel->new_dead_tuples, vacrel->OldestXmin); + appendStringInfo(&buf, _("There were %lld unused item identifiers.\n"), + (long long) vacrel->nunused); appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ", "Skipped %u pages due to buffer pins, ", vacrel->pinskipped_pages), @@ -1821,23 +1465,24 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams 
*params, bool aggressive) appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0)); ereport(elevel, - (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages", + (errmsg("\"%s\": found %lld removable, %lld nonremovable row versions in %u out of %u pages", vacrel->relname, - tups_vacuumed, num_tuples, - vacrel->scanned_pages, nblocks), + (long long) vacrel->tuples_deleted, + (long long) vacrel->num_tuples, vacrel->scanned_pages, + nblocks), errdetail_internal("%s", buf.data))); pfree(buf.data); } /* - * lazy_check_needs_freeze() -- scan page to see if any tuples - * need to be cleaned to avoid wraparound + * lazy_scan_needs_freeze() -- see if any tuples need to be cleaned to avoid + * wraparound * * Returns true if the page needs to be vacuumed using cleanup lock. * Also returns a flag indicating whether page contains any tuples at all. */ static bool -lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) +lazy_scan_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) { Page page = BufferGetPage(buf); OffsetNumber offnum, @@ -1869,7 +1514,9 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) vacrel->offnum = offnum; itemid = PageGetItemId(page, offnum); - /* this should match hastup test in count_nondeletable_pages() */ + /* + * This should match hastup test in lazy_truncate_count_nondeletable() + */ if (ItemIdIsUsed(itemid)) *hastup = true; @@ -1890,6 +1537,619 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) return (offnum <= maxoff); } +/* + * Handle new page during lazy_scan_heap(). + * + * Caller must hold pin and buffer cleanup lock on buf. + * + * All-zeroes pages can be left over if either a backend extends the relation + * by a single page, but crashes before the newly initialized page has been + * written out, or when bulk-extending the relation (which creates a number of + * empty pages at the tail end of the relation, but enters them into the FSM). 
+ * + * Note we do not enter the page into the visibilitymap. That has the downside + * that we repeatedly visit this page in subsequent vacuums, but otherwise + * we'll never discover the space on a promoted standby. The harm of + * repeated checking ought to normally not be too bad - the space usually + * should be used at some point, otherwise there wouldn't be any regular + * vacuums. + * + * Make sure these pages are in the FSM, to ensure they can be reused. Do that + * by testing if there's any space recorded for the page. If not, enter it. We + * do so after releasing the lock on the heap page, the FSM is approximate, + * after all. + */ +static void +lazy_scan_new_page(LVRelState *vacrel, Buffer buf) +{ + Relation onerel = vacrel->onerel; + BlockNumber blkno = BufferGetBlockNumber(buf); + + if (GetRecordedFreeSpace(onerel, blkno) == 0) + { + Size freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData; + + UnlockReleaseBuffer(buf); + RecordPageWithFreeSpace(onerel, blkno, freespace); + return; + } + + UnlockReleaseBuffer(buf); +} + +/* + * Handle empty page during lazy_scan_heap(). + * + * Caller must hold pin and buffer cleanup lock on buf, as well as a pin (but + * not a lock) on vmbuffer. + */ +static void +lazy_scan_empty_page(LVRelState *vacrel, Buffer buf, Buffer vmbuffer) +{ + Relation onerel = vacrel->onerel; + Page page = BufferGetPage(buf); + BlockNumber blkno = BufferGetBlockNumber(buf); + Size freespace = PageGetHeapFreeSpace(page); + + /* + * Empty pages are always all-visible and all-frozen (note that the same + * is currently not true for new pages, see lazy_scan_new_page()). + */ + if (!PageIsAllVisible(page)) + { + START_CRIT_SECTION(); + + /* mark buffer dirty before writing a WAL record */ + MarkBufferDirty(buf); + + /* + * It's possible that another backend has extended the heap, + * initialized the page, and then failed to WAL-log the page due to an + * ERROR. 
Since heap extension is not WAL-logged, recovery might try + * to replay our record setting the page all-visible and find that the + * page isn't initialized, which will cause a PANIC. To prevent that, + * check whether the page has been previously WAL-logged, and if not, + * do that now. + */ + if (RelationNeedsWAL(onerel) && + PageGetLSN(page) == InvalidXLogRecPtr) + log_newpage_buffer(buf, true); + + PageSetAllVisible(page); + visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, + vmbuffer, InvalidTransactionId, + VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN); + END_CRIT_SECTION(); + } + + UnlockReleaseBuffer(buf); + RecordPageWithFreeSpace(onerel, blkno, freespace); +} + +/* + * Handle setting VM bit inside lazy_scan_heap(), after pruning and freezing. + */ +static void +lazy_scan_setvmbit(LVRelState *vacrel, Buffer buf, Buffer vmbuffer, + LVPagePruneState *pageprunestate, + LVPageVisMapState *pagevmstate) +{ + Relation onerel = vacrel->onerel; + Page page = BufferGetPage(buf); + BlockNumber blkno = BufferGetBlockNumber(buf); + + /* mark page all-visible, if appropriate */ + if (pageprunestate->all_visible && + !pagevmstate->all_visible_according_to_vm) + { + uint8 flags = VISIBILITYMAP_ALL_VISIBLE; + + if (pageprunestate->all_frozen) + flags |= VISIBILITYMAP_ALL_FROZEN; + + /* + * It should never be the case that the visibility map page is set + * while the page-level bit is clear, but the reverse is allowed (if + * checksums are not enabled). Regardless, set both bits so that we + * get back in sync. + * + * NB: If the heap page is all-visible but the VM bit is not set, we + * don't need to dirty the heap page. However, if checksums are + * enabled, we do need to make sure that the heap page is dirtied + * before passing it to visibilitymap_set(), because it may be logged. + * Given that this situation should only happen in rare cases after a + * crash, it is not worth optimizing. 
+ */ + PageSetAllVisible(page); + MarkBufferDirty(buf); + visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, vmbuffer, + pagevmstate->visibility_cutoff_xid, flags); + } + + /* + * The visibility map bit should never be set if the page-level bit is + * clear. However, it's possible that the bit got cleared after we + * checked it and before we took the buffer content lock, so we must + * recheck before jumping to the conclusion that something bad has + * happened. + */ + else if (pagevmstate->all_visible_according_to_vm && + !PageIsAllVisible(page) && VM_ALL_VISIBLE(onerel, blkno, + &vmbuffer)) + { + elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", + RelationGetRelationName(onerel), blkno); + visibilitymap_clear(onerel, blkno, vmbuffer, + VISIBILITYMAP_VALID_BITS); + } + + /* + * It's possible for the value returned by + * GetOldestNonRemovableTransactionId() to move backwards, so it's not + * wrong for us to see tuples that appear to not be visible to everyone + * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value + * never moves backwards, but GetOldestNonRemovableTransactionId() is + * conservative and sometimes returns a value that's unnecessarily small, + * so if we see that contradiction it just means that the tuples that we + * think are not visible to everyone yet actually are, and the + * PD_ALL_VISIBLE flag is correct. + * + * There should never be dead tuples on a page with PD_ALL_VISIBLE set, + * however. + */ + else if (PageIsAllVisible(page) && pageprunestate->has_lpdead_items) + { + elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u", + RelationGetRelationName(onerel), blkno); + PageClearAllVisible(page); + MarkBufferDirty(buf); + visibilitymap_clear(onerel, blkno, vmbuffer, + VISIBILITYMAP_VALID_BITS); + } + + /* + * If the all-visible page is all-frozen but not marked as such yet, mark + * it as all-frozen. 
Note that all_frozen is only valid if all_visible is + * true, so we must check both. + */ + else if (pagevmstate->all_visible_according_to_vm && + pageprunestate->all_visible && pageprunestate->all_frozen && + !VM_ALL_FROZEN(onerel, blkno, &vmbuffer)) + { + /* + * We can pass InvalidTransactionId as the cutoff XID here, because + * setting the all-frozen bit doesn't cause recovery conflicts. + */ + visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, + vmbuffer, InvalidTransactionId, + VISIBILITYMAP_ALL_FROZEN); + } +} + +/* + * lazy_scan_prune() -- lazy_scan_heap() pruning and freezing. + * + * Caller must hold pin and buffer cleanup lock on the buffer. + */ +static void +lazy_scan_prune(LVRelState *vacrel, Buffer buf, GlobalVisState *vistest, + LVPagePruneState *pageprunestate, + LVPageVisMapState *pagevmstate, + VacOptTernaryValue index_cleanup) +{ + Relation onerel = vacrel->onerel; + bool tupgone; + BlockNumber blkno; + Page page; + OffsetNumber offnum, + maxoff; + ItemId itemid; + HeapTupleData tuple; + int tuples_deleted, + lpdead_items, + new_dead_tuples, + num_tuples, + live_tuples, + nunused; + int nredirect PG_USED_FOR_ASSERTS_ONLY; + int ntupoffsets; + OffsetNumber deadoffsets[MaxHeapTuplesPerPage]; + OffsetNumber tupoffsets[MaxHeapTuplesPerPage]; + + blkno = BufferGetBlockNumber(buf); + page = BufferGetPage(buf); + + /* Initialize (or reset) page-level counters */ + tuples_deleted = 0; + lpdead_items = 0; + new_dead_tuples = 0; + num_tuples = 0; + live_tuples = 0; + nunused = 0; + nredirect = 0; + + /* + * Prune all HOT-update chains in this page. + * + * We count tuples removed by the pruning step as removed by VACUUM + * (existing LP_DEAD line pointers don't count). + */ + tuples_deleted = heap_page_prune(onerel, buf, vistest, + InvalidTransactionId, 0, false, + &vacrel->latestRemovedXid, + &vacrel->offnum); + + /* + * Now scan the page to collect vacuumable items and check for tuples + * requiring freezing. 
+ */ + pageprunestate->hastup = false; + pageprunestate->has_lpdead_items = false; + pageprunestate->all_visible = true; + pageprunestate->all_frozen = true; + ntupoffsets = 0; + tupgone = false; + maxoff = PageGetMaxOffsetNumber(page); + + /* + * Note: If you change anything in the loop below, also look at + * heap_page_is_all_visible to see if that needs to be changed. + */ + for (offnum = FirstOffsetNumber; + offnum <= maxoff; + offnum = OffsetNumberNext(offnum)) + { + /* + * Set the offset number so that we can display it along with any + * error that occurred while processing this tuple. + */ + vacrel->offnum = offnum; + itemid = PageGetItemId(page, offnum); + + /* Unused items require no processing, but we count 'em */ + if (!ItemIdIsUsed(itemid)) + { + nunused++; + continue; + } + + /* Redirect items mustn't be touched */ + if (ItemIdIsRedirected(itemid)) + { + pageprunestate->hastup = true; /* page won't be truncatable */ + nredirect++; + continue; + } + + /* + * LP_DEAD line pointers are to be vacuumed normally; but we don't + * count them in tuples_deleted, else we'd be double-counting (at + * least in the common case where heap_page_prune() just freed up a + * non-HOT tuple). + * + * We are usually able to log lpdead_items separately, though, which + * shows a count of precisely these dead items -- items that we'll + * delete from indexes. It's treated as index-related + * instrumentation. + */ + if (ItemIdIsDead(itemid)) + { + deadoffsets[lpdead_items++] = offnum; + continue; + } + + Assert(ItemIdIsNormal(itemid)); + + ItemPointerSet(&(tuple.t_self), blkno, offnum); + tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); + tuple.t_len = ItemIdGetLength(itemid); + tuple.t_tableOid = RelationGetRelid(onerel); + + /* + * The criteria for counting a tuple as live in this block need to + * match what analyze.c's acquire_sample_rows() does, otherwise VACUUM + * and ANALYZE may produce wildly different reltuples values, e.g. 
+ * when there are many recently-dead tuples. + * + * The logic here is a bit simpler than acquire_sample_rows(), as + * VACUUM can't run inside a transaction block, which makes some cases + * impossible (e.g. in-progress insert from the same transaction). + */ + switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf)) + { + case HEAPTUPLE_DEAD: + + /* + * Ordinarily, DEAD tuples would have been removed by + * heap_page_prune(), but it's possible that the tuple state + * changed since heap_page_prune() looked. In particular an + * INSERT_IN_PROGRESS tuple could have changed to DEAD if the + * inserter aborted. So this cannot be considered an error + * condition. + * + * If the tuple is HOT-updated then it must only be removed by + * a prune operation; so we keep it just as if it were + * RECENTLY_DEAD. Also, if it's a heap-only tuple, we choose + * to keep it, because it'll be a lot cheaper to get rid of it + * in the next pruning pass than to treat it like an indexed + * tuple. Finally, if index cleanup is disabled, the second + * heap pass will not execute, and the tuple will not get + * removed, so we must treat it like any other dead tuple that + * we choose to keep. + * + * If this were to happen for a tuple that actually needed to + * be deleted, we'd be in trouble, because it'd possibly leave + * a tuple below the relation's xmin horizon alive. + * heap_prepare_freeze_tuple() is prepared to detect that case + * and abort the transaction, preventing corruption. + */ + if (HeapTupleIsHotUpdated(&tuple) || + HeapTupleIsHeapOnly(&tuple) || + index_cleanup == VACOPT_TERNARY_DISABLED) + new_dead_tuples++; + else + tupgone = true; /* we can delete the tuple */ + pageprunestate->all_visible = false; + break; + case HEAPTUPLE_LIVE: + + /* + * Count it as live. Not only is this natural, but it's also + * what acquire_sample_rows() does. + */ + live_tuples++; + + /* + * Is the tuple definitely visible to all transactions? 
+ * + * NB: Like with per-tuple hint bits, we can't set the + * PD_ALL_VISIBLE flag if the inserter committed + * asynchronously. See SetHintBits for more info. Check that + * the tuple is hinted xmin-committed because of that. + */ + if (pageprunestate->all_visible) + { + TransactionId xmin; + + if (!HeapTupleHeaderXminCommitted(tuple.t_data)) + { + pageprunestate->all_visible = false; + break; + } + + /* + * The inserter definitely committed. But is it old enough + * that everyone sees it as committed? + */ + xmin = HeapTupleHeaderGetXmin(tuple.t_data); + if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin)) + { + pageprunestate->all_visible = false; + break; + } + + /* Track newest xmin on page. */ + if (TransactionIdFollows(xmin, + pagevmstate->visibility_cutoff_xid)) + pagevmstate->visibility_cutoff_xid = xmin; + } + break; + case HEAPTUPLE_RECENTLY_DEAD: + + /* + * If tuple is recently deleted then we must not remove it + * from relation. + */ + new_dead_tuples++; + pageprunestate->all_visible = false; + break; + case HEAPTUPLE_INSERT_IN_PROGRESS: + + /* + * This is an expected case during concurrent vacuum. + * + * We do not count these rows as live, because we expect the + * inserting transaction to update the counters at commit, and + * we assume that will happen only after we report our + * results. This assumption is a bit shaky, but it is what + * acquire_sample_rows() does, so be consistent. + */ + pageprunestate->all_visible = false; + break; + case HEAPTUPLE_DELETE_IN_PROGRESS: + /* This is an expected case during concurrent vacuum */ + pageprunestate->all_visible = false; + + /* + * Count such rows as live. As above, we assume the deleting + * transaction will commit and update the counters after we + * report. 
+ */ + live_tuples++; + break; + default: + elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); + break; + } + + if (tupgone) + { + /* Pretend that this is an LP_DEAD item */ + deadoffsets[lpdead_items++] = offnum; + /* But remember it for XLOG_HEAP2_CLEANUP_INFO record */ + HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data, + &vacrel->latestRemovedXid); + } + else + { + /* + * Each non-removable tuple must be checked to see if it needs + * freezing + */ + tupoffsets[ntupoffsets++] = offnum; + num_tuples++; + pageprunestate->hastup = true; + } + } + + /* + * We have now divided every item on the page into either an LP_DEAD item + * that will need to be vacuumed in indexes later, or a LP_NORMAL tuple + * that remains and needs to be considered for freezing now (LP_UNUSED and + * LP_REDIRECT items also remain, but are of no further interest to us). + * + * Add page level counters to caller's counts, and then actually process + * LP_DEAD and LP_NORMAL items. + * + * TODO: Remove tupgone logic entirely in next commit -- we shouldn't have + * to pretend that DEAD items are LP_DEAD items. 
+ */ + Assert(lpdead_items + ntupoffsets + nunused + nredirect == maxoff); + vacrel->offnum = InvalidOffsetNumber; + + vacrel->tuples_deleted += tuples_deleted; + vacrel->lpdead_items += lpdead_items; + vacrel->new_dead_tuples += new_dead_tuples; + vacrel->num_tuples += num_tuples; + vacrel->live_tuples += live_tuples; + vacrel->nunused += nunused; + + /* + * Consider the need to freeze any items with tuple storage from the page + * first (arbitrary) + */ + if (ntupoffsets > 0) + { + xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage]; + int nfrozen = 0; + + Assert(pageprunestate->hastup); + + for (int i = 0; i < ntupoffsets; i++) + { + OffsetNumber item = tupoffsets[i]; + bool tuple_totally_frozen; + + ItemPointerSet(&(tuple.t_self), blkno, item); + itemid = PageGetItemId(page, item); + tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); + Assert(ItemIdIsNormal(itemid) && ItemIdHasStorage(itemid)); + tuple.t_len = ItemIdGetLength(itemid); + tuple.t_tableOid = RelationGetRelid(vacrel->onerel); + if (heap_prepare_freeze_tuple(tuple.t_data, + vacrel->relfrozenxid, + vacrel->relminmxid, + vacrel->FreezeLimit, + vacrel->MultiXactCutoff, + &frozen[nfrozen], + &tuple_totally_frozen)) + frozen[nfrozen++].offset = item; + if (!tuple_totally_frozen) + pageprunestate->all_frozen = false; + } + + if (nfrozen > 0) + { + /* + * At least one tuple with storage needs to be frozen -- execute + * that now. + * + * If we need to freeze any tuples we'll mark the buffer dirty, + * and write a WAL record recording the changes. We must log the + * changes to be crash-safe against future truncation of CLOG. 
+ */ + START_CRIT_SECTION(); + + MarkBufferDirty(buf); + + /* execute collected freezes */ + for (int i = 0; i < nfrozen; i++) + { + HeapTupleHeader htup; + + itemid = PageGetItemId(page, frozen[i].offset); + htup = (HeapTupleHeader) PageGetItem(page, itemid); + + heap_execute_freeze_tuple(htup, &frozen[i]); + } + + /* Now WAL-log freezing if necessary */ + if (RelationNeedsWAL(vacrel->onerel)) + { + XLogRecPtr recptr; + + recptr = log_heap_freeze(vacrel->onerel, buf, vacrel->FreezeLimit, + frozen, nfrozen); + PageSetLSN(page, recptr); + } + + END_CRIT_SECTION(); + } + } + + /* + * Now save details of the LP_DEAD items from the page in the dead_tuples + * array. Also record that page has dead items in per-page prunestate. + */ + if (lpdead_items > 0) + { + LVDeadTuples *dead_tuples = vacrel->dead_tuples; + ItemPointerData tmp; + + pageprunestate->all_visible = false; + pageprunestate->has_lpdead_items = true; + vacrel->lpdead_item_pages++; + + /* + * Don't actually save item when it is known for sure that both index + * vacuuming and heap vacuuming cannot go ahead during the ongoing VACUUM + */ + if (!vacrel->do_index_vacuuming && vacrel->nindexes > 0) + return; + + ItemPointerSetBlockNumber(&tmp, blkno); + + for (int i = 0; i < lpdead_items; i++) + { + ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]); + dead_tuples->itemptrs[dead_tuples->num_tuples++] = tmp; + } + + Assert(dead_tuples->num_tuples <= dead_tuples->max_tuples); + pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES, + dead_tuples->num_tuples); + } +} + +/* + * Remove the collected garbage tuples from the table and its indexes. 
+ */ +static void +lazy_vacuum(LVRelState *vacrel) +{ + /* Should not end up here with no indexes */ + Assert(vacrel->nindexes > 0); + Assert(!IsParallelWorker()); + + if (!vacrel->do_index_vacuuming) + { + Assert(!vacrel->do_index_cleanup); + vacrel->dead_tuples->num_tuples = 0; + return; + } + + /* Okay, we're going to do index vacuuming */ + lazy_vacuum_all_indexes(vacrel); + + /* Remove tuples from heap */ + lazy_vacuum_heap_rel(vacrel); + + /* + * Forget the now-vacuumed tuples -- just press on + */ + vacrel->dead_tuples->num_tuples = 0; +} + /* * lazy_vacuum_all_indexes() -- Main entry for index vacuuming */ @@ -1897,6 +2157,8 @@ static void lazy_vacuum_all_indexes(LVRelState *vacrel) { Assert(vacrel->nindexes > 0); + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); Assert(TransactionIdIsNormal(vacrel->relfrozenxid)); Assert(MultiXactIdIsValid(vacrel->relminmxid)); @@ -2107,6 +2369,10 @@ lazy_vacuum_heap_rel(LVRelState *vacrel) Buffer vmbuffer = InvalidBuffer; LVSavedErrInfo saved_err_info; + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); + Assert(vacrel->num_index_scans > 0); + /* Report that we are now vacuuming the heap */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP); @@ -2186,6 +2452,8 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, bool all_frozen; LVSavedErrInfo saved_err_info; + Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming); + pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno); /* Update error traceback information */ @@ -2429,7 +2697,7 @@ lazy_truncate_heap(LVRelState *vacrel) * other backends could have added tuples to these pages whilst we * were vacuuming. 
*/ - new_rel_pages = count_nondeletable_pages(vacrel); + new_rel_pages = lazy_truncate_count_nondeletable(vacrel); vacrel->blkno = new_rel_pages; if (new_rel_pages >= old_rel_pages) @@ -2478,7 +2746,7 @@ lazy_truncate_heap(LVRelState *vacrel) * Returns number of nondeletable pages (last nonempty page + 1). */ static BlockNumber -count_nondeletable_pages(LVRelState *vacrel) +lazy_truncate_count_nondeletable(LVRelState *vacrel) { Relation onerel = vacrel->onerel; BlockNumber blkno; @@ -2618,14 +2886,14 @@ count_nondeletable_pages(LVRelState *vacrel) * Return the maximum number of dead tuples we can record. */ static long -compute_max_dead_tuples(BlockNumber relblocks, bool useindex) +compute_max_dead_tuples(BlockNumber relblocks, bool hasindex) { long maxtuples; int vac_work_mem = IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1 ? autovacuum_work_mem : maintenance_work_mem; - if (useindex) + if (hasindex) { maxtuples = MAXDEADTUPLES(vac_work_mem * 1024L); maxtuples = Min(maxtuples, INT_MAX); @@ -2708,26 +2976,6 @@ lazy_space_free(LVRelState *vacrel) end_parallel_vacuum(vacrel); } -/* - * lazy_record_dead_tuple - remember one deletable tuple - */ -static void -lazy_record_dead_tuple(LVDeadTuples *dead_tuples, ItemPointer itemptr) -{ - /* - * The array shouldn't overflow under normal behavior, but perhaps it - * could if we are given a really small maintenance_work_mem. In that - * case, just forget the last few tuples (we'll get 'em next time). - */ - if (dead_tuples->num_tuples < dead_tuples->max_tuples) - { - dead_tuples->itemptrs[dead_tuples->num_tuples] = *itemptr; - dead_tuples->num_tuples++; - pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES, - dead_tuples->num_tuples); - } -} - /* * lazy_tid_reaped() -- is a particular tid deletable? * @@ -2818,7 +3066,8 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, /* * This is a stripped down version of the line pointer scan in - * lazy_scan_heap(). 
So if you change anything here, also check that code. + * lazy_scan_prune(). So if you change anything here, also check that + * code. */ maxoff = PageGetMaxOffsetNumber(page); for (offnum = FirstOffsetNumber; @@ -2864,7 +3113,7 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, { TransactionId xmin; - /* Check comments in lazy_scan_heap. */ + /* Check comments in lazy_scan_prune() */ if (!HeapTupleHeaderXminCommitted(tuple.t_data)) { all_visible = false; diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c index dd0c124e62..6bfc48c64a 100644 --- a/contrib/pg_visibility/pg_visibility.c +++ b/contrib/pg_visibility/pg_visibility.c @@ -756,10 +756,10 @@ tuple_all_visible(HeapTuple tup, TransactionId OldestXmin, Buffer buffer) return false; /* all-visible implies live */ /* - * Neither lazy_scan_heap nor heap_page_is_all_visible will mark a page - * all-visible unless every tuple is hinted committed. However, those hint - * bits could be lost after a crash, so we can't be certain that they'll - * be set here. So just check the xmin. + * Neither lazy_scan_heap/lazy_scan_prune nor heap_page_is_all_visible + * will mark a page all-visible unless every tuple is hinted committed. + * However, those hint bits could be lost after a crash, so we can't be + * certain that they'll be set here. So just check the xmin. */ xmin = HeapTupleHeaderGetXmin(tup->t_data); diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c index 1fe193bb25..adf4a61aac 100644 --- a/contrib/pgstattuple/pgstatapprox.c +++ b/contrib/pgstattuple/pgstatapprox.c @@ -58,8 +58,8 @@ typedef struct output_type * and approximate tuple_len on that basis. For the others, we count * the exact number of dead tuples etc. * - * This scan is loosely based on vacuumlazy.c:lazy_scan_heap(), but - * we do not try to avoid skipping single pages. 
+ * This scan is loosely based on vacuumlazy.c:lazy_scan_heap and + * lazy_scan_prune, but we do not try to avoid skipping single pages. */ static void statapprox_heap(Relation rel, output_type *stat) @@ -126,8 +126,9 @@ statapprox_heap(Relation rel, output_type *stat) /* * Look at each tuple on the page and decide whether it's live or - * dead, then count it and its size. Unlike lazy_scan_heap, we can - * afford to ignore problems and special cases. + * dead, then count it and its size. Unlike lazy_scan_heap and + * lazy_scan_prune, we can afford to ignore problems and special + * cases. */ maxoff = PageGetMaxOffsetNumber(page); -- 2.27.0