From 3074c63958d79dd9f0983dd8f46b064a76051bb8 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan
Date: Sun, 31 Oct 2021 17:52:09 -0700
Subject: [PATCH v1] Add heap-only tuple assertions to pruning.

---
 src/backend/access/heap/pruneheap.c | 84 +++++++++++++++++++++++++++--
 1 file changed, 80 insertions(+), 4 deletions(-)

diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index db6912e9f..43549be04 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -844,39 +844,115 @@ heap_page_prune_execute(Buffer buffer,
 {
 	Page		page = (Page) BufferGetPage(buffer);
 	OffsetNumber *offnum;
-	int			i;
+	HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
 
 	/* Shouldn't be called unless there's something to do */
 	Assert(nredirected > 0 || ndead > 0 || nunused > 0);
 
 	/* Update all redirected line pointers */
 	offnum = redirected;
-	for (i = 0; i < nredirected; i++)
+	for (int i = 0; i < nredirected; i++)
 	{
 		OffsetNumber fromoff = *offnum++;
 		OffsetNumber tooff = *offnum++;
 		ItemId		fromlp = PageGetItemId(page, fromoff);
+		ItemId		tolp PG_USED_FOR_ASSERTS_ONLY;
+
+#ifdef USE_ASSERT_CHECKING
+
+		/*
+		 * The existing items that we set as redirects must be the first tuple
+		 * of a HOT chain that has not been pruned before now (can't be a
+		 * heap-only tuple) when target item has tuple storage. When the item
+		 * has no storage, then we must just be maintaining the LP_REDIRECT of
+		 * a HOT chain that has been pruned at least once before now.
+		 */
+		if (!ItemIdIsRedirected(fromlp))
+		{
+			Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
+
+			htup = (HeapTupleHeader) PageGetItem(page, fromlp);
+			Assert(!HeapTupleHeaderIsHeapOnly(htup));
+		}
+		else
+		{
+			/* We shouldn't need to redundantly set the redirect */
+			Assert(ItemIdGetRedirect(fromlp) != tooff);
+		}
+
+		/*
+		 * Redirect links must point to heap-only tuples. There can be at
+		 * most one LP_REDIRECT item per HOT chain.
+		 *
+		 * We need to keep around an LP_REDIRECT item (after original
+		 * non-heap-only root tuple gets pruned away) so that it's always
+		 * possible for VACUUM to easily figure out what TID to delete from
+		 * indexes when an entire HOT chain becomes dead. A heap-only tuple
+		 * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
+		 * tuple can.
+		 */
+		tolp = PageGetItemId(page, tooff);
+		Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
+		htup = (HeapTupleHeader) PageGetItem(page, tolp);
+		Assert(HeapTupleHeaderIsHeapOnly(htup));
+#endif
 
 		ItemIdSetRedirect(fromlp, tooff);
 	}
 
 	/* Update all now-dead line pointers */
 	offnum = nowdead;
-	for (i = 0; i < ndead; i++)
+	for (int i = 0; i < ndead; i++)
 	{
 		OffsetNumber off = *offnum++;
 		ItemId		lp = PageGetItemId(page, off);
 
+#ifdef USE_ASSERT_CHECKING
+
+		/*
+		 * LP_DEAD line pointers must be left behind when they could be
+		 * pointed to by TIDs from indexes. Index scans expect TIDs to always
+		 * work as stable identifiers of a tuple version (or HOT chain).
+		 */
+		if (ItemIdHasStorage(lp))
+		{
+			Assert(ItemIdIsNormal(lp));
+			htup = (HeapTupleHeader) PageGetItem(page, lp);
+			Assert(!HeapTupleHeaderIsHeapOnly(htup));
+		}
+		else
+		{
+			/*
+			 * Whole HOT chain becomes dead. (We shouldn't need to redundantly
+			 * mark existing LP_DEAD items LP_DEAD.)
+			 */
+			Assert(ItemIdIsRedirected(lp));
+		}
+#endif
+
 		ItemIdSetDead(lp);
 	}
 
 	/* Update all now-unused line pointers */
 	offnum = nowunused;
-	for (i = 0; i < nunused; i++)
+	for (int i = 0; i < nunused; i++)
 	{
 		OffsetNumber off = *offnum++;
 		ItemId		lp = PageGetItemId(page, off);
 
+#ifdef USE_ASSERT_CHECKING
+
+		/*
+		 * Only heap-only tuples can become LP_UNUSED during pruning. They
+		 * don't need to be left in place as LP_DEAD items until VACUUM gets
+		 * around to doing index vacuuming. We are already sure that there
+		 * cannot be a TID in an index that points to the items.
+		 */
+		Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
+		htup = (HeapTupleHeader) PageGetItem(page, lp);
+		Assert(HeapTupleHeaderIsHeapOnly(htup));
+#endif
+
 		ItemIdSetUnused(lp);
 	}
 
-- 
2.30.2