From bb408e16b029a4515d2669ea1d549ec23bf6392e Mon Sep 17 00:00:00 2001
From: jcoleman
Date: Wed, 4 Oct 2023 09:38:53 -0400
Subject: [PATCH v2 3/6] Opportunistically prune to avoid building a new page
 for inserts

---
 src/backend/access/heap/heapam.c | 34 ++++++++++++++++++++++++++++++++
 src/backend/access/heap/hio.c    | 23 +++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a1d3593f21..06c3998339 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3009,9 +3009,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 				infomask2_old_tuple,
 				infomask_new_tuple,
 				infomask2_new_tuple;
+#ifdef USE_ASSERT_CHECKING
+	ItemPointerData original_otid;
+#endif
 
 	Assert(ItemPointerIsValid(otid));
 
+#ifdef USE_ASSERT_CHECKING
+	ItemPointerCopy(otid, &original_otid);
+#endif
+
 	/* Cheap, simplistic check that the tuple matches the rel's rowtype. */
 	Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
 		   RelationGetNumberOfAttributes(relation));
@@ -3133,6 +3140,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	}
 
 	/*
+	 * TODO: is this note the problem pointer?
 	 * Note: beyond this point, use oldtup not otid to refer to old tuple.
 	 * otid may very well point at newtup->t_self, which we will overwrite
 	 * with the new tuple's location, so there's great risk of confusion if we
@@ -3617,13 +3625,39 @@ l2:
 		if (newtupsize > pagefree)
 		{
 			/* It doesn't fit, must use RelationGetBufferForTuple. */
+			/* TODO: every time we call this we need to make sure we don't have pointers into the page */
 			newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
 											   buffer, 0, NULL,
 											   &vmbuffer_new, &vmbuffer,
 											   0);
+
+#ifdef USE_ASSERT_CHECKING
+			/*
+			 * About 500 lines ago in this function a comment claimed we
+			 * might not be able to rely on otid after that point, but it
+			 * appears we can.
+			 */
+			Assert(ItemPointerEquals(otid, &original_otid));
+#endif
+
+			/*
+			 * Getting a buffer may have pruned the page, so we can't rely
+			 * on our original pointer into the page.
+			 */
+			lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
+			Assert(ItemIdIsNormal(lp));
+
+			/*
+			 * Mirror what we filled into oldtup at the beginning
+			 * of this function.
+			 */
+			oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+			oldtup.t_len = ItemIdGetLength(lp);
+
 			/* We're all done. */
 			break;
 		}
+
 		/* Acquire VM page pin if needed and we don't have it. */
 		if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
 			visibilitymap_pin(relation, block, &vmbuffer);
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index caa62708aa..501747063a 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -707,6 +707,29 @@ loop:
 			RelationSetTargetBlock(relation, targetBlock);
 			return buffer;
 		}
+		else
+		{
+			/*
+			 * Opportunistically prune and see if that frees up enough space to
+			 * avoid needing to build a new page.
+			 */
+			heap_page_prune_opt(relation, buffer, true);
+
+			/*
+			 * If pruning cleared the PG_PAGE_FULL hint bit, then it's worth
+			 * checking free space again.
+			 */
+			if (!PageIsFull(page))
+			{
+				pageFreeSpace = PageGetHeapFreeSpace(page);
+				if (targetFreeSpace <= pageFreeSpace)
+				{
+					/* use this page as future insert target, too */
+					RelationSetTargetBlock(relation, targetBlock);
+					return buffer;
+				}
+			}
+		}
 
 		/*
 		 * Not enough space, so we must give up our page locks and pin (if
-- 
2.39.3 (Apple Git-145)