From 18fd84a8ea9605c20d60228438a9a2fab45270a8 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Tue, 23 Apr 2019 15:16:09 -0700 Subject: [PATCH] Sanitize line pointers within contrib/amcheck. contrib/amcheck's purpose is to detect and report corruption, so clearly it must be held to a high standard when it comes to coping with corrupt state. Adopt a far more defensive approach to accessing index tuples than the core nbtree code: verify that line pointers look sane before performing pointer arithmetic to access the associated tuple from the same page. This avoids undefined behavior and assertion failures in cases where line pointers are corrupt. --- contrib/amcheck/verify_nbtree.c | 138 +++++++++++++++++++++++++------- 1 file changed, 107 insertions(+), 31 deletions(-) diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 591e0a3e46..3c411c2fd8 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -156,10 +156,13 @@ static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, static inline bool invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, Page nontarget, + BlockNumber nontargetblock, OffsetNumber upperbound); static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum); static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup); +static ItemId PageGetItemIdCareful(BtreeCheckState *state, Page page, + BlockNumber block, OffsetNumber offset); static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup, bool nonpivot); @@ -710,7 +713,9 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) ItemId itemid; /* Internal page -- downlink gets leftmost on next level */ - itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque)); + itemid = PageGetItemIdCareful(state, state->target, + state->targetblock, + P_FIRSTDATAKEY(opaque)); itup = (IndexTuple) 
PageGetItem(state->target, itemid); nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup); nextleveldown.level = opaque->btpo.level - 1; @@ -837,26 +842,29 @@ bt_target_page_check(BtreeCheckState *state) * Check the number of attributes in high key. Note, rightmost page * doesn't contain a high key, so nothing to check */ - if (!P_RIGHTMOST(topaque) && - !_bt_check_natts(state->rel, state->heapkeyspace, state->target, - P_HIKEY)) + if (!P_RIGHTMOST(topaque)) { ItemId itemid; IndexTuple itup; - itemid = PageGetItemId(state->target, P_HIKEY); - itup = (IndexTuple) PageGetItem(state->target, itemid); - - ereport(ERROR, - (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("wrong number of high key index tuple attributes in index \"%s\"", - RelationGetRelationName(state->rel)), - errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.", - state->targetblock, - BTreeTupleGetNAtts(itup, state->rel), - P_ISLEAF(topaque) ? "heap" : "index", - (uint32) (state->targetlsn >> 32), - (uint32) state->targetlsn))); + /* Verify line pointer before checking tuple */ + itemid = PageGetItemIdCareful(state, state->target, + state->targetblock, P_HIKEY); + if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target, + P_HIKEY)) + { + itup = (IndexTuple) PageGetItem(state->target, itemid); + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("wrong number of high key index tuple attributes in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.", + state->targetblock, + BTreeTupleGetNAtts(itup, state->rel), + P_ISLEAF(topaque) ? 
"heap" : "index", + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + } } /* @@ -876,7 +884,8 @@ bt_target_page_check(BtreeCheckState *state) CHECK_FOR_INTERRUPTS(); - itemid = PageGetItemId(state->target, offset); + itemid = PageGetItemIdCareful(state, state->target, + state->targetblock, offset); itup = (IndexTuple) PageGetItem(state->target, itemid); tupsize = IndexTupleSize(itup); @@ -1121,7 +1130,9 @@ bt_target_page_check(BtreeCheckState *state) OffsetNumberNext(offset)); /* Reuse itup to get pointed-to heap location of second item */ - itemid = PageGetItemId(state->target, OffsetNumberNext(offset)); + itemid = PageGetItemIdCareful(state, state->target, + state->targetblock, + OffsetNumberNext(offset)); itup = (IndexTuple) PageGetItem(state->target, itemid); nhtid = psprintf("(%u,%u)", ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), @@ -1406,7 +1417,8 @@ bt_right_page_check_scankey(BtreeCheckState *state) if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque)) { /* Return first data item (if any) */ - rightitem = PageGetItemId(rightpage, P_FIRSTDATAKEY(opaque)); + rightitem = PageGetItemIdCareful(state, rightpage, targetnext, + P_FIRSTDATAKEY(opaque)); } else if (!P_ISLEAF(opaque) && nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque))) @@ -1415,8 +1427,8 @@ bt_right_page_check_scankey(BtreeCheckState *state) * Return first item after the internal page's "negative infinity" * item */ - rightitem = PageGetItemId(rightpage, - OffsetNumberNext(P_FIRSTDATAKEY(opaque))); + rightitem = PageGetItemIdCareful(state, rightpage, targetnext, + OffsetNumberNext(P_FIRSTDATAKEY(opaque))); } else { @@ -1576,7 +1588,8 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey, if (offset_is_negative_infinity(copaque, offset)) continue; - if (!invariant_l_nontarget_offset(state, targetkey, child, offset)) + if (!invariant_l_nontarget_offset(state, targetkey, child, childblock, + offset)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), 
errmsg("down-link lower bound invariant violated for index \"%s\"", @@ -1687,7 +1700,8 @@ bt_downlink_missing_check(BtreeCheckState *state) RelationGetRelationName(state->rel)); level = topaque->btpo.level; - itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(topaque)); + itemid = PageGetItemIdCareful(state, state->target, state->targetblock, + P_FIRSTDATAKEY(topaque)); itup = (IndexTuple) PageGetItem(state->target, itemid); childblk = BTreeInnerTupleGetDownLink(itup); for (;;) @@ -1711,7 +1725,8 @@ bt_downlink_missing_check(BtreeCheckState *state) level - 1, copaque->btpo.level))); level = copaque->btpo.level; - itemid = PageGetItemId(child, P_FIRSTDATAKEY(copaque)); + itemid = PageGetItemIdCareful(state, child, childblk, + P_FIRSTDATAKEY(copaque)); itup = (IndexTuple) PageGetItem(child, itemid); childblk = BTreeInnerTupleGetDownLink(itup); /* Be slightly more pro-active in freeing this memory, just in case */ @@ -1760,7 +1775,7 @@ bt_downlink_missing_check(BtreeCheckState *state) */ if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque)) { - itemid = PageGetItemId(child, P_HIKEY); + itemid = PageGetItemIdCareful(state, child, childblk, P_HIKEY); itup = (IndexTuple) PageGetItem(child, itemid); if (BTreeTupleGetTopParent(itup) == state->targetblock) return; @@ -2095,9 +2110,13 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound) { int32 cmp; + ItemId itemid; Assert(key->pivotsearch); + /* Verify line pointer before checking tuple */ + itemid = PageGetItemIdCareful(state, state->target, state->targetblock, + upperbound); /* pg_upgrade'd indexes may legally have equal sibling tuples */ if (!key->heapkeyspace) return invariant_leq_offset(state, key, upperbound); @@ -2116,13 +2135,11 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key, if (cmp == 0) { BTPageOpaque topaque; - ItemId itemid; IndexTuple ritup; int uppnkeyatts; ItemPointer rheaptid; bool nonpivot; - itemid = PageGetItemId(state->target, upperbound); ritup = 
(IndexTuple) PageGetItem(state->target, itemid); topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque); @@ -2147,6 +2164,9 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key, * * If this function returns false, convention is that caller throws error due * to corruption. + * + * Caller is assumed to have verified that lowerbound's item pointer is + * consistent using PageGetItemIdCareful() call. */ static inline bool invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, @@ -2167,6 +2187,9 @@ invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, * * If this function returns false, convention is that caller throws error due * to corruption. + * + * Caller is assumed to have verified that lowerbound's item pointer is + * consistent using PageGetItemIdCareful() call. */ static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, @@ -2206,12 +2229,17 @@ invariant_g_offset(BtreeCheckState *state, BTScanInsert key, */ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, - Page nontarget, OffsetNumber upperbound) + Page nontarget, BlockNumber nontargetblock, + OffsetNumber upperbound) { int32 cmp; + ItemId itemid; Assert(key->pivotsearch); + /* Verify line pointer before checking tuple */ + itemid = PageGetItemIdCareful(state, nontarget, nontargetblock, + upperbound); cmp = _bt_compare(state->rel, key, nontarget, upperbound); /* pg_upgrade'd indexes may legally have equal sibling tuples */ @@ -2221,14 +2249,12 @@ invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, /* See invariant_l_offset() for an explanation of this extra step */ if (cmp == 0) { - ItemId itemid; IndexTuple child; int uppnkeyatts; ItemPointer childheaptid; BTPageOpaque copaque; bool nonpivot; - itemid = PageGetItemId(nontarget, upperbound); child = (IndexTuple) PageGetItem(nontarget, itemid); copaque = (BTPageOpaque) 
PageGetSpecialPointer(nontarget); nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque); @@ -2426,6 +2452,56 @@ bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup) return skey; } +/* + * PageGetItemId() wrapper that enforces that line pointer looks like a sane + * nbtree line pointer. + * + * Buffer page/page item access macros generally trust that line pointers are + * not corrupt. For example, there is no bounds checking in PageGetItem(). + * These preliminary sanity checks won't catch various types of line pointer + * corruption, but they do an excellent job of making undefined behavior and + * assertion failures much less likely. This can allow a subsequent check to + * cleanly identify the problem some other way. + */ +static ItemId +PageGetItemIdCareful(BtreeCheckState *state, Page page, BlockNumber block, + OffsetNumber offset) +{ + ItemId itemid; + + itemid = PageGetItemId(page, offset); + + /* Don't let caller perform a load off the end of the page */ + if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) > + BLCKSZ - sizeof(BTPageOpaqueData)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("line pointer points past end of tuple space in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.", + block, offset, ItemIdGetOffset(itemid), + ItemIdGetLength(itemid), + ItemIdGetFlags(itemid)))); + + /* + * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree + * never uses either. Verify that line pointer has storage, too, since + * even LP_DEAD items should have storage within nbtree. 
+ */ + if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) || + ItemIdGetLength(itemid) == 0) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("invalid line pointer storage in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.", + block, offset, ItemIdGetOffset(itemid), + ItemIdGetLength(itemid), + ItemIdGetFlags(itemid)))); + + return itemid; +} + /* * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must * be present in cases where that is mandatory. -- 2.17.1