From 5c0b5feb20e15a74276fa1fedbdb389616ce4930 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Fri, 27 Apr 2018 12:47:39 -0700 Subject: [PATCH v10 3/7] Treat heap TID as part of the nbtree key space. Make nbtree treat all index tuples as having a heap TID trailing key attribute. Heap TID becomes a first class part of the key space on all levels of the tree. Index searches can distinguish duplicates by heap TID. Non-unique index insertions will descend straight to the leaf page that they'll insert on to (unless there is a concurrent page split). This general approach has numerous benefits for performance, and is prerequisite to teaching VACUUM to perform "retail index tuple deletion". Naively adding a new attribute to every pivot tuple has unacceptable overhead (it bloats internal pages), so suffix truncation of pivot tuples is added. This will generally truncate away the "extra" heap TID attribute from pivot tuples during a leaf page split, and may also truncate away additional user attributes. This can increase fan-out, especially in a multi-column index. Truncation can only occur at the attribute granularity, which isn't particularly effective, but works well enough for now. Only new indexes (BTREE_VERSION 4 indexes) will have insertions that treat heap TID as a tie-breaker attribute, or will have pivot tuples undergo suffix truncation during a leaf page split (on-disk compatibility with versions 2 and 3 is preserved). Upgrades to version 4 cannot be performed on-the-fly, unlike upgrades from version 2 to version 3. contrib/amcheck continues to work with version 2 and 3 indexes, while also enforcing the newer/more strict invariants with version 4 indexes. We no longer allow a search for free space among multiple pages full of duplicates to "get tired", except when needed to preserve compatibility with earlier versions. This has significant benefits for free space management in secondary indexes on low cardinality attributes. 
However, without the next commit in the patch series (without having "single value" mode and "many duplicates" mode within _bt_findsplitloc()), these cases will be significantly regressed, since they'll naively perform 50:50 splits without there being any hope of reusing space left free on the left half of the split. Note that this commit reduces the size of new tuples by a single MAXALIGN() quantum. The documented definition of "1/3 of a page" is already inexact, so it seems unnecessary to revise it. However, there should be a compatibility note in the v12 release notes. The new definition is 2704 bytes on 64-bit systems, down from 2712 bytes. --- contrib/amcheck/verify_nbtree.c | 304 ++++++++++-- contrib/pageinspect/btreefuncs.c | 2 +- contrib/pageinspect/expected/btree.out | 2 +- contrib/pgstattuple/expected/pgstattuple.out | 10 +- doc/src/sgml/indices.sgml | 24 +- src/backend/access/common/indextuple.c | 6 +- src/backend/access/nbtree/README | 164 ++++--- src/backend/access/nbtree/nbtinsert.c | 291 +++++++----- src/backend/access/nbtree/nbtpage.c | 205 +++++--- src/backend/access/nbtree/nbtree.c | 2 +- src/backend/access/nbtree/nbtsearch.c | 103 ++++- src/backend/access/nbtree/nbtsort.c | 88 ++-- src/backend/access/nbtree/nbtutils.c | 463 +++++++++++++++++-- src/backend/access/nbtree/nbtxlog.c | 43 +- src/backend/access/rmgrdesc/nbtdesc.c | 8 - src/backend/utils/sort/tuplesort.c | 13 +- src/include/access/nbtree.h | 166 +++++-- src/include/access/nbtxlog.h | 20 +- src/test/regress/expected/dependency.out | 4 +- src/test/regress/expected/event_trigger.out | 4 +- src/test/regress/expected/foreign_data.out | 8 +- src/test/regress/expected/rowsecurity.out | 4 +- src/tools/pgindent/typedefs.list | 4 + 23 files changed, 1502 insertions(+), 436 deletions(-) diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index c7cdca3962..b5e2709c88 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -44,6 +44,8 @@ 
PG_MODULE_MAGIC; * block per level, which is bound by the range of BlockNumber: */ #define InvalidBtreeLevel ((uint32) InvalidBlockNumber) +#define BTreeTupleGetNKeyAtts(itup, rel) \ + Min(IndexRelationGetNumberOfKeyAttributes(rel), BTreeTupleGetNAtts(itup, rel)) /* * State associated with verifying a B-Tree index @@ -65,6 +67,8 @@ typedef struct BtreeCheckState /* B-Tree Index Relation and associated heap relation */ Relation rel; Relation heaprel; + /* rel is heapkeyspace index? */ + bool heapkeyspace; /* ShareLock held on heap/index, rather than AccessShareLock? */ bool readonly; /* Also verifying heap has no unindexed tuples? */ @@ -121,7 +125,7 @@ static void bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed); static inline void btree_index_checkable(Relation rel); static void bt_check_every_level(Relation rel, Relation heaprel, - bool readonly, bool heapallindexed); + bool heapkeyspace, bool readonly, bool heapallindexed); static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level); static void bt_target_page_check(BtreeCheckState *state); @@ -134,15 +138,19 @@ static void bt_tuple_present_callback(Relation index, HeapTuple htup, bool tupleIsAlive, void *checkstate); static inline bool offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset); +static inline bool invariant_l_offset(BtreeCheckState *state, BTScanInsert key, + OffsetNumber upperbound); static inline bool invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber upperbound); -static inline bool invariant_geq_offset(BtreeCheckState *state, BTScanInsert key, +static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, OffsetNumber lowerbound); -static inline bool invariant_leq_nontarget_offset(BtreeCheckState *state, +static inline bool invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, Page nontarget, OffsetNumber upperbound); static Page palloc_btree_page(BtreeCheckState 
*state, BlockNumber blocknum); +static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, + IndexTuple itup, bool nonpivot); /* * bt_index_check(index regclass, heapallindexed boolean) @@ -199,6 +207,7 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed) Oid heapid; Relation indrel; Relation heaprel; + bool heapkeyspace; LOCKMODE lockmode; if (parentcheck) @@ -249,7 +258,9 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed) btree_index_checkable(indrel); /* Check index, possibly against table it is an index on */ - bt_check_every_level(indrel, heaprel, parentcheck, heapallindexed); + heapkeyspace = _bt_heapkeyspace(indrel); + bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck, + heapallindexed); /* * Release locks early. That's ok here because nothing in the called @@ -319,8 +330,8 @@ btree_index_checkable(Relation rel) * parent/child check cannot be affected.) */ static void -bt_check_every_level(Relation rel, Relation heaprel, bool readonly, - bool heapallindexed) +bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, + bool readonly, bool heapallindexed) { BtreeCheckState *state; Page metapage; @@ -341,6 +352,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, state = palloc0(sizeof(BtreeCheckState)); state->rel = rel; state->heaprel = heaprel; + state->heapkeyspace = heapkeyspace; state->readonly = readonly; state->heapallindexed = heapallindexed; @@ -801,7 +813,8 @@ bt_target_page_check(BtreeCheckState *state) * doesn't contain a high key, so nothing to check */ if (!P_RIGHTMOST(topaque) && - !_bt_check_natts(state->rel, state->target, P_HIKEY)) + !_bt_check_natts(state->rel, state->heapkeyspace, state->target, + P_HIKEY)) { ItemId itemid; IndexTuple itup; @@ -834,6 +847,7 @@ bt_target_page_check(BtreeCheckState *state) IndexTuple itup; size_t tupsize; BTScanInsert skey; + bool lowersizelimit; CHECK_FOR_INTERRUPTS(); @@ -860,7 +874,8 
@@ bt_target_page_check(BtreeCheckState *state) errhint("This could be a torn page problem."))); /* Check the number of index tuple attributes */ - if (!_bt_check_natts(state->rel, state->target, offset)) + if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target, + offset)) { char *itid, *htid; @@ -901,7 +916,58 @@ bt_target_page_check(BtreeCheckState *state) continue; /* Build insertion scankey for current page offset/tuple */ - skey = _bt_mkscankey(state->rel, itup); + skey = _bt_mkscankey(state->rel, itup, false); + + /* + * Make sure tuple size does not exceed the relevant BTREE_VERSION + * specific limit. + * + * BTREE_VERSION 4 (which introduced heapkeyspace rules) requisitioned + * a MAXALIGN() quantum of space from BTMaxItemSize() in order to + * ensure that suffix truncation always has enough space to add an + * explicit heap TID back to a tuple -- we pessimistically assume that + * every newly inserted tuple will eventually need to have a heap TID + * appended during a future leaf page split, when the tuple becomes + * the basis of the new high key (pivot tuple) for the leaf page. + * + * Since a MAXALIGN() quantum is reserved for that purpose, we must + * not enforce the slightly lower limit when the extra quantum has + * been used as intended. In other words, there is only a + * cross-version difference in the limit on tuple size within leaf + * pages. + * + * Still, we're particular about the details within BTREE_VERSION 4 + * internal pages. Pivot tuples may only use the extra quantum for + * its designated purpose. Enforce the lower limit for pivot tuples + * when an explicit heap TID isn't actually present. (In all other + * cases suffix truncation is guaranteed to generate a pivot tuple + * that's no larger than the first right tuple provided to it by its + * caller.) + */ + lowersizelimit = skey->heapkeyspace && + (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL); + if (tupsize > (lowersizelimit ? 
BTMaxItemSize(state->target) : + BTMaxItemSizeNoHeapTid(state->target))) + { + char *itid, + *htid; + + itid = psprintf("(%u,%u)", state->targetblock, offset); + htid = psprintf("(%u,%u)", + ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), + ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid))); + + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("index row size %zu exceeds maximum for index \"%s\"", + tupsize, RelationGetRelationName(state->rel)), + errdetail_internal("Index tid=%s points to %s tid=%s page lsn=%X/%X.", + itid, + P_ISLEAF(topaque) ? "heap" : "index", + htid, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + } /* Fingerprint leaf page tuples (those that point to the heap) */ if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid)) @@ -926,9 +992,35 @@ bt_target_page_check(BtreeCheckState *state) * grandparents (as well as great-grandparents, and so on). We don't * go to those lengths because that would be prohibitively expensive, * and probably not markedly more effective in practice. + * + * On the leaf level, we check that the key is <= the highkey. + * However, on non-leaf levels we check that the key is < the highkey, + * because the high key is "just another separator" rather than a copy + * of some existing key item; we expect it to be unique among all keys + * on the same level. (Suffix truncation will sometimes produce a + * leaf highkey that is an untruncated copy of the lastleft item, but + * never any other item, which necessitates weakening the leaf level + * check to <=.) + * + * Full explanation for why a highkey is never truly a copy of another + * item from the same level on internal levels: + * + * While the new left page's high key is copied from the first offset + * on the right page during an internal page split, that's not the + * full story. 
In effect, internal pages are split in the middle of + * the firstright tuple, not between the would-be lastleft and + * firstright tuples: the firstright key ends up on the left side as + * left's new highkey, and the firstright downlink ends up on the + * right side as right's new "negative infinity" item. The negative + * infinity tuple is truncated to zero attributes, so we're only left + * with the downlink. In other words, the copying is just an + * implementation detail of splitting in the middle of a (pivot) + * tuple. (See also: "Notes About Data Representation" in the nbtree + * README.) */ if (!P_RIGHTMOST(topaque) && - !invariant_leq_offset(state, skey, P_HIKEY)) + !(P_ISLEAF(topaque) ? invariant_leq_offset(state, skey, P_HIKEY) : + invariant_l_offset(state, skey, P_HIKEY))) { char *itid, *htid; @@ -954,10 +1046,10 @@ bt_target_page_check(BtreeCheckState *state) * * Item order check * * * Check that items are stored on page in logical order, by checking - * current item is less than or equal to next item (if any). + * current item is strictly less than next item (if any). */ if (OffsetNumberNext(offset) <= max && - !invariant_leq_offset(state, skey, OffsetNumberNext(offset))) + !invariant_l_offset(state, skey, OffsetNumberNext(offset))) { char *itid, *htid, @@ -1022,9 +1114,9 @@ bt_target_page_check(BtreeCheckState *state) /* Set up right item scankey */ if (righttup) - rightkey = _bt_mkscankey(state->rel, righttup); + rightkey = _bt_mkscankey(state->rel, righttup, false); - if (righttup && !invariant_geq_offset(state, rightkey, max)) + if (righttup && !invariant_g_offset(state, rightkey, max)) { /* * As explained at length in bt_right_page_check_tuple(), @@ -1354,7 +1446,8 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey, /* * Verify child page has the downlink key from target page (its parent) as - * a lower bound. + * a lower bound; downlink must be strictly less than all keys on the + * page. 
* * Check all items, rather than checking just the first and trusting that * the operator class obeys the transitive law. @@ -1403,14 +1496,29 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey, { /* * Skip comparison of target page key against "negative infinity" - * item, if any. Checking it would indicate that it's not an upper - * bound, but that's only because of the hard-coding within - * _bt_compare(). + * item, if any. Checking it would indicate that it's not a strict + * lower bound, but that's only because of the hard-coding for + * negative infinity items within _bt_compare(). + * + * If nbtree didn't truncate negative infinity tuples during internal + * page splits then we'd expect child's negative infinity key to be + * equal to the scankey/downlink from target/parent (it would be a + * "low key" in this hypothetical scenario, and so it would still need + * to be treated as a special case here). + * + * Negative infinity items can be thought of as a strict lower bound + * that works transitively, with the last non-negative-infinity pivot + * followed during a descent from the root as its "true" strict lower + * bound. Only a small number of negative infinity items are truly + * negative infinity; those that are the first items of leftmost + * internal pages. In more general terms, a negative infinity item is + * only negative infinity with respect to the subtree that the page is + * at the root of. 
*/ if (offset_is_negative_infinity(copaque, offset)) continue; - if (!invariant_leq_nontarget_offset(state, targetkey, child, offset)) + if (!invariant_l_nontarget_offset(state, targetkey, child, offset)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("down-link lower bound invariant violated for index \"%s\"", @@ -1750,6 +1858,63 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset) return !P_ISLEAF(opaque) && offset == P_FIRSTDATAKEY(opaque); } +/* + * Does the invariant hold that the key is strictly less than a given upper + * bound offset item? + * + * If this function returns false, convention is that caller throws error due + * to corruption. + */ +static inline bool +invariant_l_offset(BtreeCheckState *state, BTScanInsert key, + OffsetNumber upperbound) +{ + int32 cmp; + + /* + * pg_upgrade'd indexes may legally have equal sibling tuples. Their + * pivot tuples can never have key attributes truncated away. + */ + if (!key->heapkeyspace) + return invariant_leq_offset(state, key, upperbound); + + cmp = _bt_compare(state->rel, key, state->target, upperbound); + + /* + * _bt_compare interprets the absence of attributes in scan keys as + * meaning that they're not participating in a search. In practice, this + * behavior is equivalent to an explicit negative infinity representation + * within nbtree. We care about the distinction between strict and + * non-strict bounds, though, and so must consider truncated/negative + * infinity attributes explicitly. 
+ */ + if (cmp == 0) + { + BTPageOpaque topaque; + ItemId itemid; + IndexTuple ritup; + int uppnkeyatts; + ItemPointer rheaptid; + bool nonpivot; + + itemid = PageGetItemId(state->target, upperbound); + ritup = (IndexTuple) PageGetItem(state->target, itemid); + topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); + nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque); + + /* Get number of keys + heap TID for item to the right */ + uppnkeyatts = BTreeTupleGetNKeyAtts(ritup, state->rel); + rheaptid = BTreeTupleGetHeapTIDCareful(state, ritup, nonpivot); + + if (key->keysz == uppnkeyatts) + return key->scantid == NULL && rheaptid != NULL; + + return key->keysz < uppnkeyatts; + } + + return cmp < 0; +} + /* * Does the invariant hold that the key is less than or equal to a given upper * bound offset item? @@ -1769,42 +1934,96 @@ invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, } /* - * Does the invariant hold that the key is greater than or equal to a given - * lower bound offset item? + * Does the invariant hold that the key is strictly greater than a given lower + * bound offset item? * * If this function returns false, convention is that caller throws error due * to corruption. */ static inline bool -invariant_geq_offset(BtreeCheckState *state, BTScanInsert key, - OffsetNumber lowerbound) +invariant_g_offset(BtreeCheckState *state, BTScanInsert key, + OffsetNumber lowerbound) { int32 cmp; cmp = _bt_compare(state->rel, key, state->target, lowerbound); - return cmp >= 0; + /* + * pg_upgrade'd indexes may legally have equal sibling tuples. Their + * pivot tuples can never have key attributes truncated away. + */ + if (!key->heapkeyspace) + return cmp >= 0; + + /* + * No need to consider the possibility that scankey has attributes that we + * need to force to be interpreted as negative infinity. 
That could cause + * us to miss the fact that the scankey is less than rather than equal to + * its lower bound, but the index is corrupt either way. + */ + return cmp > 0; } /* - * Does the invariant hold that the key is less than or equal to a given upper + * Does the invariant hold that the key is strictly less than a given upper * bound offset item, with the offset relating to a caller-supplied page that - * is not the current target page? Caller's non-target page is typically a - * child page of the target, checked as part of checking a property of the - * target page (i.e. the key comes from the target). + * is not the current target page? + * + * Caller's non-target page is a child page of the target, checked as part of + * checking a property of the target page (i.e. the key comes from the + * target). * * If this function returns false, convention is that caller throws error due * to corruption. */ static inline bool -invariant_leq_nontarget_offset(BtreeCheckState *state, BTScanInsert key, +invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, Page nontarget, OffsetNumber upperbound) { int32 cmp; cmp = _bt_compare(state->rel, key, nontarget, upperbound); - return cmp <= 0; + /* + * pg_upgrade'd indexes may legally have equal sibling tuples. Their + * pivot tuples can never have key attributes truncated away. + */ + if (!key->heapkeyspace) + return cmp <= 0; + + /* + * _bt_compare interprets the absence of attributes in scan keys as + * meaning that they're not participating in a search. In practice, this + * behavior is equivalent to an explicit negative infinity representation + * within nbtree. We care about the distinction between strict and + * non-strict bounds, though, and so must consider truncated/negative + * infinity attributes explicitly. 
+ */ + if (cmp == 0) + { + ItemId itemid; + IndexTuple child; + int uppnkeyatts; + ItemPointer childheaptid; + BTPageOpaque copaque; + bool nonpivot; + + itemid = PageGetItemId(nontarget, upperbound); + child = (IndexTuple) PageGetItem(nontarget, itemid); + copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget); + nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque); + + /* Get number of keys + heap TID for child/non-target item */ + uppnkeyatts = BTreeTupleGetNKeyAtts(child, state->rel); + childheaptid = BTreeTupleGetHeapTIDCareful(state, child, nonpivot); + + if (key->keysz == uppnkeyatts) + return key->scantid == NULL && childheaptid != NULL; + + return key->keysz < uppnkeyatts; + } + + return cmp < 0; } /* @@ -1960,3 +2179,28 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) return page; } + +/* + * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must + * be present in cases where that is mandatory. + * + * This doesn't add much as of BTREE_VERSION 4, since the INDEX_ALT_TID_MASK + * bit is effectively a proxy for whether or not the tuple is a pivot tuple. + * It may become more useful in the future, when non-pivot tuples support their + * own alternative INDEX_ALT_TID_MASK representation. 
+ */ +static inline ItemPointer +BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, IndexTuple itup, + bool nonpivot) +{ + ItemPointer result = BTreeTupleGetHeapTID(itup); + BlockNumber targetblock = state->targetblock; + + if (result == NULL && nonpivot) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("block %u or its right sibling block or child block in index \"%s\" contains non-pivot tuple that lacks a heap TID", + targetblock, RelationGetRelationName(state->rel)))); + + return result; +} diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index 184ac62255..251be13b65 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -560,7 +560,7 @@ bt_metap(PG_FUNCTION_ARGS) * Get values of extended metadata if available, use default values * otherwise. */ - if (metad->btm_version == BTREE_VERSION) + if (metad->btm_version >= BTREE_NOVAC_VERSION) { values[j++] = psprintf("%u", metad->btm_oldest_btpo_xact); values[j++] = psprintf("%f", metad->btm_last_cleanup_num_heap_tuples); diff --git a/contrib/pageinspect/expected/btree.out b/contrib/pageinspect/expected/btree.out index 2aaa4df53b..07c2dcd771 100644 --- a/contrib/pageinspect/expected/btree.out +++ b/contrib/pageinspect/expected/btree.out @@ -5,7 +5,7 @@ CREATE INDEX test1_a_idx ON test1 USING btree (a); SELECT * FROM bt_metap('test1_a_idx'); -[ RECORD 1 ]-----------+------- magic | 340322 -version | 3 +version | 4 root | 1 level | 0 fastroot | 1 diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out index 9858ea69d4..9920dbfd40 100644 --- a/contrib/pgstattuple/expected/pgstattuple.out +++ b/contrib/pgstattuple/expected/pgstattuple.out @@ -48,7 +48,7 @@ select version, tree_level, from pgstatindex('test_pkey'); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation 
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -58,7 +58,7 @@ select version, tree_level, from pgstatindex('test_pkey'::text); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -68,7 +68,7 @@ select version, tree_level, from pgstatindex('test_pkey'::name); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -78,7 +78,7 @@ select version, tree_level, from pgstatindex('test_pkey'::regclass); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select pg_relpages('test'); @@ -232,7 +232,7 @@ create index test_partition_hash_idx on test_partition using hash (a); select pgstatindex('test_partition_idx'); pgstatindex ------------------------------ - 
(3,0,8192,0,0,0,0,0,NaN,NaN) + (4,0,8192,0,0,0,0,0,NaN,NaN) (1 row) select pgstathashindex('test_partition_hash_idx'); diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index 46f427b312..21c978503a 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -504,8 +504,9 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor); By default, B-tree indexes store their entries in ascending order - with nulls last. This means that a forward scan of an index on - column x produces output satisfying ORDER BY x + with nulls last (table TID is treated as a tiebreaker column among + otherwise equal entries). This means that a forward scan of an + index on column x produces output satisfying ORDER BY x (or more verbosely, ORDER BY x ASC NULLS LAST). The index can also be scanned backward, producing output satisfying ORDER BY x DESC @@ -1162,10 +1163,21 @@ CREATE INDEX tab_x_y ON tab(x, y); the extra columns are trailing columns; making them be leading columns is unwise for the reasons explained in . However, this method doesn't support the case where you want the index to - enforce uniqueness on the key column(s). Also, explicitly marking - non-searchable columns as INCLUDE columns makes the - index slightly smaller, because such columns need not be stored in upper - B-tree levels. + enforce uniqueness on the key column(s). + + + + Suffix truncation always removes non-key + columns from upper B-Tree levels. As payload columns, they are + never used to guide index scans. The truncation process also + removes one or more trailing key column(s) when the remaining + prefix of key column(s) happens to be sufficient to describe tuples + on the lowest B-Tree level. In practice, covering indexes without + an INCLUDE clause often avoid storing columns + that are effectively payload in the upper levels. However, + explicitly defining payload columns as non-key columns + reliably keeps the tuples in upper levels + small. 
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index bc0c614f3b..a1c3a6f705 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -475,7 +475,11 @@ index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source, bool isnull[INDEX_MAX_KEYS]; IndexTuple truncated; - Assert(leavenatts < sourceDescriptor->natts); + Assert(leavenatts <= sourceDescriptor->natts); + + /* Easy case: no truncation actually required */ + if (leavenatts == sourceDescriptor->natts) + return CopyIndexTuple(source); /* Create temporary descriptor to scribble on */ truncdesc = palloc(TupleDescSize(sourceDescriptor)); diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index 3680e69b89..cb9ed61599 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -28,37 +28,50 @@ right-link to find the new page containing the key range you're looking for. This might need to be repeated, if the page has been split more than once. +Lehman and Yao talk about pairs of "separator" keys and downlinks in +internal pages rather than tuples or records. We use the term "pivot" +tuple to refer to tuples which don't point to heap tuples, that are used +only for tree navigation. All tuples on non-leaf pages and high keys on +leaf pages are pivot tuples. Since pivot tuples are only used to represent +which part of the key space belongs on each page, they can have attribute +values copied from non-pivot tuples that were deleted and killed by VACUUM +some time ago. A pivot tuple may contain a "separator" key and downlink, +just a separator key (i.e. the downlink value is implicitly undefined), or +just a downlink (i.e. all attributes are truncated away). We aren't always +clear on which case applies, but it should be obvious from context. + +The requirement that all btree keys be unique is satisfied by treating heap +TID as a tiebreaker attribute. 
Logical duplicates are sorted in heap TID +order. This is necessary because Lehman and Yao also require that the key +range for a subtree S is described by Ki < v <= Ki+1 where Ki and Ki+1 are +the adjacent keys in the parent page (Ki must be _strictly_ less than v, +which can be assured by having reliably unique keys). + +A search where the key is equal to a pivot tuple in an upper tree level +must descend to the left of that pivot to ensure it finds any equal keys. +The equal item(s) being searched for must therefore be to the left of that +downlink page on the next level down. A handy property of this design is +that a scan where all attributes/keys are used behaves just the same as a +scan where only some prefix of attributes are used; equality never needs to +be treated as a special case. + +In practice, exact equality with pivot tuples on internal pages is +extremely rare when all attributes (including even the heap TID attribute) +are used in a search. This is due to suffix truncation: truncated +attributes are treated as having the value negative infinity, and +truncation almost always manages to at least truncate away the trailing +heap TID attribute. While Lehman and Yao don't have anything to say about +suffix truncation, the design used by nbtree is perfectly complementary. +The later section on suffix truncation will be helpful if it's unclear how +the Lehman & Yao invariants work with a real world example involving +suffix truncation. + Differences to the Lehman & Yao algorithm ----------------------------------------- We have made the following changes in order to incorporate the L&Y algorithm into Postgres: -The requirement that all btree keys be unique is too onerous, -but the algorithm won't work correctly without it. Fortunately, it is -only necessary that keys be unique on a single tree level, because L&Y -only use the assumption of key uniqueness when re-finding a key in a -parent page (to determine where to insert the key for a split page). 
-Therefore, we can use the link field to disambiguate multiple -occurrences of the same user key: only one entry in the parent level -will be pointing at the page we had split. (Indeed we need not look at -the real "key" at all, just at the link field.) We can distinguish -items at the leaf level in the same way, by examining their links to -heap tuples; we'd never have two items for the same heap tuple. - -Lehman and Yao assume that the key range for a subtree S is described -by Ki < v <= Ki+1 where Ki and Ki+1 are the adjacent keys in the parent -page. This does not work for nonunique keys (for example, if we have -enough equal keys to spread across several leaf pages, there *must* be -some equal bounding keys in the first level up). Therefore we assume -Ki <= v <= Ki+1 instead. A search that finds exact equality to a -bounding key in an upper tree level must descend to the left of that -key to ensure it finds any equal keys in the preceding page. An -insertion that sees the high key of its target page is equal to the key -to be inserted has a choice whether or not to move right, since the new -key could go on either page. (Currently, we try to find a page where -there is room for the new key without a split.) - Lehman and Yao don't require read locks, but assume that in-memory copies of tree pages are unshared. Postgres shares in-memory buffers among backends. As a result, we do page-level read locking on btree @@ -111,6 +124,16 @@ it is necessary to lock the next page before releasing the current one. This is safe when moving right or up, but not when moving left or down (else we'd create the possibility of deadlocks). +We don't use btree keys to re-find downlinks from parent pages when +inserting a new downlink in parent during page splits. Only one entry +in the parent level will be pointing at the page we just split, so the +link fields can be used to re-find downlinks in the parent via a +linear search. 
We don't need to remember key values during the +initial descent; remembering index block numbers instead works just as +well in practice. This is the only approach that works for indexes +initialized by btree versions predating the use of heap TID as a +tiebreaker attribute. + Lehman and Yao fail to discuss what must happen when the root page becomes full and must be split. Our implementation is to split the root in the same way that any other page would be split, then construct @@ -598,33 +621,53 @@ the order of multiple keys for a given column is unspecified.) An insertion scankey uses the same array-of-ScanKey data structure, but the sk_func pointers point to btree comparison support functions (ie, 3-way comparators that return int4 values interpreted as <0, =0, >0). In an -insertion scankey there is exactly one entry per index column. Insertion -scankeys are built within the btree code (eg, by _bt_mkscankey()) and are -used to locate the starting point of a scan, as well as for locating the -place to insert a new index tuple. (Note: in the case of an insertion -scankey built from a search scankey, there might be fewer keys than -index columns, indicating that we have no constraints for the remaining -index columns.) After we have located the starting point of a scan, the -original search scankey is consulted as each index entry is sequentially -scanned to decide whether to return the entry and whether the scan can -stop (see _bt_checkkeys()). +insertion scankey there is exactly one entry per index column. There is +also other data about the rules used to locate where to begin the scan, +such as whether or not the scan is a "nextkey" scan. Insertion scankeys +are built within the btree code (eg, by _bt_mkscankey()) and are used to +locate the starting point of a scan, as well as for locating the place to +insert a new index tuple. 
(Note: in the case of an insertion scankey built +from a search scankey, there might be fewer keys than index columns, +indicating that we have no constraints for the remaining index columns.) +After we have located the starting point of a scan, the original search +scankey is consulted as each index entry is sequentially scanned to decide +whether to return the entry and whether the scan can stop (see +_bt_checkkeys()). -We use term "pivot" index tuples to distinguish tuples which don't point -to heap tuples, but rather used for tree navigation. Pivot tuples includes -all tuples on non-leaf pages and high keys on leaf pages. Note that pivot -index tuples are only used to represent which part of the key space belongs -on each page, and can have attribute values copied from non-pivot tuples -that were deleted and killed by VACUUM some time ago. In principle, we could -truncate away attributes that are not needed for a page high key during a leaf -page split, provided that the remaining attributes distinguish the last index -tuple on the post-split left page as belonging on the left page, and the first -index tuple on the post-split right page as belonging on the right page. This -optimization is sometimes called suffix truncation, and may appear in a future -release. Since the high key is subsequently reused as the downlink in the -parent page for the new right page, suffix truncation can increase index -fan-out considerably by keeping pivot tuples short. INCLUDE indexes similarly -truncate away non-key attributes at the time of a leaf page split, -increasing fan-out. +Notes about suffix truncation +----------------------------- + +We truncate away suffix key attributes that are not needed for a page high +key during a leaf page split. The remaining attributes must distinguish +the last index tuple on the post-split left page as belonging on the left +page, and the first index tuple on the post-split right page as belonging +on the right page. 
A truncated tuple logically retains all key attributes, +though they implicitly have "negative infinity" as their value, and have no +storage overhead. Since the high key is subsequently reused as the +downlink in the parent page for the new right page, suffix truncation makes +pivot tuples short. INCLUDE indexes are guaranteed to have non-key +attributes truncated at the time of a leaf page split, but may also have +some key attributes truncated away, based on the usual criteria for key +attributes. They are not a special case, since non-key attributes are +merely payload to B-Tree searches. + +The goal of suffix truncation of key attributes is to improve index +fan-out. The technique was first described by Bayer and Unterauer (R.Bayer +and K.Unterauer, Prefix B-Trees, ACM Transactions on Database Systems, Vol +2, No. 1, March 1977, pp 11-26). The Postgres implementation is loosely +based on their paper. Note that Postgres only implements what the paper +refers to as simple prefix B-Trees. Note also that the paper assumes that +the tree has keys that consist of single strings that maintain the "prefix +property", much like strings that are stored in a suffix tree (comparisons +of earlier bytes must always be more significant than comparisons of later +bytes, and, in general, the strings must compare in a way that doesn't +break transitive consistency as they're split into pieces). Suffix +truncation in Postgres currently only works at the whole-attribute +granularity, but it would be straightforward to invent opclass +infrastructure that manufactures a smaller attribute value in the case of +variable-length types, such as text. An opclass support function could +manufacture the shortest possible key value that still correctly separates +each half of a leaf page split. Notes About Data Representation ------------------------------- @@ -637,20 +680,26 @@ don't need to renumber any existing pages when splitting the root.) 
The Postgres disk block data format (an array of items) doesn't fit Lehman and Yao's alternating-keys-and-pointers notion of a disk page, -so we have to play some games. +so we have to play some games. (Presumably things are explained this +way because of internal page splits, which conceptually split at the +middle of an existing pivot tuple -- the tuple's "separator" key goes on +the left side of the split as the left side's new high key, while the +tuple's pointer/downlink goes on the right side as the first/minus +infinity downlink.) On a page that is not rightmost in its tree level, the "high key" is kept in the page's first item, and real data items start at item 2. The link portion of the "high key" item goes unused. A page that is -rightmost has no "high key", so data items start with the first item. -Putting the high key at the left, rather than the right, may seem odd, -but it avoids moving the high key as we add data items. +rightmost has no "high key" (it's implicitly positive infinity), so +data items start with the first item. Putting the high key at the +left, rather than the right, may seem odd, but it avoids moving the +high key as we add data items. On a leaf page, the data items are simply links to (TIDs of) tuples in the relation being indexed, with the associated key values. On a non-leaf page, the data items are down-links to child pages with -bounding keys. The key in each data item is the *lower* bound for +bounding keys. The key in each data item is a strict lower bound for keys on that child page, so logically the key is to the left of that downlink. The high key (if present) is the upper bound for the last downlink. The first data item on each such page has no lower bound @@ -658,4 +707,5 @@ downlink. The first data item on each such page has no lower bound routines must treat it accordingly. The actual key stored in the item is irrelevant, and need not be stored at all. 
This arrangement corresponds to the fact that an L&Y non-leaf page has one more pointer -than key. +than key. Suffix truncation's negative infinity attributes behave in +the same way. diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index cbc07d316b..04c6023cba 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -72,7 +72,7 @@ static void _bt_insertonpg(Relation rel, Buffer buf, Buffer cbuf, bool split_only_page); static Buffer _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, OffsetNumber newitemoff, Size newitemsz, - IndexTuple newitem, bool newitemonleft); + IndexTuple newitem, bool newitemonleft, bool truncate); static void _bt_insert_parent(Relation rel, Buffer buf, Buffer rbuf, BTStack stack, bool is_root, bool is_only); static OffsetNumber _bt_findsplitloc(Relation rel, Page page, @@ -115,13 +115,16 @@ _bt_doinsert(Relation rel, IndexTuple itup, BTStack stack = NULL; Buffer buf; Page page; - OffsetNumber offset; BTPageOpaque lpageop; bool fastpath; bool checkingunique = (checkUnique != UNIQUE_CHECK_NO); /* we need an insertion scan key to do our search, so build one */ - itup_scankey = _bt_mkscankey(rel, itup); + itup_scankey = _bt_mkscankey(rel, itup, false); +top: + /* Cannot use real heap TID in unique case -- it'll be restored later */ + if (itup_scankey->heapkeyspace && checkingunique) + itup_scankey->scantid = NULL; /* * It's very common to have an index on an auto-incremented or @@ -142,9 +145,7 @@ _bt_doinsert(Relation rel, IndexTuple itup, * other backend might be concurrently inserting into the page, thus * reducing our chances to finding an insertion place in this page. 
*/ -top: fastpath = false; - offset = InvalidOffsetNumber; if (RelationGetTargetBlock(rel) != InvalidBlockNumber) { Size itemsz; @@ -226,12 +227,13 @@ top: * NOTE: obviously, _bt_check_unique can only detect keys that are already * in the index; so it cannot defend against concurrent insertions of the * same key. We protect against that by means of holding a write lock on - * the target page. Any other would-be inserter of the same key must - * acquire a write lock on the same target page, so only one would-be - * inserter can be making the check at one time. Furthermore, once we are - * past the check we hold write locks continuously until we have performed - * our insertion, so no later inserter can fail to see our insertion. - * (This requires some care in _bt_findinsertloc.) + * the first page the value could be on, regardless of the value of its + * implicit heap TID tie-breaker attribute. Any other would-be inserter + * of the same key must acquire a write lock on the same page, so only one + * would-be inserter can be making the check at one time. Furthermore, + * once we are past the check we hold write locks continuously until we + * have performed our insertion, so no later inserter can fail to see our + * insertion. (This requires some care in _bt_findinsertloc.) * * If we must wait for another xact, we release the lock while waiting, * and then must start over completely. @@ -244,6 +246,7 @@ top: { TransactionId xwait; uint32 speculativeToken; + OffsetNumber offset; page = BufferGetPage(buf); lpageop = (BTPageOpaque) PageGetSpecialPointer(page); @@ -277,6 +280,10 @@ top: _bt_freestack(stack); goto top; } + + /* Uniqueness is established -- restore heap tid as scantid */ + if (itup_scankey->heapkeyspace) + itup_scankey->scantid = &itup->t_tid; } if (checkUnique != UNIQUE_CHECK_EXISTING) @@ -293,7 +300,7 @@ top: * attributes are not considered part of the key space. 
*/ CheckForSerializableConflictIn(rel, NULL, buf); - /* do the insertion */ + /* do the insertion, possibly on a page to the right in unique case */ insertoff = _bt_findinsertloc(rel, itup_scankey, &buf, checkingunique, itup, stack, heapRel); _bt_insertonpg(rel, buf, InvalidBuffer, stack, itup, insertoff, false); @@ -346,6 +353,7 @@ _bt_check_unique(Relation rel, BTScanInsert itup_scankey, bool found = false; /* Assume unique until we find a duplicate */ + Assert(itup_scankey->scantid == NULL); *is_unique = true; /* _bt_binsrch() alone may determine that there are no duplicates */ @@ -567,6 +575,7 @@ _bt_check_unique(Relation rel, BTScanInsert itup_scankey, if (P_RIGHTMOST(opaque)) break; highkeycmp = _bt_compare(rel, itup_scankey, page, P_HIKEY); + /* scantid-less scankey should be <= hikey */ Assert(highkeycmp <= 0); if (highkeycmp != 0) break; @@ -614,16 +623,16 @@ notfound: /* * _bt_findinsertloc() -- Finds an insert location for a new tuple * - * If the new key is equal to one or more existing keys, we can - * legitimately place it anywhere in the series of equal keys --- in fact, - * if the new key is equal to the page's "high key" we can place it on - * the next page. If it is equal to the high key, and there's not room - * to insert the new tuple on the current page without splitting, then - * we can move right hoping to find more free space and avoid a split. - * (We should not move right indefinitely, however, since that leads to - * O(N^2) insertion behavior in the presence of many equal keys.) - * Once we have chosen the page to put the key on, we'll insert it before - * any existing equal keys because of the way _bt_binsrch() works. + * On entry, *bufptr contains the page that the new tuple unambiguously + * belongs on. This may not be quite right for callers that just called + * _bt_check_unique(), though, since they won't have initially searched + * using a scantid. 
They'll have to insert into a page somewhere to the + right in rare cases where there are many physical duplicates in a + unique index, and their scantid directs us to some page full of + duplicates to the right, where the new tuple must go. (Actually, + since !heapkeyspace pg_upgrade'd non-unique indexes never get a + scantid, they too may require that we move right. We treat them + somewhat like unique indexes.) * * _bt_check_unique() callers arrange for their insertion scan key to save the progress of the last binary search performed. No additional @@ -666,46 +675,66 @@ _bt_findinsertloc(Relation rel, itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we * need to be consistent */ - /* - * Check whether the item can fit on a btree page at all. (Eventually, we - * ought to try to apply TOAST methods if not.) We actually need to be - * able to fit three items on every page, so restrict any one item to 1/3 - * the per-page available space. Note that at this point, itemsz doesn't - * include the ItemId. - * - * NOTE: if you change this, see also the similar code in _bt_buildadd(). - */ + /* Check 1/3 of a page restriction */ if (itemsz > BTMaxItemSize(page)) - ereport(ERROR, - (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - itemsz, BTMaxItemSize(page), - RelationGetRelationName(rel)), - errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" - "Consider a function index of an MD5 hash of the value, " - "or use full text indexing."), - errtableconstraint(heapRel, - RelationGetRelationName(rel)))); + _bt_check_third_page(rel, heapRel, itup_scankey->heapkeyspace, page, + newtup); + /* + * We may have to walk right through leaf pages to find the one leaf page + * that we must insert on to, though only when inserting into unique + * indexes. 
This is necessary because a scantid is not used by the + * insertion scan key initially in the case of unique indexes -- a scantid + * is only set after the absence of duplicates (whose heap tuples are not + * dead or recently dead) has been established by _bt_check_unique(). + * Non-unique index insertions will break out of the loop immediately. + * + * (Actually, non-unique indexes may still need to grovel through leaf + * pages full of duplicates with a pg_upgrade'd !heapkeyspace index.) + */ Assert(P_ISLEAF(lpageop) && !P_INCOMPLETE_SPLIT(lpageop)); + Assert(!itup_scankey->heapkeyspace || itup_scankey->scantid != NULL); + Assert(itup_scankey->heapkeyspace || itup_scankey->scantid == NULL); for (;;) { Buffer rbuf; BlockNumber rblkno; int cmpval; + /* + * No need to check high key when inserting into a non-unique index -- + * _bt_search() already checked this when it checked if a move to the + * right was required. Insertion scankey's scantid would have been + * filled out at the time. + */ + if (itup_scankey->heapkeyspace && !checkingunique) + { + Assert(P_RIGHTMOST(lpageop) || + _bt_compare(rel, itup_scankey, page, P_HIKEY) <= 0); + break; + } + if (P_RIGHTMOST(lpageop)) break; cmpval = _bt_compare(rel, itup_scankey, page, P_HIKEY); - - /* - * May have to handle case where there is a choice of which page to - * place new tuple on, and we must balance space utilization as best - * we can. - */ - if (cmpval != 0 || _bt_useduplicatepage(rel, heapRel, buf, - &restorebinsrch, itemsz)) - break; + if (itup_scankey->heapkeyspace) + { + if (cmpval <= 0) + break; + } + else + { + /* + * pg_upgrade'd !heapkeyspace index. + * + * May have to handle legacy case where there is a choice of which + * page to place new tuple on, and we must balance space + * utilization as best we can. 
+ */ + if (cmpval != 0 || _bt_useduplicatepage(rel, heapRel, buf, + &restorebinsrch, itemsz)) + break; + } /* * step right to next non-dead page @@ -714,6 +743,8 @@ _bt_findinsertloc(Relation rel, * page; else someone else's _bt_check_unique scan could fail to see * our insertion. write locks on intermediate dead pages won't do * because we don't know when they will get de-linked from the tree. + * (this is more aggressive than it needs to be for non-unique + * !heapkeyspace indexes.) */ rbuf = InvalidBuffer; @@ -728,7 +759,10 @@ _bt_findinsertloc(Relation rel, * If this page was incompletely split, finish the split now. We * do this while holding a lock on the left sibling, which is not * good because finishing the split could be a fairly lengthy - * operation. But this should happen very seldom. + * operation. But this should only happen when inserting into a + * unique index that has more than an entire page for duplicates + * of the value being inserted. (!heapkeyspace non-unique indexes + * are an exception, once again.) */ if (P_INCOMPLETE_SPLIT(lpageop)) { @@ -777,6 +811,11 @@ _bt_findinsertloc(Relation rel, /* * _bt_useduplicatepage() -- Settle for this page of duplicates? * + * Prior to PostgreSQL 12/Btree version 4, heap TID was never treated + * as a part of the keyspace. If there were many tuples of the same + * value spanning more than one leaf page, a new tuple of that same + * value could legally be placed on any one of the pages. + * * This function handles the question of whether or not an insertion * of a duplicate into a pg_upgrade'd !heapkeyspace index should * insert on the page contained in buf when a choice must be made. 
@@ -878,6 +917,8 @@ _bt_insertonpg(Relation rel, BTPageOpaque lpageop; OffsetNumber firstright = InvalidOffsetNumber; Size itemsz; + int indnatts = IndexRelationGetNumberOfAttributes(rel); + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); page = BufferGetPage(buf); lpageop = (BTPageOpaque) PageGetSpecialPointer(page); @@ -885,12 +926,9 @@ _bt_insertonpg(Relation rel, /* child buffer must be given iff inserting on an internal page */ Assert(P_ISLEAF(lpageop) == !BufferIsValid(cbuf)); /* tuple must have appropriate number of attributes */ - Assert(!P_ISLEAF(lpageop) || - BTreeTupleGetNAtts(itup, rel) == - IndexRelationGetNumberOfAttributes(rel)); - Assert(P_ISLEAF(lpageop) || - BTreeTupleGetNAtts(itup, rel) == - IndexRelationGetNumberOfKeyAttributes(rel)); + Assert(BTreeTupleGetNAtts(itup, rel) > 0); + Assert(!P_ISLEAF(lpageop) || BTreeTupleGetNAtts(itup, rel) == indnatts); + Assert(P_ISLEAF(lpageop) || BTreeTupleGetNAtts(itup, rel) <= indnkeyatts); /* The caller should've finished any incomplete splits already. */ if (P_INCOMPLETE_SPLIT(lpageop)) @@ -912,6 +950,7 @@ _bt_insertonpg(Relation rel, { bool is_root = P_ISROOT(lpageop); bool is_only = P_LEFTMOST(lpageop) && P_RIGHTMOST(lpageop); + bool truncate; bool newitemonleft; Buffer rbuf; @@ -938,9 +977,16 @@ _bt_insertonpg(Relation rel, newitemoff, itemsz, &newitemonleft); + /* + * Perform truncation of the new high key for the left half of the + * split when splitting a leaf page. Don't do so with version 3 + * indexes unless the index has non-key attributes. 
+ */ + truncate = P_ISLEAF(lpageop) && + (_bt_heapkeyspace(rel) || indnatts != indnkeyatts); /* split the buffer into left and right halves */ rbuf = _bt_split(rel, buf, cbuf, firstright, - newitemoff, itemsz, itup, newitemonleft); + newitemoff, itemsz, itup, newitemonleft, truncate); PredicateLockPageSplit(rel, BufferGetBlockNumber(buf), BufferGetBlockNumber(rbuf)); @@ -980,7 +1026,8 @@ _bt_insertonpg(Relation rel, * only one on its tree level, but was not the root, it may have been * the "fast root". We need to ensure that the fast root link points * at or above the current page. We can safely acquire a lock on the - * metapage here --- see comments for _bt_newroot(). + * metapage here --- see comments for _bt_heapkeyspace() and + * _bt_newroot(). */ if (split_only_page) { @@ -1022,7 +1069,7 @@ _bt_insertonpg(Relation rel, if (BufferIsValid(metabuf)) { /* upgrade meta-page if needed */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) _bt_upgrademetapage(metapg); metad->btm_fastroot = itup_blkno; metad->btm_fastlevel = lpageop->btpo.level; @@ -1077,6 +1124,8 @@ _bt_insertonpg(Relation rel, if (BufferIsValid(metabuf)) { + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + xlmeta.version = metad->btm_version; xlmeta.root = metad->btm_root; xlmeta.level = metad->btm_level; xlmeta.fastroot = metad->btm_fastroot; @@ -1142,7 +1191,10 @@ _bt_insertonpg(Relation rel, * On entry, buf is the page to split, and is pinned and write-locked. * firstright is the item index of the first item to be moved to the * new right page. newitemoff etc. tell us about the new item that - * must be inserted along with the data from the old page. + * must be inserted along with the data from the old page. truncate + * tells us if the new high key should undergo suffix truncation. + * (Version 4 pivot tuples always have an explicit representation of + * the number of non-truncated attributes that remain.) 
* * When splitting a non-leaf page, 'cbuf' is the left-sibling of the * page we're inserting the downlink for. This function will clear the @@ -1154,7 +1206,7 @@ _bt_insertonpg(Relation rel, static Buffer _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem, - bool newitemonleft) + bool newitemonleft, bool truncate) { Buffer rbuf; Page origpage; @@ -1177,8 +1229,6 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, OffsetNumber i; bool isleaf; IndexTuple lefthikey; - int indnatts = IndexRelationGetNumberOfAttributes(rel); - int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); /* Acquire a new page to split into */ rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); @@ -1249,7 +1299,9 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, itemid = PageGetItemId(origpage, P_HIKEY); itemsz = ItemIdGetLength(itemid); item = (IndexTuple) PageGetItem(origpage, itemid); - Assert(BTreeTupleGetNAtts(item, rel) == indnkeyatts); + Assert(BTreeTupleGetNAtts(item, rel) > 0); + Assert(BTreeTupleGetNAtts(item, rel) <= + IndexRelationGetNumberOfKeyAttributes(rel)); if (PageAddItem(rightpage, (Item) item, itemsz, rightoff, false, false) == InvalidOffsetNumber) { @@ -1263,8 +1315,9 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, /* * The "high key" for the new left page will be the first key that's going - * to go into the new right page. This might be either the existing data - * item at position firstright, or the incoming tuple. + * to go into the new right page, or possibly a truncated version if this + * is a leaf page split. This might be either the existing data item at + * position firstright, or the incoming tuple. 
*/ leftoff = P_HIKEY; if (!newitemonleft && newitemoff == firstright) @@ -1282,25 +1335,60 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, } /* - * Truncate non-key (INCLUDE) attributes of the high key item before - * inserting it on the left page. This only needs to happen at the leaf - * level, since in general all pivot tuple values originate from leaf - * level high keys. This isn't just about avoiding unnecessary work, - * though; truncating unneeded key attributes (more aggressive suffix - * truncation) can only be performed at the leaf level anyway. This is - * because a pivot tuple in a grandparent page must guide a search not - * only to the correct parent page, but also to the correct leaf page. + * Truncate nondistinguishing key attributes of the high key item before + * inserting it on the left page. This can only happen at the leaf level, + * since in general all pivot tuple values originate from leaf level high + * keys. This isn't just about avoiding unnecessary work, though; + * truncating unneeded key suffix attributes can only be performed at the + * leaf level anyway. This is because a pivot tuple in a grandparent page + * must guide a search not only to the correct parent page, but also to + * the correct leaf page. */ - if (indnatts != indnkeyatts && isleaf) + if (truncate) { - lefthikey = _bt_nonkey_truncate(rel, item); + IndexTuple lastleft; + + /* + * Determine which tuple will become the last on the left page. The + * last left tuple and the first right tuple enclose the split point, + * and are needed to determine how far truncation can go while still + * leaving us with a high key that distinguishes the left side from + * the right side. 
+ */ + Assert(isleaf); + if (newitemonleft && newitemoff == firstright) + { + /* incoming tuple will become last on left page */ + lastleft = newitem; + } + else + { + OffsetNumber lastleftoff; + + /* item just before firstright will become last on left page */ + lastleftoff = OffsetNumberPrev(firstright); + Assert(lastleftoff >= P_FIRSTDATAKEY(oopaque)); + itemid = PageGetItemId(origpage, lastleftoff); + lastleft = (IndexTuple) PageGetItem(origpage, itemid); + } + + /* + * Truncate first item on the right side to create a new high key for + * the left side. The high key must be strictly less than all tuples + * on the right side of the split, but can be equal to the last item + * on the left side of the split. + */ + Assert(lastleft != item); + lefthikey = _bt_truncate(rel, lastleft, item, false); itemsz = IndexTupleSize(lefthikey); itemsz = MAXALIGN(itemsz); } else lefthikey = item; - Assert(BTreeTupleGetNAtts(lefthikey, rel) == indnkeyatts); + Assert(BTreeTupleGetNAtts(lefthikey, rel) > 0); + Assert(BTreeTupleGetNAtts(lefthikey, rel) <= + IndexRelationGetNumberOfKeyAttributes(rel)); if (PageAddItem(leftpage, (Item) lefthikey, itemsz, leftoff, false, false) == InvalidOffsetNumber) { @@ -1493,7 +1581,6 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, xl_btree_split xlrec; uint8 xlinfo; XLogRecPtr recptr; - bool loglhikey = false; xlrec.level = ropaque->btpo.level; xlrec.firstright = firstright; @@ -1522,22 +1609,10 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, if (newitemonleft) XLogRegisterBufData(0, (char *) newitem, MAXALIGN(newitemsz)); - /* Log left page */ - if (!isleaf || indnatts != indnkeyatts) - { - /* - * We must also log the left page's high key. There are two - * reasons for that: right page's leftmost key is suppressed on - * non-leaf levels and in covering indexes included columns are - * truncated from high keys. 
Show it as belonging to the left - * page buffer, so that it is not stored if XLogInsert decides it - * needs a full-page image of the left page. - */ - itemid = PageGetItemId(origpage, P_HIKEY); - item = (IndexTuple) PageGetItem(origpage, itemid); - XLogRegisterBufData(0, (char *) item, MAXALIGN(IndexTupleSize(item))); - loglhikey = true; - } + /* Log left page. We must also log the left page's high key. */ + itemid = PageGetItemId(origpage, P_HIKEY); + item = (IndexTuple) PageGetItem(origpage, itemid); + XLogRegisterBufData(0, (char *) item, MAXALIGN(IndexTupleSize(item))); /* * Log the contents of the right page in the format understood by @@ -1555,9 +1630,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, (char *) rightpage + ((PageHeader) rightpage)->pd_upper, ((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper); - xlinfo = newitemonleft ? - (loglhikey ? XLOG_BTREE_SPLIT_L_HIGHKEY : XLOG_BTREE_SPLIT_L) : - (loglhikey ? XLOG_BTREE_SPLIT_R_HIGHKEY : XLOG_BTREE_SPLIT_R); + xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R; recptr = XLogInsert(RM_BTREE_ID, xlinfo); PageSetLSN(origpage, recptr); @@ -1920,7 +1993,7 @@ _bt_insert_parent(Relation rel, _bt_relbuf(rel, pbuf); } - /* get high key from left page == lower bound for new right page */ + /* get high key from left, a strict lower bound for new right page */ ritem = (IndexTuple) PageGetItem(page, PageGetItemId(page, P_HIKEY)); @@ -2137,11 +2210,9 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) * We've just split the old root page and need to create a new one. * In order to do this, we add a new root page to the file, then lock * the metadata page and update it. This is guaranteed to be deadlock- - * free, because all readers release their locks on the metadata page - * before trying to lock the root, and all writers lock the root before - * trying to lock the metadata page. 
We have a write lock on the old - * root page, so we have not introduced any cycles into the waits-for - * graph. + * free, for the same reason the frequent calls to _bt_heapkeyspace() + * are guaranteed safe. We have a write lock on the old root page, so + * we have not introduced any cycles into the waits-for graph. * * On entry, lbuf (the old root) and rbuf (its new peer) are write- * locked. On exit, a new root page exists with entries for the @@ -2210,7 +2281,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) START_CRIT_SECTION(); /* upgrade metapage if needed */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) _bt_upgrademetapage(metapg); /* set btree special data */ @@ -2245,7 +2316,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) /* * insert the right page pointer into the new root page. */ - Assert(BTreeTupleGetNAtts(right_item, rel) == + Assert(BTreeTupleGetNAtts(right_item, rel) > 0); + Assert(BTreeTupleGetNAtts(right_item, rel) <= IndexRelationGetNumberOfKeyAttributes(rel)); if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY, false, false) == InvalidOffsetNumber) @@ -2278,6 +2350,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD); XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + md.version = metad->btm_version; md.root = rootblknum; md.level = metad->btm_level; md.fastroot = rootblknum; @@ -2342,6 +2416,7 @@ _bt_pgaddtup(Page page, { trunctuple = *itup; trunctuple.t_info = sizeof(IndexTupleData); + /* Deliberately zero INDEX_ALT_TID_MASK bits */ BTreeTupleSetNAtts(&trunctuple, 0); itup = &trunctuple; itemsize = sizeof(IndexTupleData); @@ -2357,8 +2432,8 @@ _bt_pgaddtup(Page page, /* * _bt_isequal - used in _bt_doinsert in check for duplicates. * - * This is very similar to _bt_compare, except for NULL handling. 
- * Rule is simple: NOT_NULL not equal NULL, NULL not equal NULL too. + * This is very similar to _bt_compare, except for NULL and negative infinity + * handling. Rule is simple: NOT_NULL not equal NULL, NULL not equal NULL too. */ static bool _bt_isequal(TupleDesc itupdesc, BTScanInsert itup_scankey, Page page, diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index d0cf73718f..b0b58850a4 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -34,6 +34,7 @@ #include "utils/snapmgr.h" static void _bt_cachemetadata(Relation rel, BTMetaPageData *metad); +static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf); static bool _bt_mark_page_halfdead(Relation rel, Buffer buf, BTStack stack); static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty); @@ -77,7 +78,9 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) } /* - * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to the new. + * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to version + * 3, the last version that can be updated without broadly affecting on-disk + * compatibility. (A REINDEX is required to upgrade to version 4.) * * This routine does purely in-memory image upgrade. Caller is * responsible for locking, WAL-logging etc. 
@@ -93,11 +96,11 @@ _bt_upgrademetapage(Page page) /* It must be really a meta page of upgradable version */ Assert(metaopaque->btpo_flags & BTP_META); - Assert(metad->btm_version < BTREE_VERSION); + Assert(metad->btm_version < BTREE_NOVAC_VERSION); Assert(metad->btm_version >= BTREE_MIN_VERSION); /* Set version number and fill extra fields added into version 3 */ - metad->btm_version = BTREE_VERSION; + metad->btm_version = BTREE_NOVAC_VERSION; metad->btm_oldest_btpo_xact = InvalidTransactionId; metad->btm_last_cleanup_num_heap_tuples = -1.0; @@ -107,43 +110,79 @@ _bt_upgrademetapage(Page page) } /* - * Cache metadata from meta page to rel->rd_amcache. + * Cache metadata from input meta page to rel->rd_amcache. */ static void -_bt_cachemetadata(Relation rel, BTMetaPageData *metad) +_bt_cachemetadata(Relation rel, BTMetaPageData *input) { + BTMetaPageData *cached_metad; + /* We assume rel->rd_amcache was already freed by caller */ Assert(rel->rd_amcache == NULL); rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt, sizeof(BTMetaPageData)); - /* - * Meta page should be of supported version (should be already checked by - * caller). - */ - Assert(metad->btm_version >= BTREE_MIN_VERSION && - metad->btm_version <= BTREE_VERSION); + /* Meta page should be of supported version */ + Assert(input->btm_version >= BTREE_MIN_VERSION && + input->btm_version <= BTREE_VERSION); - if (metad->btm_version == BTREE_VERSION) + cached_metad = (BTMetaPageData *) rel->rd_amcache; + if (input->btm_version >= BTREE_NOVAC_VERSION) { - /* Last version of meta-data, no need to upgrade */ - memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData)); + /* Version with compatible meta-data, no need to upgrade */ + memcpy(cached_metad, input, sizeof(BTMetaPageData)); } else { - BTMetaPageData *cached_metad = (BTMetaPageData *) rel->rd_amcache; - /* * Upgrade meta-data: copy available information from meta-page and * fill new fields with default values. 
+ * + * Note that we cannot upgrade to version 4+ without a REINDEX, since + * extensive on-disk changes are required. */ - memcpy(rel->rd_amcache, metad, offsetof(BTMetaPageData, btm_oldest_btpo_xact)); - cached_metad->btm_version = BTREE_VERSION; + memcpy(cached_metad, input, offsetof(BTMetaPageData, btm_oldest_btpo_xact)); + cached_metad->btm_version = BTREE_NOVAC_VERSION; cached_metad->btm_oldest_btpo_xact = InvalidTransactionId; cached_metad->btm_last_cleanup_num_heap_tuples = -1.0; } } +/* + * Get metadata from share-locked buffer containing metapage, while performing + * standard sanity checks. Sanity checks here must match _bt_getroot(). + */ +static BTMetaPageData * +_bt_getmeta(Relation rel, Buffer metabuf) +{ + Page metapg; + BTPageOpaque metaopaque; + BTMetaPageData *metad; + + metapg = BufferGetPage(metabuf); + metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg); + metad = BTPageGetMeta(metapg); + + /* sanity-check the metapage */ + if (!P_ISMETA(metaopaque) || + metad->btm_magic != BTREE_MAGIC) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("index \"%s\" is not a btree", + RelationGetRelationName(rel)))); + + if (metad->btm_version < BTREE_MIN_VERSION || + metad->btm_version > BTREE_VERSION) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("version mismatch in index \"%s\": file version %d, " + "current version %d, minimal supported version %d", + RelationGetRelationName(rel), + metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); + + return metad; +} + /* * _bt_update_meta_cleanup_info() -- Update cleanup-related information in * the metapage. 
@@ -167,7 +206,7 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, metad = BTPageGetMeta(metapg); /* outdated version of metapage always needs rewrite */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) needsRewrite = true; else if (metad->btm_oldest_btpo_xact != oldestBtpoXact || metad->btm_last_cleanup_num_heap_tuples != numHeapTuples) @@ -186,7 +225,7 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, START_CRIT_SECTION(); /* upgrade meta-page if needed */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) _bt_upgrademetapage(metapg); /* update cleanup-related information */ @@ -202,6 +241,8 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, XLogBeginInsert(); XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + md.version = metad->btm_version; md.root = metad->btm_root; md.level = metad->btm_level; md.fastroot = metad->btm_fastroot; @@ -376,7 +417,7 @@ _bt_getroot(Relation rel, int access) START_CRIT_SECTION(); /* upgrade metapage if needed */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) _bt_upgrademetapage(metapg); metad->btm_root = rootblkno; @@ -400,6 +441,8 @@ _bt_getroot(Relation rel, int access) XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT); XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + md.version = metad->btm_version; md.root = rootblkno; md.level = 0; md.fastroot = rootblkno; @@ -492,7 +535,8 @@ _bt_getroot(Relation rel, int access) * from whatever non-root page we were at. If we ever do need to lock the * one true root page, we could loop here, re-reading the metapage on each * failure. 
(Note that it wouldn't do to hold the lock on the metapage while - * moving to the root --- that'd deadlock against any concurrent root split.) + * moving to the root --- that'd deadlock against certain concurrent calls to + * _bt_heapkeyspace(), or any concurrent root page split.) */ Buffer _bt_gettrueroot(Relation rel) @@ -595,37 +639,12 @@ _bt_getrootheight(Relation rel) { BTMetaPageData *metad; - /* - * We can get what we need from the cached metapage data. If it's not - * cached yet, load it. Sanity checks here must match _bt_getroot(). - */ if (rel->rd_amcache == NULL) { Buffer metabuf; - Page metapg; - BTPageOpaque metaopaque; metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); - metapg = BufferGetPage(metabuf); - metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg); - metad = BTPageGetMeta(metapg); - - /* sanity-check the metapage */ - if (!P_ISMETA(metaopaque) || - metad->btm_magic != BTREE_MAGIC) - ereport(ERROR, - (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("index \"%s\" is not a btree", - RelationGetRelationName(rel)))); - - if (metad->btm_version < BTREE_MIN_VERSION || - metad->btm_version > BTREE_VERSION) - ereport(ERROR, - (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, " - "current version %d, minimal supported version %d", - RelationGetRelationName(rel), - metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); + metad = _bt_getmeta(rel, metabuf); /* * If there's no root page yet, _bt_getroot() doesn't expect a cache @@ -642,19 +661,80 @@ _bt_getrootheight(Relation rel) * Cache the metapage data for next time */ _bt_cachemetadata(rel, metad); - + /* We shouldn't have cached it if any of these fail */ + Assert(metad->btm_magic == BTREE_MAGIC); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + Assert(metad->btm_fastroot != P_NONE); _bt_relbuf(rel, metabuf); } + /* Get cached page */ metad = (BTMetaPageData *) rel->rd_amcache; - /* We shouldn't have cached it if any of these fail */ - 
Assert(metad->btm_magic == BTREE_MAGIC); - Assert(metad->btm_version == BTREE_VERSION); - Assert(metad->btm_fastroot != P_NONE); return metad->btm_fastlevel; } +/* + * _bt_heapkeyspace() -- is heap TID being treated as a key? + * + * This is used to determine the rules that must be used to descend a + * btree. Version 4 indexes treat heap TID as a tie-breaker attribute. + * pg_upgrade'd version 3 indexes need extra steps to preserve reasonable + * performance when inserting a new BTScanInsert-wise duplicate tuple + * among many leaf pages already full of such duplicates. + * + * Calling here with locks on other pages in the index is guaranteed to + * be deadlock-free, because all readers release their locks on the + * metadata page before trying to lock any other page, and all writers + * lock other pages before trying to lock the metadata page + * (_bt_getbuf() may be called with a buffer lock on the metapage held + * to allocate a new root page, but _bt_getbuf() is careful about + * deadlocks when recycling a page from the FSM). It is natural to + * buffer lock the metapage last and release its buffer lock first, + * since nobody insists on reliably reaching the current true root. + */ +bool +_bt_heapkeyspace(Relation rel) +{ + BTMetaPageData *metad; + + if (rel->rd_amcache == NULL) + { + Buffer metabuf; + + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); + metad = _bt_getmeta(rel, metabuf); + + /* + * If there's no root page yet, _bt_getroot() doesn't expect a cache + * to be made, so just stop here. (XXX perhaps _bt_getroot() should + * be changed to allow this case.) 
+ */ + if (metad->btm_root == P_NONE) + { + uint32 btm_version = metad->btm_version; + + _bt_relbuf(rel, metabuf); + return btm_version > BTREE_NOVAC_VERSION; + } + + /* + * Cache the metapage data for next time + */ + _bt_cachemetadata(rel, metad); + /* We shouldn't have cached it if any of these fail */ + Assert(metad->btm_magic == BTREE_MAGIC); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + Assert(metad->btm_fastroot != P_NONE); + _bt_relbuf(rel, metabuf); + } + + /* Get cached page */ + metad = (BTMetaPageData *) rel->rd_amcache; + + return metad->btm_version > BTREE_NOVAC_VERSION; +} + /* * _bt_checkpage() -- Verify that a freshly-read page looks sane. */ @@ -1420,10 +1500,21 @@ _bt_pagedel(Relation rel, Buffer buf) } /* we need an insertion scan key for the search, so build one */ - itup_scankey = _bt_mkscankey(rel, targetkey); + itup_scankey = _bt_mkscankey(rel, targetkey, false); + /* high key may have minus infinity (truncated) attributes */ + itup_scankey->minusinfkey = true; /* get stack to leaf page by searching index */ stack = _bt_search(rel, itup_scankey, &lbuf, BT_READ, NULL); + /* + * Search will reliably relocate same leaf page. + * + * (However, prior to version 4 the search is for the leftmost + * leaf page containing this key, which is okay because we + * will tiebreak on downlink block number.) + */ + Assert(!itup_scankey->heapkeyspace || + BufferGetBlockNumber(buf) == BufferGetBlockNumber(lbuf)); /* don't need a lock or second pin on the page */ _bt_relbuf(rel, lbuf); @@ -1890,7 +1981,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) * half-dead, even though it theoretically could occur. * * We can safely acquire a lock on the metapage here --- see comments for - * _bt_newroot(). + * _bt_heapkeyspace() and _bt_newroot(). 
*/ if (leftsib == P_NONE && rightsib_is_rightmost) { @@ -1969,7 +2060,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) if (BufferIsValid(metabuf)) { /* upgrade metapage if needed */ - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) _bt_upgrademetapage(metapg); metad->btm_fastroot = rightsib; metad->btm_fastlevel = targetlevel; @@ -2017,6 +2108,8 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) { XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); + Assert(metad->btm_version >= BTREE_NOVAC_VERSION); + xlmeta.version = metad->btm_version; xlmeta.root = metad->btm_root; xlmeta.level = metad->btm_level; xlmeta.fastroot = metad->btm_fastroot; diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 98917de2ef..ec2edae850 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -794,7 +794,7 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info) metapg = BufferGetPage(metabuf); metad = BTPageGetMeta(metapg); - if (metad->btm_version < BTREE_VERSION) + if (metad->btm_version < BTREE_NOVAC_VERSION) { /* * Do cleanup if metapage needs upgrade, because we don't have diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 9e44e88190..701115f5b9 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -25,6 +25,10 @@ #include "utils/tqual.h" +static inline int32 _bt_nonpivot_compare(Relation rel, + BTScanInsert key, + Page page, + OffsetNumber offnum); static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum); static void _bt_saveitem(BTScanOpaque so, int itemIndex, @@ -155,8 +159,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, * downlink (block) to uniquely identify the index entry, in case it * moves right while we're working lower in the tree. 
See the paper * by Lehman and Yao for how this is detected and handled. (We use the - * child link to disambiguate duplicate keys in the index -- Lehman - * and Yao disallow duplicate keys.) + * child link to disambiguate duplicate keys in the index, which is + * required when dealing with pg_upgrade'd !heapkeyspace indexes.) */ new_stack = (BTStack) palloc(sizeof(BTStackData)); new_stack->bts_blkno = par_blkno; @@ -254,11 +258,15 @@ _bt_moveright(Relation rel, /* * When nextkey = false (normal case): if the scan key that brought us to * this page is > the high key stored on the page, then the page has split - * and we need to move right. (If the scan key is equal to the high key, - * we might or might not need to move right; have to scan the page first - * anyway.) + * and we need to move right. (pg_upgrade'd !heapkeyspace indexes could + * have some duplicates to the right as well as the left, but that's + * something that's only ever dealt with on the leaf level, after + * _bt_search has found an initial leaf page. Duplicate pivots on + * internal pages are useless to all index scans, which was a flaw in the + * old design.) * * When nextkey = true: move right if the scan key is >= page's high key. + * (Note that key.scantid cannot be set in this case.) * * The page could even have split more than once, so scan as far as * needed. 
@@ -363,6 +371,11 @@ _bt_binsrch(Relation rel, isleaf = P_ISLEAF(opaque); Assert(!(key->restorebinsrch && key->savebinsrch)); + /* Requesting nextkey semantics while using scantid seems nonsensical */ + Assert(!key->nextkey || key->scantid == NULL); + /* Restore binary search state when scantid is available */ + Assert(!key->savebinsrch || key->scantid == NULL); + Assert(!key->heapkeyspace || !key->restorebinsrch || key->scantid != NULL); Assert(P_ISLEAF(opaque) || (!key->restorebinsrch && !key->savebinsrch)); if (!key->restorebinsrch) @@ -422,7 +435,10 @@ _bt_binsrch(Relation rel, /* We have low <= mid < high, so mid points at a real slot */ - result = _bt_compare(rel, key, page, mid); + if (!isleaf) + result = _bt_compare(rel, key, page, mid); + else + result = _bt_nonpivot_compare(rel, key, page, mid); if (result >= cmpval) low = mid + 1; @@ -490,17 +506,44 @@ _bt_compare(Relation rel, { BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); IndexTuple itup; + int ntupatts; - Assert(_bt_check_natts(rel, page, offnum)); + Assert(_bt_check_natts(rel, key->heapkeyspace, page, offnum)); /* * Force result ">" if target item is first data item on an internal page * --- see NOTE above. + * + * A minus infinity key has all attributes truncated away, so this test is + * redundant with the minus infinity attribute tie-breaker. However, the + * number of attributes in minus infinity tuples was not explicitly + * represented as 0 until PostgreSQL v11, so an explicit offnum test is + * still required. */ if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque)) return 1; itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + ntupatts = BTreeTupleGetNAtts(itup, rel); + return _bt_tuple_compare(rel, key, itup, ntupatts); +} + +/* + * Optimized version of _bt_compare(). Only works on non-pivot tuples. 
+ */ +static inline int32 +_bt_nonpivot_compare(Relation rel, + BTScanInsert key, + Page page, + OffsetNumber offnum) +{ + IndexTuple itup; + + Assert(_bt_check_natts(rel, key->heapkeyspace, page, offnum)); + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + Assert(BTreeTupleGetNAtts(itup, rel) == + IndexRelationGetNumberOfAttributes(rel)); return _bt_tuple_compare(rel, key, itup, key->keysz); } @@ -523,13 +566,17 @@ int32 _bt_tuple_compare(Relation rel, BTScanInsert key, IndexTuple itup, - int ncmpkey) + int ntupatts) { TupleDesc itupdesc = RelationGetDescr(rel); + ItemPointer heapTid; + int ncmpkey; int i; ScanKey scankey; Assert(key->keysz <= IndexRelationGetNumberOfKeyAttributes(rel)); + Assert(key->heapkeyspace || key->scantid == NULL); + Assert(!key->minusinfkey || key->heapkeyspace); /* * The scan key is set up with the attribute number associated with each @@ -543,6 +590,7 @@ _bt_tuple_compare(Relation rel, * _bt_first). */ + ncmpkey = Min(ntupatts, key->keysz); scankey = key->scankeys; for (i = 1; i <= ncmpkey; i++) { @@ -595,8 +643,40 @@ _bt_tuple_compare(Relation rel, scankey++; } - /* if we get here, the keys are equal */ - return 0; + /* + * Use the number of attributes as a tie-breaker, in order to treat + * truncated attributes in index as minus infinity + */ + if (key->keysz > ntupatts) + return 1; + + /* If caller provided no heap TID tie-breaker for scan, they're equal */ + heapTid = BTreeTupleGetHeapTID(itup); + if (key->scantid == NULL) + { + /* + * May be able to apply the "avoid minus infinity search" optimization + * with truncated pivot tuples + */ + if ((itup->t_info & INDEX_ALT_TID_MASK) != 0 && !key->minusinfkey && + heapTid == NULL && key->keysz == ntupatts) + return 1; + + return 0; + } + + /* + * Although it isn't counted as an attribute by BTreeTupleGetNAtts(), heap + * TID is an implicit final key attribute that ensures that all index + * tuples have a distinct set of key attribute values. 
+ * + * This is often truncated away in pivot tuples, which makes the attribute + * value implicitly negative infinity. + */ + if (heapTid == NULL) + return 1; + + return ItemPointerCompare(key->scantid, heapTid); } /* @@ -1113,7 +1193,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* Initialize remaining insertion scankey fields */ inskey.savebinsrch = inskey.restorebinsrch = false; inskey.low = inskey.high = InvalidOffsetNumber; + inskey.heapkeyspace = _bt_heapkeyspace(rel); + inskey.minusinfkey = !inskey.heapkeyspace; inskey.nextkey = nextkey; + inskey.scantid = NULL; inskey.keysz = keysCount; /* diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 933fb4dfe7..87b549a96b 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -743,6 +743,7 @@ _bt_sortaddtup(Page page, { trunctuple = *itup; trunctuple.t_info = sizeof(IndexTupleData); + /* Deliberately zero INDEX_ALT_TID_MASK bits */ BTreeTupleSetNAtts(&trunctuple, 0); itup = &trunctuple; itemsize = sizeof(IndexTupleData); @@ -796,8 +797,6 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) OffsetNumber last_off; Size pgspc; Size itupsz; - int indnatts = IndexRelationGetNumberOfAttributes(wstate->index); - int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(wstate->index); /* * This is a handy place to check for cancel interrupts during the btree @@ -814,27 +813,21 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) itupsz = MAXALIGN(itupsz); /* - * Check whether the item can fit on a btree page at all. (Eventually, we - * ought to try to apply TOAST methods if not.) We actually need to be - * able to fit three items on every page, so restrict any one item to 1/3 - * the per-page available space. Note that at this point, itupsz doesn't - * include the ItemId. + * Check whether the item can fit on a btree page at all. 
* - * NOTE: similar code appears in _bt_insertonpg() to defend against - * oversize items being inserted into an already-existing index. But - * during creation of an index, we don't go through there. + * Every newly built index will treat heap TID as part of the keyspace, + * which imposes the requirement that new high keys must occasionally have + * a heap TID appended within _bt_truncate(). That may leave a new pivot + * tuple one MAXALIGN() quantum larger than the original first right tuple + * it's derived from. v4 deals with the problem by decreasing the limit + * on the size of tuples inserted on the leaf level by the same small + * amount. Enforce the new v4+ limit on the leaf level, and the old limit + * on internal levels, since pivot tuples may need to make use of the + * spare MAXALIGN() quantum. This should never fail on internal pages. */ if (itupsz > BTMaxItemSize(npage)) - ereport(ERROR, - (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - itupsz, BTMaxItemSize(npage), - RelationGetRelationName(wstate->index)), - errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" - "Consider a function index of an MD5 hash of the value, " - "or use full text indexing."), - errtableconstraint(wstate->heap, - RelationGetRelationName(wstate->index)))); + _bt_check_third_page(wstate->index, wstate->heap, + state->btps_level == 0, npage, itup); /* * Check to see if page is "full". It's definitely full if the item won't @@ -880,24 +873,35 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) ItemIdSetUnused(ii); /* redundant */ ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); - if (indnkeyatts != indnatts && P_ISLEAF(opageop)) + if (P_ISLEAF(opageop)) { + IndexTuple lastleft; IndexTuple truncated; Size truncsz; /* - * Truncate any non-key attributes from high key on leaf level - * (i.e. truncate on leaf level if we're building an INCLUDE - * index). 
This is only done at the leaf level because downlinks + * Truncate away any unneeded attributes from high key on leaf + * level. This is only done at the leaf level because downlinks * in internal pages are either negative infinity items, or get * their contents from copying from one level down. See also: * _bt_split(). * + * We don't try to bias our choice of split point to make it more + * likely that _bt_truncate() can truncate away more attributes, + * whereas the split point passed to _bt_split() is chosen much + * more delicately. Suffix truncation is mostly useful because it + * improves space utilization for workloads with random + * insertions. It doesn't seem worthwhile to add logic for + * choosing a split point here for a benefit that is bound to be + * much smaller. + * * Since the truncated tuple is probably smaller than the * original, it cannot just be copied in place (besides, we want * to actually save space on the leaf page). We delete the * original high key, and add our own truncated high key at the - * same offset. + * same offset. It's okay if the truncated tuple is slightly + * larger due to containing a heap TID value, since this case is + * known to _bt_check_third_page(), which reserves space. * * Note that the page layout won't be changed very much. oitup is * already located at the physical beginning of tuple space, so we @@ -905,7 +909,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) * the latter portion of the space occupied by the original tuple. * This is fairly cheap. 
*/ - truncated = _bt_nonkey_truncate(wstate->index, oitup); + ii = PageGetItemId(opage, OffsetNumberPrev(last_off)); + lastleft = (IndexTuple) PageGetItem(opage, ii); + + truncated = _bt_truncate(wstate->index, lastleft, oitup, true); truncsz = IndexTupleSize(truncated); PageIndexTupleDelete(opage, P_HIKEY); _bt_sortaddtup(opage, truncsz, truncated, P_HIKEY); @@ -924,8 +931,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) if (state->btps_next == NULL) state->btps_next = _bt_pagestate(wstate, state->btps_level + 1); - Assert(BTreeTupleGetNAtts(state->btps_minkey, wstate->index) == - IndexRelationGetNumberOfKeyAttributes(wstate->index) || + Assert((BTreeTupleGetNAtts(state->btps_minkey, wstate->index) <= + IndexRelationGetNumberOfKeyAttributes(wstate->index) && + BTreeTupleGetNAtts(state->btps_minkey, wstate->index) > 0) || P_LEFTMOST(opageop)); Assert(BTreeTupleGetNAtts(state->btps_minkey, wstate->index) == 0 || !P_LEFTMOST(opageop)); @@ -970,7 +978,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) * the first item for a page is copied from the prior page in the code * above. Since the minimum key for an entire level is only used as a * minus infinity downlink, and never as a high key, there is no need to - * truncate away non-key attributes at this point. + * truncate away suffix attributes at this point. 
*/ if (last_off == P_HIKEY) { @@ -1029,8 +1037,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) } else { - Assert(BTreeTupleGetNAtts(s->btps_minkey, wstate->index) == - IndexRelationGetNumberOfKeyAttributes(wstate->index) || + Assert((BTreeTupleGetNAtts(s->btps_minkey, wstate->index) <= + IndexRelationGetNumberOfKeyAttributes(wstate->index) && + BTreeTupleGetNAtts(s->btps_minkey, wstate->index) > 0) || P_LEFTMOST(opaque)); Assert(BTreeTupleGetNAtts(s->btps_minkey, wstate->index) == 0 || !P_LEFTMOST(opaque)); @@ -1127,6 +1136,8 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) } else if (itup != NULL) { + int32 compare = 0; + for (i = 1; i <= keysz; i++) { SortSupport entry; @@ -1134,7 +1145,6 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) attrDatum2; bool isNull1, isNull2; - int32 compare; entry = sortKeys + i - 1; attrDatum1 = index_getattr(itup, i, tupdes, &isNull1); @@ -1151,6 +1161,20 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) else if (compare < 0) break; } + + /* + * If key values are equal, we sort on ItemPointer. This is + * required for btree indexes, since heap TID is treated as an + * implicit last key attribute in order to ensure that all + * keys in the index are physically unique. 
+ */ + if (compare == 0) + { + compare = ItemPointerCompare(&itup->t_tid, &itup2->t_tid); + Assert(compare != 0); + if (compare > 0) + load1 = false; + } } else load1 = false; diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 69d67fb428..23d75ad604 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -49,6 +49,8 @@ static void _bt_mark_scankey_required(ScanKey skey); static bool _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, ScanDirection dir, bool *continuescan); +static int _bt_keep_natts(Relation rel, IndexTuple lastleft, + IndexTuple firstright, bool build); /* @@ -56,10 +58,25 @@ static bool _bt_check_rowcompare(ScanKey skey, * Build an insertion scan key that contains comparison data from itup * as well as comparator routines appropriate to the key datatypes. * + * When itup is a non-pivot tuple, the returned insertion scan key is + * suitable for finding a place for it to go on the leaf level. When + * itup is a pivot tuple, the returned insertion scankey is suitable + * for locating the leaf page with the pivot as its high key (there + * must have been one like it at some point if the pivot tuple + * actually came from the tree). + * + * Note that we may occasionally have to share lock the metapage, in + * order to determine whether or not the keys in the index are + * expected to be unique (i.e. a "heapkeyspace" index). Callers that + * are building a new index cannot let us access the non-existent + * metapage. This is okay because we can safely assume that the + * index is on the latest btree version, which must be a + * "heapkeyspace" version. + * * The result is intended for use with _bt_compare(). 
*/ BTScanInsert -_bt_mkscankey(Relation rel, IndexTuple itup) +_bt_mkscankey(Relation rel, IndexTuple itup, bool build) { BTScanInsert inskey; ScanKey skey; @@ -80,15 +97,37 @@ _bt_mkscankey(Relation rel, IndexTuple itup) Assert(tupnatts <= indnatts); /* - * We'll execute search using scan key constructed on key columns. Non-key - * (INCLUDE index) columns are always omitted from scan keys. + * We'll execute search using scan key constructed on key columns. + * Truncated attributes and non-key attributes are omitted from the final + * scan key. */ inskey = palloc(offsetof(BTScanInsertData, scankeys) + sizeof(ScanKeyData) * indnkeyatts); + inskey->heapkeyspace = build || _bt_heapkeyspace(rel); + + /* + * Only heapkeyspace indexes support the "no minus infinity keys" + * optimization. !heapkeyspace indexes don't actually have minus infinity + * attributes, but this allows us to avoid checking heapkeyspace + * separately (explicit representation of number of key attributes in v3 + * indexes shouldn't confuse tie-breaker logic). + * + * There is never a need to explicitly represent truncated attributes as + * having minus infinity values. The only caller that may truly need to + * search for negative infinity is the page deletion code. It is + * sufficient to omit trailing truncated attributes from the scankey + * returned to that caller because caller relies on the fact that there + * cannot be duplicate high keys in heapkeyspace indexes. Caller also + * opts out of the "no minus infinity key" optimization, so search moves + * left on scankey-equal downlink in parent, allowing VACUUM caller to + * reliably relocate leaf page undergoing deletion. + */ + inskey->minusinfkey = !inskey->heapkeyspace; inskey->savebinsrch = inskey->restorebinsrch = false; inskey->low = inskey->high = InvalidOffsetNumber; inskey->nextkey = false; inskey->keysz = Min(indnkeyatts, tupnatts); + inskey->scantid = inskey->heapkeyspace ? 
BTreeTupleGetHeapTID(itup) : NULL; skey = inskey->scankeys; for (i = 0; i < indnkeyatts; i++) { @@ -102,7 +141,19 @@ _bt_mkscankey(Relation rel, IndexTuple itup) * comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); - arg = index_getattr(itup, i + 1, itupdesc, &null); + + /* + * Keys built from truncated attributes are defensively represented as + * NULL values, though they should still not participate in + * comparisons. + */ + if (i < tupnatts) + arg = index_getattr(itup, i + 1, itupdesc, &null); + else + { + arg = (Datum) 0; + null = true; + } flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT); ScanKeyEntryInitializeWithInfo(&skey[i], flags, @@ -2078,38 +2129,265 @@ btproperty(Oid index_oid, int attno, } /* - * _bt_nonkey_truncate() -- create tuple without non-key suffix attributes. + * _bt_truncate() -- create tuple without unneeded suffix attributes. * - * Returns truncated index tuple allocated in caller's memory context, with key - * attributes copied from caller's itup argument. Currently, suffix truncation - * is only performed to create pivot tuples in INCLUDE indexes, but some day it - * could be generalized to remove suffix attributes after the first - * distinguishing key attribute. + * Returns truncated pivot index tuple allocated in caller's memory context, + * with key attributes copied from caller's firstright argument. If rel is + * an INCLUDE index, non-key attributes will definitely be truncated away, + * since they're not part of the key space. More aggressive suffix + * truncation can take place when it's clear that the returned tuple does not + * need one or more suffix key attributes. We only need to keep firstright + * attributes up to and including the first non-lastleft-equal attribute. * - * Truncated tuple is guaranteed to be no larger than the original, which is - * important for staying under the 1/3 of a page restriction on tuple size. 
+ * Sometimes this routine will return a new pivot tuple that takes up more + * space than firstright, because a new heap TID attribute had to be added to + * distinguish lastleft from firstright. This should only happen when the + * caller is in the process of splitting a leaf page that has many logical + * duplicates, where it's unavoidable. * * Note that returned tuple's t_tid offset will hold the number of attributes * present, so the original item pointer offset is not represented. Caller - * should only change truncated tuple's downlink. + * should only change truncated tuple's downlink. Note also that truncated + * key attributes are treated as containing "minus infinity" values by + * _bt_compare()/_bt_tuple_compare(). + * + * In the worst case (when a heap TID is appended) the size of the returned + * tuple is the size of the first right tuple plus an additional MAXALIGN() + * quantum. This guarantee is important, since callers need to stay under + * the 1/3 of a page restriction on tuple size. If this routine is ever + * taught to truncate within an attribute/datum, it will need to avoid + * returning an enlarged tuple to caller when truncation + TOAST compression + * ends up enlarging the final datum. + * + * CREATE INDEX callers must pass build = true, in order to avoid metapage + * access. */ IndexTuple -_bt_nonkey_truncate(Relation rel, IndexTuple itup) +_bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright, + bool build) { - int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel); - IndexTuple truncated; + TupleDesc itupdesc = RelationGetDescr(rel); + int16 natts = IndexRelationGetNumberOfAttributes(rel); + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + int keepnatts; + IndexTuple pivot; + ItemPointer pivotheaptid; + Size newsize; /* - * We should only ever truncate leaf index tuples, which must have both - * key and non-key attributes. It's never okay to truncate a second time. 
+ * We should only ever truncate leaf index tuples. It's never okay to + * truncate a second time. */ - Assert(BTreeTupleGetNAtts(itup, rel) == - IndexRelationGetNumberOfAttributes(rel)); + Assert(BTreeTupleGetNAtts(lastleft, rel) == natts); + Assert(BTreeTupleGetNAtts(firstright, rel) == natts); - truncated = index_truncate_tuple(RelationGetDescr(rel), itup, nkeyattrs); - BTreeTupleSetNAtts(truncated, nkeyattrs); + /* Determine how many attributes must be kept in truncated tuple */ + keepnatts = _bt_keep_natts(rel, lastleft, firstright, build); - return truncated; +#ifdef DEBUG_NO_TRUNCATE + /* Artificially force truncation to always append heap TID */ + keepnatts = nkeyatts + 1; +#endif + + if (keepnatts <= natts) + { + IndexTuple tidpivot; + + pivot = index_truncate_tuple(itupdesc, firstright, keepnatts); + + /* + * If there is a distinguishing key attribute within keepnatts, there + * is no need to add an explicit heap TID attribute to new pivot. + */ + if (keepnatts <= nkeyatts) + { + BTreeTupleSetNAtts(pivot, keepnatts); + return pivot; + } + + /* + * This must be an INCLUDE index where only non-key attributes could + * be truncated away. They are not considered part of the key space, + * so it's still necessary to add a heap TID attribute to the new + * pivot tuple. Create enlarged copy of our truncated right tuple + * copy, to fit heap TID. + */ + Assert(natts != nkeyatts); + newsize = MAXALIGN(IndexTupleSize(pivot) + sizeof(ItemPointerData)); + tidpivot = palloc0(newsize); + memcpy(tidpivot, pivot, IndexTupleSize(pivot)); + /* cannot leak memory here */ + pfree(pivot); + pivot = tidpivot; + } + else + { + /* + * No truncation was possible, since key attributes are all equal, and + * there are no non-key attributes that need to be truncated in + * passing. It's necessary to add a heap TID attribute to the new + * pivot tuple. 
+ */ + Assert(natts == nkeyatts); + newsize = MAXALIGN(IndexTupleSize(firstright) + sizeof(ItemPointerData)); + pivot = palloc0(newsize); + memcpy(pivot, firstright, IndexTupleSize(firstright)); + } + + /* + * We have to use heap TID as a unique-ifier in the new pivot tuple, since + * no non-TID attribute in the right item readily distinguishes the right + * side of the split from the left side. Use enlarged space that holds a + * copy of first right tuple; place a heap TID value within the extra + * space that remains at the end. + * + * nbtree conceptualizes this case as an inability to truncate away any + * attribute. We must use an alternative representation of heap TID + * within pivots because heap TID is only treated as an attribute within + * nbtree (e.g., there is no explicit pg_attribute entry). + * + * Callers generally try to avoid choosing a split point that necessitates + * that we do this. Splits of pages that only involve a single distinct + * value (or set of values) must end up here, though. + */ + pivot->t_info &= ~INDEX_SIZE_MASK; + pivot->t_info |= newsize; + + /* + * Lehman & Yao use lastleft as the leaf high key in all cases, but don't + * consider suffix truncation. It seems like a good idea to follow that + * example in cases where no truncation takes place -- use lastleft's heap + * TID. (This is also the closest value to negative infinity that's + * legally usable.) + */ + pivotheaptid = (ItemPointer) ((char *) pivot + newsize - + sizeof(ItemPointerData)); + ItemPointerCopy(&lastleft->t_tid, pivotheaptid); + + /* + * Lehman and Yao require that the downlink to the right page, which is to + * be inserted into the parent page in the second phase of a page split be + * a strict lower bound on items on the right page, and a non-strict upper + * bound for items on the left page. Assert that heap TIDs follow these + * invariants, since a heap TID value is apparently needed as a + * tiebreaker. 
+ */ +#ifndef DEBUG_NO_TRUNCATE + Assert(ItemPointerCompare(&lastleft->t_tid, &firstright->t_tid) < 0); + Assert(ItemPointerCompare(pivotheaptid, &lastleft->t_tid) >= 0); + Assert(ItemPointerCompare(pivotheaptid, &firstright->t_tid) < 0); +#else + + /* + * Those invariants aren't guaranteed to hold for lastleft + firstright + * heap TID attribute values when they're considered here only because + * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually + * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap + * TID value that always works as a strict lower bound for items to the + * right. In particular, it must avoid using firstright's leading key + * attribute values along with lastleft's heap TID value when lastleft's + * TID happens to be greater than firstright's TID. + * + * (We could just use all of lastleft instead, but that would complicate + * caller's free space accounting, which makes the assumption that the new + * pivot must be no larger than firstright plus a single MAXALIGN() + * quantum.) + */ + ItemPointerCopy(&firstright->t_tid, pivotheaptid); + + /* + * Pivot heap TID should never be fully equal to firstright. Note that + * the pivot heap TID will still end up equal to lastleft's heap TID when + * that's the only value that's legally usable. + */ + ItemPointerSetOffsetNumber(pivotheaptid, + OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid))); + Assert(ItemPointerCompare(pivotheaptid, &firstright->t_tid) < 0); +#endif + + BTreeTupleSetNAtts(pivot, nkeyatts); + BTreeTupleSetAltHeapTID(pivot); + + return pivot; +} + +/* + * _bt_keep_natts - how many key attributes to keep when truncating. + * + * Caller provides two tuples that enclose a split point. CREATE INDEX + * callers must pass build = true so that we may avoid metapage access. (This + * is okay because CREATE INDEX always creates an index on the latest btree + * version.) 
+ * + * This can return a number of attributes that is one greater than the + * number of key attributes for the index relation. This indicates that the + * caller must use a heap TID as a unique-ifier in new pivot tuple. + */ +static int +_bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright, + bool build) +{ + int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + TupleDesc itupdesc = RelationGetDescr(rel); + int keepnatts; + ScanKey scankey; + BTScanInsert key; + + key = _bt_mkscankey(rel, firstright, build); + + /* + * Be consistent about the representation of BTREE_VERSION 3 tuples across + * Postgres versions; don't allow new pivot tuples to have truncated key + * attributes there. This keeps things consistent and simple for + * verification tools that have to handle multiple versions. + */ + if (!key->heapkeyspace) + { + Assert(nkeyatts != IndexRelationGetNumberOfAttributes(rel)); + return nkeyatts; + } + + Assert(key->keysz == nkeyatts); + scankey = key->scankeys; + keepnatts = 1; + for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++) + { + Datum datum1; + bool isNull1, + isNull2; + + datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1); + isNull2 = (scankey->sk_flags & SK_ISNULL) != 0; + + if (isNull1 != isNull2) + break; + + if (!isNull1 && + DatumGetInt32(FunctionCall2Coll(&scankey->sk_func, + scankey->sk_collation, + datum1, + scankey->sk_argument)) != 0) + break; + + keepnatts++; + } + + /* + * Make sure that an authoritative comparison that considers per-column + * options like ASC/DESC/NULLS FIRST/NULLS LAST indicates that it's okay + * to truncate firstright tuple up to keepnatts -- we expect to get a new + * pivot that's strictly greater than lastleft when truncation can go + * ahead. (A truncated version of firstright is also bound to be strictly + * less than firstright, since their attributes will be equal prior to one + * or more truncated negative infinity attributes.) 
+ */ + Assert(keepnatts == nkeyatts + 1 || + _bt_tuple_compare(rel, key, lastleft, keepnatts) > 0); + + /* Can't leak memory here */ + pfree(key); + + return keepnatts; } /* @@ -2123,15 +2401,17 @@ _bt_nonkey_truncate(Relation rel, IndexTuple itup) * preferred to calling here. That's usually more convenient, and is always * more explicit. Call here instead when offnum's tuple may be a negative * infinity tuple that uses the pre-v11 on-disk representation, or when a low - * context check is appropriate. + * context check is appropriate. This routine is as strict as possible about + * what is expected on each version of btree. */ bool -_bt_check_natts(Relation rel, Page page, OffsetNumber offnum) +_bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum) { int16 natts = IndexRelationGetNumberOfAttributes(rel); int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); IndexTuple itup; + int tupnatts; /* * We cannot reliably test a deleted or half-deleted page, since they have @@ -2151,16 +2431,26 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) "BT_N_KEYS_OFFSET_MASK can't fit INDEX_MAX_KEYS"); itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + tupnatts = BTreeTupleGetNAtts(itup, rel); if (P_ISLEAF(opaque)) { if (offnum >= P_FIRSTDATAKEY(opaque)) { /* - * Leaf tuples that are not the page high key (non-pivot tuples) - * should never be truncated + * Non-pivot tuples currently never use alternative heap TID + * representation -- even those within heapkeyspace indexes */ - return BTreeTupleGetNAtts(itup, rel) == natts; + if ((itup->t_info & INDEX_ALT_TID_MASK) != 0) + return false; + + /* + * Leaf tuples that are not the page high key (non-pivot tuples) + * should never be truncated. (Note that tupnatts must have been + * inferred, rather than coming from an explicit on-disk + * representation.) 
+ */ + return tupnatts == natts; } else { @@ -2170,8 +2460,16 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) */ Assert(!P_RIGHTMOST(opaque)); - /* Page high key tuple contains only key attributes */ - return BTreeTupleGetNAtts(itup, rel) == nkeyatts; + /* + * !heapkeyspace high key tuple contains only key attributes. + * Note that tupnatts will only have been explicitly represented + * in !heapkeyspace indexes that happen to have non-key + * attributes. + */ + if (!heapkeyspace) + return tupnatts == nkeyatts; + + /* Use generic heapkeyspace pivot tuple handling */ } } else /* !P_ISLEAF(opaque) */ @@ -2183,7 +2481,11 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) * its high key) is its negative infinity tuple. Negative * infinity tuples are always truncated to zero attributes. They * are a particular kind of pivot tuple. - * + */ + if (heapkeyspace) + return tupnatts == 0; + + /* * The number of attributes won't be explicitly represented if the * negative infinity tuple was generated during a page split that * occurred with a version of Postgres before v11. There must be @@ -2194,18 +2496,109 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum) * Prior to v11, downlinks always had P_HIKEY as their offset. Use * that to decide if the tuple is a pre-v11 tuple. */ - return BTreeTupleGetNAtts(itup, rel) == 0 || + return tupnatts == 0 || ((itup->t_info & INDEX_ALT_TID_MASK) == 0 && ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY); } else { /* - * Tuple contains only key attributes despite on is it page high - * key or not + * !heapkeyspace downlink tuple with separator key contains only + * key attributes. Note that tupnatts will only have been + * explicitly represented in !heapkeyspace indexes that happen to + * have non-key attributes. 
*/ - return BTreeTupleGetNAtts(itup, rel) == nkeyatts; + if (!heapkeyspace) + return tupnatts == nkeyatts; + + /* Use generic heapkeyspace pivot tuple handling */ } } + + /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */ + Assert(heapkeyspace); + + /* + * Explicit representation of the number of attributes is mandatory with + * heapkeyspace index pivot tuples, regardless of whether or not there are + * non-key attributes. + */ + if ((itup->t_info & INDEX_ALT_TID_MASK) == 0) + return false; + + /* + * Heap TID is a tie-breaker key attribute, so it cannot be untruncated + * when any other key attribute is truncated + */ + if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts) + return false; + + /* + * Pivot tuple must have at least one untruncated key attribute (minus + * infinity pivot tuples are the only exception). Pivot tuples can never + * represent that there is a value present for a key attribute that + * exceeds pg_index.indnkeyatts for the index. + */ + return tupnatts > 0 && tupnatts <= nkeyatts; +} + +/* + * + * _bt_check_third_page() -- check whether tuple fits on a btree page at all. + * + * We actually need to be able to fit three items on every page, so restrict + * any one item to 1/3 the per-page available space. Note that itemsz should + * not include the ItemId overhead. + * + * It might be useful to apply TOAST methods rather than throw an error here. + * Using out of line storage would break assumptions made by suffix truncation + * and by contrib/amcheck, though. 
+ */ +void +_bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, + Page page, IndexTuple newtup) +{ + Size itemsz; + BTPageOpaque opaque; + + itemsz = MAXALIGN(IndexTupleSize(newtup)); + + /* Double check item size against limit */ + if (itemsz <= BTMaxItemSize(page)) + return; + + /* + * Tuple is probably too large to fit on page, but it's possible that the + * index uses version 2 or version 3, or that page is an internal page, in + * which case a slightly higher limit applies. + */ + if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page)) + return; + + /* + * Internal page insertions cannot fail here, because that would mean that + * an earlier leaf level insertion that should have failed didn't + */ + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + if (!P_ISLEAF(opaque)) + elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"", + itemsz, RelationGetRelationName(rel)); + + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"", + itemsz, + needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION, + needheaptidspace ? 
BTMaxItemSize(page) : + BTMaxItemSizeNoHeapTid(page), + RelationGetRelationName(rel)), + errdetail("Index row references tuple (%u,%u) in relation \"%s\".", + ItemPointerGetBlockNumber(&newtup->t_tid), + ItemPointerGetOffsetNumber(&newtup->t_tid), + RelationGetRelationName(heap)), + errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n" + "Consider a function index of an MD5 hash of the value, " + "or use full text indexing."), + errtableconstraint(heap, RelationGetRelationName(rel)))); } diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index b0666b42df..876ff0c40f 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -103,7 +103,7 @@ _bt_restore_meta(XLogReaderState *record, uint8 block_id) md = BTPageGetMeta(metapg); md->btm_magic = BTREE_MAGIC; - md->btm_version = BTREE_VERSION; + md->btm_version = xlrec->version; md->btm_root = xlrec->root; md->btm_level = xlrec->level; md->btm_fastroot = xlrec->fastroot; @@ -202,7 +202,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, XLogReaderState *record) } static void -btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) +btree_xlog_split(bool onleft, XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record); @@ -213,8 +213,6 @@ btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) BTPageOpaque ropaque; char *datapos; Size datalen; - IndexTuple left_hikey = NULL; - Size left_hikeysz = 0; BlockNumber leftsib; BlockNumber rightsib; BlockNumber rnext; @@ -248,20 +246,6 @@ btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) _bt_restore_page(rpage, datapos, datalen); - /* - * When the high key isn't present is the wal record, then we assume it to - * be equal to the first key on the right page. It must be from the leaf - * level. 
- */ - if (!lhighkey) - { - ItemId hiItemId = PageGetItemId(rpage, P_FIRSTDATAKEY(ropaque)); - - Assert(isleaf); - left_hikey = (IndexTuple) PageGetItem(rpage, hiItemId); - left_hikeysz = ItemIdGetLength(hiItemId); - } - PageSetLSN(rpage, lsn); MarkBufferDirty(rbuf); @@ -284,6 +268,8 @@ btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) OffsetNumber off; IndexTuple newitem = NULL; Size newitemsz = 0; + IndexTuple left_hikey = NULL; + Size left_hikeysz = 0; Page newlpage; OffsetNumber leftoff; @@ -298,13 +284,10 @@ btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) } /* Extract left hikey and its size (assuming 16-bit alignment) */ - if (lhighkey) - { - left_hikey = (IndexTuple) datapos; - left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey)); - datapos += left_hikeysz; - datalen -= left_hikeysz; - } + left_hikey = (IndexTuple) datapos; + left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey)); + datapos += left_hikeysz; + datalen -= left_hikeysz; Assert(datalen == 0); @@ -1003,16 +986,10 @@ btree_redo(XLogReaderState *record) btree_xlog_insert(false, true, record); break; case XLOG_BTREE_SPLIT_L: - btree_xlog_split(true, false, record); - break; - case XLOG_BTREE_SPLIT_L_HIGHKEY: - btree_xlog_split(true, true, record); + btree_xlog_split(true, record); break; case XLOG_BTREE_SPLIT_R: - btree_xlog_split(false, false, record); - break; - case XLOG_BTREE_SPLIT_R_HIGHKEY: - btree_xlog_split(false, true, record); + btree_xlog_split(false, record); break; case XLOG_BTREE_VACUUM: btree_xlog_vacuum(record); diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index 8d5c6ae0ab..fcac0cd8a9 100644 --- a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -35,8 +35,6 @@ btree_desc(StringInfo buf, XLogReaderState *record) } case XLOG_BTREE_SPLIT_L: case XLOG_BTREE_SPLIT_R: - case XLOG_BTREE_SPLIT_L_HIGHKEY: - case XLOG_BTREE_SPLIT_R_HIGHKEY: { xl_btree_split *xlrec = 
(xl_btree_split *) rec; @@ -130,12 +128,6 @@ btree_identify(uint8 info) case XLOG_BTREE_SPLIT_R: id = "SPLIT_R"; break; - case XLOG_BTREE_SPLIT_L_HIGHKEY: - id = "SPLIT_L_HIGHKEY"; - break; - case XLOG_BTREE_SPLIT_R_HIGHKEY: - id = "SPLIT_R_HIGHKEY"; - break; case XLOG_BTREE_VACUUM: id = "VACUUM"; break; diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 489eee095e..cf603d6944 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -4057,9 +4057,10 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b, } /* - * If key values are equal, we sort on ItemPointer. This does not affect - * validity of the finished index, but it may be useful to have index - * scans in physical order. + * If key values are equal, we sort on ItemPointer. This is required for + * btree indexes, since heap TID is treated as an implicit last key + * attribute in order to ensure that all keys in the index are physically + * unique. */ { BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid); @@ -4076,6 +4077,9 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b, return (pos1 < pos2) ? -1 : 1; } + /* ItemPointer values should never be equal */ + Assert(false); + return 0; } @@ -4128,6 +4132,9 @@ comparetup_index_hash(const SortTuple *a, const SortTuple *b, return (pos1 < pos2) ? 
-1 : 1; } + /* ItemPointer values should never be equal */ + Assert(false); + return 0; } diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index c8fd036c9e..d0eb68e3d2 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -112,18 +112,53 @@ typedef struct BTMetaPageData #define BTPageGetMeta(p) \ ((BTMetaPageData *) PageGetContents(p)) +/* + * Btree version 4 (used by indexes initialized by PostgreSQL v12) made + * general changes to the on-disk representation to add support for + * heapkeyspace semantics, necessitating a REINDEX to get heapkeyspace + * semantics in pg_upgrade scenarios. We continue to offer support for + * BTREE_MIN_VERSION in order to support upgrades from PostgreSQL versions + * up to and including v10 to v12+ without requiring a REINDEX. + * Similarly, we continue to offer support for BTREE_NOVAC_VERSION to + * support upgrades from v11 to v12+ without requiring a REINDEX. + * + * We maintain PostgreSQL v11's ability to upgrade from BTREE_MIN_VERSION + * to BTREE_NOVAC_VERSION automatically. v11's "no vacuuming" enhancement + * (the ability to skip full index scans during vacuuming) only requires + * two new metapage fields, which makes it possible to upgrade at any + * point that the metapage must be updated anyway (e.g. during a root page + * split). Note also that there happened to be no changes in metapage + * layout for btree version 4. All current metapage fields should have + * valid values set when a metapage WAL record is replayed. + * + * It's convenient to consider the "no vacuuming" enhancement (metapage + * layout compatibility) separately from heapkeyspace semantics, since + * each issue affects different areas. This is just a convention; in + * practice a heapkeyspace index is always also a "no vacuuming" index. 
+ */ #define BTREE_METAPAGE 0 /* first page is meta */ #define BTREE_MAGIC 0x053162 /* magic number of btree pages */ -#define BTREE_VERSION 3 /* current version number */ +#define BTREE_VERSION 4 /* current version number */ #define BTREE_MIN_VERSION 2 /* minimal supported version number */ +#define BTREE_NOVAC_VERSION 3 /* minimal version with all meta fields */ /* * Maximum size of a btree index entry, including its tuple header. * * We actually need to be able to fit three items on every page, * so restrict any one item to 1/3 the per-page available space. + * + * There are rare cases where _bt_truncate() will need to enlarge + * a heap index tuple to make space for a tie-breaker heap TID + * attribute, which we account for here. */ #define BTMaxItemSize(page) \ + MAXALIGN_DOWN((PageGetPageSize(page) - \ + MAXALIGN(SizeOfPageHeaderData + \ + 3*sizeof(ItemIdData) + \ + 3*sizeof(ItemPointerData)) - \ + MAXALIGN(sizeof(BTPageOpaqueData))) / 3) +#define BTMaxItemSizeNoHeapTid(page) \ MAXALIGN_DOWN((PageGetPageSize(page) - \ MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \ MAXALIGN(sizeof(BTPageOpaqueData))) / 3) @@ -203,22 +238,25 @@ typedef struct BTMetaPageData * their item pointer offset field, since pivot tuples never need to store a * real offset (downlinks only need to store a block number). The offset * field only stores the number of attributes when the INDEX_ALT_TID_MASK - * bit is set (we never assume that pivot tuples must explicitly store the - * number of attributes, and currently do not bother storing the number of - * attributes unless indnkeyatts actually differs from indnatts). - * INDEX_ALT_TID_MASK is only used for pivot tuples at present, though it's - * possible that it will be used within non-pivot tuples in the future. Do - * not assume that a tuple with INDEX_ALT_TID_MASK set must be a pivot - * tuple. 
+ * bit is set, though that number doesn't include the trailing heap TID + * attribute sometimes stored in pivot tuples -- that's represented by the + * presence of BT_HEAP_TID_ATTR. INDEX_ALT_TID_MASK is only used for pivot + * tuples at present, though it's possible that it will be used within + * non-pivot tuples in the future. All pivot tuples must have + * INDEX_ALT_TID_MASK set as of BTREE_VERSION 4. * * The 12 least significant offset bits are used to represent the number of - * attributes in INDEX_ALT_TID_MASK tuples, leaving 4 bits that are reserved - * for future use (BT_RESERVED_OFFSET_MASK bits). BT_N_KEYS_OFFSET_MASK should - * be large enough to store any number <= INDEX_MAX_KEYS. + * attributes in INDEX_ALT_TID_MASK tuples, leaving 4 status bits + * (BT_RESERVED_OFFSET_MASK bits), 3 of which are reserved for future + * use. BT_N_KEYS_OFFSET_MASK should be large enough to store any number of + * attributes <= INDEX_MAX_KEYS. */ #define INDEX_ALT_TID_MASK INDEX_AM_RESERVED_BIT + +/* Item pointer offset bits */ #define BT_RESERVED_OFFSET_MASK 0xF000 #define BT_N_KEYS_OFFSET_MASK 0x0FFF +#define BT_HEAP_TID_ATTR 0x1000 /* Get/set downlink block number */ #define BTreeInnerTupleGetDownLink(itup) \ @@ -241,14 +279,16 @@ typedef struct BTMetaPageData } while(0) /* - * Get/set number of attributes within B-tree index tuple. Asserts should be - * removed when BT_RESERVED_OFFSET_MASK bits will be used. + * Get/set number of attributes within B-tree index tuple. + * + * Note that this does not include an implicit tie-breaker heap-TID + * attribute, if any. Note also that the number of key attributes must be + * explicitly represented in heapkeyspace pivot tuples. */ #define BTreeTupleGetNAtts(itup, rel) \ ( \ (itup)->t_info & INDEX_ALT_TID_MASK ? 
\ ( \ - AssertMacro((ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_RESERVED_OFFSET_MASK) == 0), \ ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_N_KEYS_OFFSET_MASK \ ) \ : \ @@ -257,10 +297,46 @@ typedef struct BTMetaPageData #define BTreeTupleSetNAtts(itup, n) \ do { \ (itup)->t_info |= INDEX_ALT_TID_MASK; \ - Assert(((n) & BT_RESERVED_OFFSET_MASK) == 0); \ ItemPointerSetOffsetNumber(&(itup)->t_tid, (n) & BT_N_KEYS_OFFSET_MASK); \ } while(0) +/* + * Get tie-breaker heap TID attribute, if any. Macro works with both pivot + * and non-pivot tuples, despite differences in how heap TID is represented. + * + * Assumes that any tuple without INDEX_ALT_TID_MASK set has a t_tid that + * points to the heap, and that all pivot tuples have INDEX_ALT_TID_MASK set + * (since all pivot tuples must as of BTREE_VERSION 4). When non-pivot + * tuples use the INDEX_ALT_TID_MASK representation in the future, they'll + * probably also contain a heap TID at the end of the tuple. We currently + * assume that a tuple with INDEX_ALT_TID_MASK set is a pivot tuple within + * heapkeyspace indexes (and that a tuple without it set must be a non-pivot + * tuple), but it might also be used by non-pivot tuples in the future. + * pg_upgrade'd !heapkeyspace indexes only set INDEX_ALT_TID_MASK in pivot + * tuples that actually originated with the truncation of one or more + * attributes. + */ +#define BTreeTupleGetHeapTID(itup) \ + ( \ + (itup)->t_info & INDEX_ALT_TID_MASK && \ + (ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_HEAP_TID_ATTR) != 0 ? \ + ( \ + (ItemPointer) (((char *) (itup) + IndexTupleSize(itup)) - \ + sizeof(ItemPointerData)) \ + ) \ + : (itup)->t_info & INDEX_ALT_TID_MASK ? 
NULL : (ItemPointer) &((itup)->t_tid) \ + ) +/* + * Set the heap TID attribute for a tuple that uses the INDEX_ALT_TID_MASK + * representation (currently limited to pivot tuples) + */ +#define BTreeTupleSetAltHeapTID(itup) \ + do { \ + Assert((itup)->t_info & INDEX_ALT_TID_MASK); \ + ItemPointerSetOffsetNumber(&(itup)->t_tid, \ + ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) | BT_HEAP_TID_ATTR); \ + } while(0) + /* * Operator strategy numbers for B-tree have been moved to access/stratnum.h, * because many places need to use them in ScanKeyInit() calls. @@ -326,25 +402,53 @@ typedef BTStackData *BTStack; * _bt_search. For details on its mutable state, see _bt_binsrch and * _bt_findinsertloc. * + * heapkeyspace indicates if we expect all keys in the index to be unique by + * treating heap TID as a tie-breaker attribute (i.e. the index is + * BTREE_VERSION 4+). scantid should never be set when index is not a + * heapkeyspace index. + * + * minusinfkey controls an optimization used by heapkeyspace indexes. When + * minusinfkey is false (the usual case), _bt_tuple_compare will consider a + * scankey greater than a pivot tuple where all explicitly represented + * attributes are equal to the scankey, provided that the pivot tuple has at + * least one attribute truncated away (this is often just the heap TID + * attribute). We exploit the fact that minus infinity is a value that only + * appears in pivot tuples (to make suffix truncation work), and is therefore + * not interesting (page deletion by VACUUM is the one case where the + * optimization cannot be used, since a leaf page is relocated using its high + * key). This optimization allows us to get the full benefit of suffix + * truncation, particularly with indexes where each distinct set of user + * attribute keys appear in at least a few duplicate entries. + * * When nextkey is false (the usual case), _bt_search and _bt_binsrch will * locate the first item >= scankey. 
When nextkey is true, they will locate * the first item > scan key. * - * keysz is the number of insertion scankeys present. + * scantid is the heap TID that is used as a final tie-breaker attribute, + * which may be set to NULL to indicate its absence. When inserting new + * tuples, it must be set, since every tuple in the tree unambiguously belongs + * in one exact position, even when there are entries in the tree that are + * considered duplicates by external code. Unique insertions set scantid only + * after unique checking indicates that it's safe to insert. Despite the + * representational difference, scantid is just another insertion scankey to + * routines like _bt_search. * - * scankeys is an array of scan key entries for attributes that are compared. - * During insertion, there must be a scan key for every attribute, but when - * starting a regular index scan some can be omitted. The array is used as a - * flexible array member, though it's sized in a way that makes it possible to - * use stack allocations. See nbtree/README for full details. + * keysz is the number of insertion scankeys present, not including scantid. + * + * scankeys is an array of scan key entries for attributes that are compared + * before scantid (user-visible attributes). During insertion, there must be + * a scan key for every attribute, but when starting a regular index scan some + * can be omitted. The array is used as a flexible array member, though it's + * sized in a way that makes it possible to use stack allocations. See + * nbtree/README for full details. */ typedef struct BTScanInsertData { /* * Mutable state used by _bt_binsrch to inexpensively repeat a binary - * search on the leaf level. Only used for insertions where - * _bt_check_unique is called. + * search on the leaf level when only scantid has changed. Only used for + * insertions where _bt_check_unique is called. 
*/ bool savebinsrch; bool restorebinsrch; @@ -352,7 +456,10 @@ typedef struct BTScanInsertData OffsetNumber high; /* State used to locate a position at the leaf level */ + bool heapkeyspace; + bool minusinfkey; bool nextkey; + ItemPointer scantid; /* tiebreaker for scankeys */ int keysz; /* Size of scankeys */ ScanKeyData scankeys[INDEX_MAX_KEYS]; /* Must appear last */ } BTScanInsertData; @@ -582,6 +689,7 @@ extern void _bt_upgrademetapage(Page page); extern Buffer _bt_getroot(Relation rel, int access); extern Buffer _bt_gettrueroot(Relation rel); extern int _bt_getrootheight(Relation rel); +extern bool _bt_heapkeyspace(Relation rel); extern void _bt_checkpage(Relation rel, Buffer buf); extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access); extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf, @@ -606,7 +714,7 @@ extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf, extern OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf); extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum); extern int32 _bt_tuple_compare(Relation rel, BTScanInsert key, IndexTuple itup, - int ncmpkey); + int ntupatts); extern bool _bt_first(IndexScanDesc scan, ScanDirection dir); extern bool _bt_next(IndexScanDesc scan, ScanDirection dir); extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost, @@ -615,7 +723,7 @@ extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost, /* * prototypes for functions in nbtutils.c */ -extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup); +extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup, bool build); extern ScanKey _bt_mkscankey_nodata(Relation rel); extern void _bt_freestack(BTStack stack); extern void _bt_preprocess_array_keys(IndexScanDesc scan); @@ -638,8 +746,12 @@ extern bytea *btoptions(Datum reloptions, bool validate); extern bool btproperty(Oid index_oid, int attno, IndexAMProperty prop, const char 
*propname, bool *res, bool *isnull); -extern IndexTuple _bt_nonkey_truncate(Relation rel, IndexTuple itup); -extern bool _bt_check_natts(Relation rel, Page page, OffsetNumber offnum); +extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft, + IndexTuple firstright, bool build); +extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, + OffsetNumber offnum); +extern void _bt_check_third_page(Relation rel, Relation heap, + bool needheaptidspace, Page page, IndexTuple newtup); /* * prototypes for functions in nbtvalidate.c diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h index a605851c98..a4cbdff283 100644 --- a/src/include/access/nbtxlog.h +++ b/src/include/access/nbtxlog.h @@ -28,8 +28,7 @@ #define XLOG_BTREE_INSERT_META 0x20 /* same, plus update metapage */ #define XLOG_BTREE_SPLIT_L 0x30 /* add index tuple with split */ #define XLOG_BTREE_SPLIT_R 0x40 /* as above, new item on right */ -#define XLOG_BTREE_SPLIT_L_HIGHKEY 0x50 /* as above, include truncated highkey */ -#define XLOG_BTREE_SPLIT_R_HIGHKEY 0x60 /* as above, include truncated highkey */ +/* 0x50 and 0x60 are unused */ #define XLOG_BTREE_DELETE 0x70 /* delete leaf index tuples for a page */ #define XLOG_BTREE_UNLINK_PAGE 0x80 /* delete a half-dead page */ #define XLOG_BTREE_UNLINK_PAGE_META 0x90 /* same, and update metapage */ @@ -47,6 +46,7 @@ */ typedef struct xl_btree_metadata { + uint32 version; BlockNumber root; uint32 level; BlockNumber fastroot; @@ -82,20 +82,16 @@ typedef struct xl_btree_insert * * Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record. * The _L and _R variants indicate whether the inserted tuple went into the - * left or right split page (and thus, whether newitemoff and the new item - * are stored or not). The _HIGHKEY variants indicate that we've logged - * explicitly left page high key value, otherwise redo should use right page - * leftmost key as a left page high key. 
_HIGHKEY is specified for internal - pages where right page leftmost key is suppressed, and for leaf pages - of covering indexes where high key have non-key attributes truncated. + * left or right split page (and thus, whether newitemoff and the new item are + * stored or not). We always explicitly log the left page high key value. * * Backup Blk 0: original page / new left page * * The left page's data portion contains the new item, if it's the _L variant. - * (In the _R variants, the new item is one of the right page's tuples.) - * If level > 0, an IndexTuple representing the HIKEY of the left page - * follows. We don't need this on leaf pages, because it's the same as the - * leftmost key in the new right page. + * In the _R variants, the new item is one of the right page's tuples. An + * IndexTuple representing the HIKEY of the left page always follows, since + * suffix truncation can make the high key differ from the first item on + * the new right page. * * Backup Blk 1: new right page * diff --git a/src/test/regress/expected/dependency.out b/src/test/regress/expected/dependency.out index 8e50f8ffbb..8d31110b87 100644 --- a/src/test/regress/expected/dependency.out +++ b/src/test/regress/expected/dependency.out @@ -128,9 +128,9 @@ FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; -- doesn't work: grant still exists DROP USER regress_dep_user1; ERROR: role "regress_dep_user1" cannot be dropped because some objects depend on it -DETAIL: owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest +DETAIL: privileges for table deptest1 privileges for database regression -privileges for table deptest1 +owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest DROP OWNED BY regress_dep_user1; DROP USER regress_dep_user1; \set VERBOSITY terse diff --git a/src/test/regress/expected/event_trigger.out b/src/test/regress/expected/event_trigger.out index
0e32d5c427..ac41419c7b 100644 --- a/src/test/regress/expected/event_trigger.out +++ b/src/test/regress/expected/event_trigger.out @@ -187,9 +187,9 @@ ERROR: event trigger "regress_event_trigger" does not exist -- should fail, regress_evt_user owns some objects drop role regress_evt_user; ERROR: role "regress_evt_user" cannot be dropped because some objects depend on it -DETAIL: owner of event trigger regress_event_trigger3 +DETAIL: owner of user mapping for regress_evt_user on server useless_server owner of default privileges on new relations belonging to role regress_evt_user -owner of user mapping for regress_evt_user on server useless_server +owner of event trigger regress_event_trigger3 -- cleanup before next test -- these are all OK; the second one should emit a NOTICE drop event trigger if exists regress_event_trigger2; diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out index 4d82d3a7e8..9c763ec184 100644 --- a/src/test/regress/expected/foreign_data.out +++ b/src/test/regress/expected/foreign_data.out @@ -441,8 +441,8 @@ ALTER SERVER s1 OWNER TO regress_test_indirect; RESET ROLE; DROP ROLE regress_test_indirect; -- ERROR ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it -DETAIL: owner of server s1 -privileges for foreign-data wrapper foo +DETAIL: privileges for foreign-data wrapper foo +owner of server s1 \des+ List of foreign servers Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description @@ -1998,9 +1998,9 @@ DROP TABLE temp_parted; DROP SCHEMA foreign_schema CASCADE; DROP ROLE regress_test_role; -- ERROR ERROR: role "regress_test_role" cannot be dropped because some objects depend on it -DETAIL: privileges for server s4 +DETAIL: owner of user mapping for regress_test_role on server s6 privileges for foreign-data wrapper foo -owner of user mapping for regress_test_role on server s6 +privileges for server s4 DROP SERVER t1 
CASCADE; NOTICE: drop cascades to user mapping for public on server t1 DROP USER MAPPING FOR regress_test_role SERVER s6; diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 1d12b01068..06fe44d39a 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -3502,8 +3502,8 @@ SELECT refclassid::regclass, deptype SAVEPOINT q; DROP ROLE regress_rls_eve; --fails due to dependency on POLICY p ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it -DETAIL: target of policy p on table tbl1 -privileges for table tbl1 +DETAIL: privileges for table tbl1 +target of policy p on table tbl1 ROLLBACK TO q; ALTER POLICY p ON tbl1 TO regress_rls_frank USING (true); SAVEPOINT q; diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 9fe950b29d..08cf72d670 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -167,6 +167,8 @@ BTArrayKeyInfo BTBuildState BTCycleId BTIndexStat +BTScanInsert +BTScanInsertData BTLeader BTMetaPageData BTOneVacInfo @@ -2207,6 +2209,8 @@ SpecialJoinInfo SpinDelayStatus SplitInterval SplitLR +SplitMode +SplitPoint SplitVar SplitedPageLayout StackElem -- 2.17.1