From b51be901d7ffe0124b74eefc27531e99ec65feb4 Mon Sep 17 00:00:00 2001 From: Bharath Rupireddy Date: Mon, 17 May 2021 12:07:35 +0530 Subject: [PATCH v1] Add TDE nonce bytes to page pd_special structure Add TDE nonce bytes of size tde_nonce_size to page pd_special structure while initializing the page in PageInit. For index pages, MAXALIGN the nonce bytes with the existing structure i.e. MAXALIGN(index_structure + nonce bytes). For heap pages, just MAXALIGN(nonce bytes) and pd_special pointer points to nonce bytes. The nonce value in the page will be stored at the end of the page. Since the nonce size is a run-time (initdb) option, some of the macros were parameterized. --- contrib/amcheck/verify_heapam.c | 12 +++-- contrib/amcheck/verify_nbtree.c | 8 +-- contrib/bloom/bloom.h | 26 ++++++---- contrib/bloom/blutils.c | 11 ++-- contrib/bloom/blvacuum.c | 4 +- contrib/hstore/hstore_gist.c | 3 +- contrib/intarray/_int.h | 5 +- contrib/ltree/ltree.h | 4 +- contrib/pageinspect/brinfuncs.c | 2 +- contrib/pageinspect/fsmfuncs.c | 2 +- contrib/pageinspect/ginfuncs.c | 6 ++- contrib/pageinspect/hashfuncs.c | 3 +- contrib/pg_surgery/heap_surgery.c | 4 +- contrib/pg_trgm/trgm.h | 3 +- contrib/pgstattuple/pgstatapprox.c | 3 +- contrib/pgstattuple/pgstatindex.c | 2 +- contrib/pgstattuple/pgstattuple.c | 3 +- src/backend/access/brin/brin_bloom.c | 14 ++--- src/backend/access/brin/brin_minmax_multi.c | 14 ++--- src/backend/access/brin/brin_pageops.c | 19 ++++--- src/backend/access/brin/brin_revmap.c | 22 ++++---- src/backend/access/brin/brin_tuple.c | 3 +- src/backend/access/common/indextuple.c | 3 +- src/backend/access/common/reloptions.c | 10 +++- src/backend/access/common/toast_internals.c | 4 +- src/backend/access/gin/gindatapage.c | 41 +++++++-------- src/backend/access/gin/ginentrypage.c | 4 +- src/backend/access/gin/ginfast.c | 13 +++-- src/backend/access/gin/gininsert.c | 7 ++- src/backend/access/gin/ginvacuum.c | 4 +- src/backend/access/gin/ginxlog.c | 6 +-- 
src/backend/access/gist/gist.c | 4 +- src/backend/access/gist/gistbuild.c | 2 + src/backend/access/gist/gistget.c | 8 +-- src/backend/access/gist/gistutil.c | 5 +- src/backend/access/hash/hash.c | 4 +- src/backend/access/hash/hashinsert.c | 4 +- src/backend/access/hash/hashovfl.c | 6 +-- src/backend/access/hash/hashpage.c | 6 +-- src/backend/access/hash/hashsearch.c | 10 ++-- src/backend/access/hash/hashutil.c | 4 +- src/backend/access/heap/heapam.c | 19 +++---- src/backend/access/heap/heapam_handler.c | 16 +++--- src/backend/access/heap/heaptoast.c | 27 ++++++---- src/backend/access/heap/hio.c | 8 +-- src/backend/access/heap/pruneheap.c | 18 +++---- src/backend/access/heap/rewriteheap.c | 7 +-- src/backend/access/heap/vacuumlazy.c | 19 +++---- src/backend/access/heap/visibilitymap.c | 42 ++++++++------- src/backend/access/nbtree/nbtdedup.c | 4 +- src/backend/access/nbtree/nbtinsert.c | 4 +- src/backend/access/nbtree/nbtpage.c | 11 ++-- src/backend/access/nbtree/nbtree.c | 4 +- src/backend/access/nbtree/nbtsort.c | 5 +- src/backend/access/nbtree/nbtsplitloc.c | 2 +- src/backend/access/nbtree/nbtutils.c | 8 +-- src/backend/access/nbtree/nbtxlog.c | 6 +-- src/backend/access/spgist/spgdoinsert.c | 28 +++++----- src/backend/access/spgist/spgscan.c | 2 +- src/backend/access/spgist/spgutils.c | 8 +-- src/backend/access/spgist/spgvacuum.c | 22 ++++---- src/backend/access/transam/xlog.c | 6 +-- src/backend/nodes/tidbitmap.c | 23 +++++---- .../replication/logical/reorderbuffer.c | 2 +- src/backend/storage/freespace/freespace.c | 51 +++++++++++-------- src/backend/storage/freespace/fsmpage.c | 21 ++++---- src/backend/storage/page/bufpage.c | 25 ++++++--- src/backend/utils/adt/selfuncs.c | 2 +- src/backend/utils/adt/tsgistidx.c | 4 +- src/bin/pg_resetwal/pg_resetwal.c | 4 +- src/include/access/brin_page.h | 8 +-- src/include/access/ginblock.h | 22 ++++---- src/include/access/gist.h | 9 ++-- src/include/access/gist_private.h | 6 ++- src/include/access/hash.h | 11 ++-- 
src/include/access/heapam.h | 2 +- src/include/access/heaptoast.h | 39 +++++++++++--- src/include/access/htup_details.h | 12 ++++- src/include/access/itup.h | 5 +- src/include/access/nbtree.h | 10 ++-- src/include/access/spgist_private.h | 14 ++--- src/include/storage/fsm_internals.h | 7 +-- .../test_ginpostinglist/test_ginpostinglist.c | 6 +-- 83 files changed, 505 insertions(+), 362 deletions(-) diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c index d8b3fd3d4f..94f5a66012 100644 --- a/contrib/amcheck/verify_heapam.c +++ b/contrib/amcheck/verify_heapam.c @@ -1179,12 +1179,14 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx, uint32 extsize) { int32 chunk_seq; - int32 last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE; + int32 last_chunk_seq; Pointer chunk; bool isnull; int32 chunksize; int32 expected_size; + last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE(tde_nonce_size); + /* Sanity-check the sequence number. */ chunk_seq = DatumGetInt32(fastgetattr(toasttup, 2, ctx->toast_rel->rd_att, &isnull)); @@ -1249,8 +1251,10 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx, return; } - expected_size = chunk_seq < last_chunk_seq ? 
TOAST_MAX_CHUNK_SIZE - : extsize - (last_chunk_seq * TOAST_MAX_CHUNK_SIZE); + if (chunk_seq < last_chunk_seq) + expected_size = TOAST_MAX_CHUNK_SIZE(tde_nonce_size); + else + expected_size = extsize - (last_chunk_seq * TOAST_MAX_CHUNK_SIZE(tde_nonce_size)); if (chunksize != expected_size) report_toast_corruption(ctx, ta, @@ -1457,7 +1461,7 @@ check_toasted_attribute(HeapCheckContext *ctx, ToastedAttribute *ta) int32 last_chunk_seq; extsize = VARATT_EXTERNAL_GET_EXTSIZE(ta->toast_pointer); - last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE; + last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE(tde_nonce_size); /* * Setup a scan key to find chunks in toast table with matching va_valueid diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index fdfc320e84..fc2b28832b 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -1257,8 +1257,8 @@ bt_target_page_check(BtreeCheckState *state) */ lowersizelimit = skey->heapkeyspace && (P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL); - if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) : - BTMaxItemSizeNoHeapTid(state->target))) + if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target, tde_nonce_size) : + BTMaxItemSizeNoHeapTid(state->target, tde_nonce_size))) { ItemPointer tid = BTreeTupleGetPointsToTID(itup); char *itid, @@ -3031,12 +3031,12 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) * to move left, in the case of backward index scans). 
*/ maxoffset = PageGetMaxOffsetNumber(page); - if (maxoffset > MaxIndexTuplesPerPage) + if (maxoffset > MaxIndexTuplesPerPage(tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)", blocknum, RelationGetRelationName(state->rel), - MaxIndexTuplesPerPage))); + MaxIndexTuplesPerPage(tde_nonce_size)))); if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) && maxoffset < P_FIRSTDATAKEY(opaque)) ereport(ERROR, diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h index a22a6dfa40..fa4cdf01c4 100644 --- a/contrib/bloom/bloom.h +++ b/contrib/bloom/bloom.h @@ -97,6 +97,18 @@ typedef uint16 BloomSignatureWord; #define DEFAULT_BLOOM_BITS 2 #define MAX_BLOOM_BITS (MAX_BLOOM_LENGTH - 1) +#define FREE_BLK_NUM_ARRAY_MAX_SIZE MAXALIGN_DOWN(BLCKSZ - SizeOfPageHeaderData - \ + MAXALIGN(sizeof(BloomPageOpaqueData)) - \ + MAXALIGN(sizeof(uint16) * 2 + \ + sizeof(uint32) + sizeof(BloomOptions))) \ + / sizeof(BlockNumber) + +#define FREE_BLK_NUM_ARRAY_SIZE(tdeNonceSize) MAXALIGN_DOWN(BLCKSZ - SizeOfPageHeaderData - \ + MAXALIGN(sizeof(BloomPageOpaqueData) + \ + tdeNonceSize) - \ + MAXALIGN(sizeof(uint16) * 2 + \ + sizeof(uint32) + sizeof(BloomOptions))) \ + / sizeof(BlockNumber) /* Bloom index options */ typedef struct BloomOptions { @@ -110,12 +122,7 @@ typedef struct BloomOptions * FreeBlockNumberArray - array of block numbers sized so that metadata fill * all space in metapage. 
*/ -typedef BlockNumber FreeBlockNumberArray[ - MAXALIGN_DOWN( - BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(BloomPageOpaqueData)) - - MAXALIGN(sizeof(uint16) * 2 + sizeof(uint32) + sizeof(BloomOptions)) - ) / sizeof(BlockNumber) -]; +typedef BlockNumber FreeBlockNumberArray[FREE_BLK_NUM_ARRAY_MAX_SIZE]; /* Metadata of bloom index */ typedef struct BloomMetaPageData @@ -131,7 +138,8 @@ typedef struct BloomMetaPageData #define BLOOM_MAGICK_NUMBER (0xDBAC0DED) /* Number of blocks numbers fit in BloomMetaPageData */ -#define BloomMetaBlockN (sizeof(FreeBlockNumberArray) / sizeof(BlockNumber)) +#define BloomMetaBlockN(tdeNonceSize) ((sizeof(BlockNumber) * FREE_BLK_NUM_ARRAY_SIZE(tdeNonceSize)) \ + / sizeof(BlockNumber)) #define BloomPageGetMeta(page) ((BloomMetaPageData *) PageGetContents(page)) @@ -149,10 +157,10 @@ typedef struct BloomState Size sizeOfBloomTuple; } BloomState; -#define BloomPageGetFreeSpace(state, page) \ +#define BloomPageGetFreeSpace(state, page, tdeNonceSize) \ (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ - BloomPageGetMaxOffset(page) * (state)->sizeOfBloomTuple \ - - MAXALIGN(sizeof(BloomPageOpaqueData))) + - MAXALIGN(sizeof(BloomPageOpaqueData) + tdeNonceSize)) /* * Tuples are very different from all other relations diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c index 754de008d4..b79bf90cbf 100644 --- a/contrib/bloom/blutils.c +++ b/contrib/bloom/blutils.c @@ -325,7 +325,7 @@ BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple) Assert(!PageIsNew(page) && !BloomPageIsDeleted(page)); /* Does new tuple fit on the page? 
*/ - if (BloomPageGetFreeSpace(state, page) < state->sizeOfBloomTuple) + if (BloomPageGetFreeSpace(state, page, tde_nonce_size) < state->sizeOfBloomTuple) return false; /* Copy new tuple to the end of page */ @@ -423,6 +423,11 @@ BloomFillMetapage(Relation index, Page metaPage) { BloomOptions *opts; BloomMetaPageData *metadata; + Size sz; + + /* Size of the actual BloomMetaPageData */ + sz = sizeof(BloomMetaPageData) - sizeof(FreeBlockNumberArray) + + sizeof(BlockNumber) * FREE_BLK_NUM_ARRAY_SIZE(tde_nonce_size); /* * Choose the index's options. If reloptions have been assigned, use @@ -438,10 +443,10 @@ BloomFillMetapage(Relation index, Page metaPage) */ BloomInitPage(metaPage, BLOOM_META); metadata = BloomPageGetMeta(metaPage); - memset(metadata, 0, sizeof(BloomMetaPageData)); + memset(metadata, 0, sz); metadata->magickNumber = BLOOM_MAGICK_NUMBER; metadata->opts = *opts; - ((PageHeader) metaPage)->pd_lower += sizeof(BloomMetaPageData); + ((PageHeader) metaPage)->pd_lower += sz; /* If this fails, probably FreeBlockNumberArray size calc is wrong: */ Assert(((PageHeader) metaPage)->pd_lower <= ((PageHeader) metaPage)->pd_upper); diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 88b0a6d290..6621128c2d 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -115,8 +115,8 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, * deleted and there is free space on it */ if (BloomPageGetMaxOffset(page) != 0 && - BloomPageGetFreeSpace(&state, page) >= state.sizeOfBloomTuple && - countPage < BloomMetaBlockN) + BloomPageGetFreeSpace(&state, page, tde_nonce_size) >= state.sizeOfBloomTuple && + countPage < BloomMetaBlockN(tde_nonce_size)) notFullPage[countPage++] = blkno; /* Did we delete something? 
*/ diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c index 102c9cea72..52c1f1f62f 100644 --- a/contrib/hstore/hstore_gist.c +++ b/contrib/hstore/hstore_gist.c @@ -8,6 +8,7 @@ #include "access/stratnum.h" #include "catalog/pg_type.h" #include "hstore.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "utils/pg_crc.h" /* gist_hstore_ops opclass options */ @@ -20,7 +21,7 @@ typedef struct /* bigint defines */ #define BITBYTE 8 #define SIGLEN_DEFAULT (sizeof(int32) * 4) -#define SIGLEN_MAX GISTMaxIndexKeySize +#define SIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define SIGLENBIT(siglen) ((siglen) * BITBYTE) #define GET_SIGLEN() (PG_HAS_OPCLASS_OPTIONS() ? \ ((GistHstoreOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \ diff --git a/contrib/intarray/_int.h b/contrib/intarray/_int.h index 304c29525c..63089bf100 100644 --- a/contrib/intarray/_int.h +++ b/contrib/intarray/_int.h @@ -4,12 +4,13 @@ #ifndef ___INT_H__ #define ___INT_H__ +#include "miscadmin.h" /* for tde_nonce_size */ #include "utils/array.h" #include "utils/memutils.h" /* number ranges for compression */ #define G_INT_NUMRANGES_DEFAULT 100 -#define G_INT_NUMRANGES_MAX ((GISTMaxIndexKeySize - VARHDRSZ) / \ +#define G_INT_NUMRANGES_MAX ((GISTMaxIndexKeySize(tde_nonce_size) - VARHDRSZ) / \ (2 * sizeof(int32))) #define G_INT_GET_NUMRANGES() (PG_HAS_OPCLASS_OPTIONS() ? \ ((GISTIntArrayOptions *) PG_GET_OPCLASS_OPTIONS())->num_ranges : \ @@ -60,7 +61,7 @@ typedef struct /* bigint defines */ #define SIGLEN_DEFAULT (63 * 4) -#define SIGLEN_MAX GISTMaxIndexKeySize +#define SIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define SIGLENBIT(siglen) ((siglen) * BITS_PER_BYTE) #define GET_SIGLEN() (PG_HAS_OPCLASS_OPTIONS() ? 
\ ((GISTIntArrayBigOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \ diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h index dc68a0c212..b1121a707e 100644 --- a/contrib/ltree/ltree.h +++ b/contrib/ltree/ltree.h @@ -216,7 +216,7 @@ int ltree_strncasecmp(const char *a, const char *b, size_t s); /* GiST support for ltree */ -#define SIGLEN_MAX GISTMaxIndexKeySize +#define SIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define SIGLEN_DEFAULT (2 * sizeof(int32)) #define BITBYTE 8 #define SIGLEN (sizeof(int32) * SIGLENINT) @@ -277,7 +277,7 @@ extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen, /* GiST support for ltree[] */ #define LTREE_ASIGLEN_DEFAULT (7 * sizeof(int32)) -#define LTREE_ASIGLEN_MAX GISTMaxIndexKeySize +#define LTREE_ASIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define LTREE_GET_ASIGLEN() (PG_HAS_OPCLASS_OPTIONS() ? \ ((LtreeGistOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \ LTREE_ASIGLEN_DEFAULT) diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c index 0e3c2deb66..4a212cfbbb 100644 --- a/contrib/pageinspect/brinfuncs.c +++ b/contrib/pageinspect/brinfuncs.c @@ -414,7 +414,7 @@ brin_revmap_data(PG_FUNCTION_ARGS) fctx = SRF_PERCALL_SETUP(); state = fctx->user_fctx; - if (state->idx < REVMAP_PAGE_MAXITEMS) + if (state->idx < REVMAP_PAGE_MAXITEMS(tde_nonce_size)) SRF_RETURN_NEXT(fctx, PointerGetDatum(&state->tids[state->idx++])); SRF_RETURN_DONE(fctx); diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c index 930f1df339..37f0a9a75a 100644 --- a/contrib/pageinspect/fsmfuncs.c +++ b/contrib/pageinspect/fsmfuncs.c @@ -48,7 +48,7 @@ fsm_page_contents(PG_FUNCTION_ARGS) initStringInfo(&sinfo); - for (i = 0; i < NodesPerPage; i++) + for (i = 0; i < NodesPerPage(tde_nonce_size); i++) { if (fsmpage->fp_nodes[i] != 0) appendStringInfo(&sinfo, "%d: %d\n", i, fsmpage->fp_nodes[i]); diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c index 
e425cbcdb8..451f67a3e8 100644 --- a/contrib/pageinspect/ginfuncs.c +++ b/contrib/pageinspect/ginfuncs.c @@ -184,13 +184,15 @@ gin_leafpage_items(PG_FUNCTION_ARGS) page = get_page_from_raw(raw_page); - if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GinPageOpaqueData))) + if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(GinPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("input page is not a valid GIN data leaf page"), errdetail("Special size %d, expected %d", (int) PageGetSpecialSize(page), - (int) MAXALIGN(sizeof(GinPageOpaqueData))))); + (int) MAXALIGN(sizeof(GinPageOpaqueData) + + tde_nonce_size)))); opaq = (GinPageOpaque) PageGetSpecialPointer(page); if (opaq->flags != (GIN_DATA | GIN_LEAF | GIN_COMPRESSED)) diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c index ff01119474..25e3717a0a 100644 --- a/contrib/pageinspect/hashfuncs.c +++ b/contrib/pageinspect/hashfuncs.c @@ -64,7 +64,8 @@ verify_hash_page(bytea *raw_page, int flags) { HashPageOpaque pageopaque; - if (PageGetSpecialSize(page) != MAXALIGN(sizeof(HashPageOpaqueData))) + if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(HashPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index table contains corrupted page"))); diff --git a/contrib/pg_surgery/heap_surgery.c b/contrib/pg_surgery/heap_surgery.c index d31e5f31fd..e718100324 100644 --- a/contrib/pg_surgery/heap_surgery.c +++ b/contrib/pg_surgery/heap_surgery.c @@ -88,7 +88,7 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) Relation rel; OffsetNumber curr_start_ptr, next_start_ptr; - bool include_this_tid[MaxHeapTuplesPerPage]; + bool include_this_tid[MaxHeapTuplesPerPageLimit]; if (RecoveryInProgress()) ereport(ERROR, @@ -206,7 +206,7 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) } /* Mark it for processing. 
*/ - Assert(offno < MaxHeapTuplesPerPage); + Assert(offno < MaxHeapTuplesPerPage(tde_nonce_size)); include_this_tid[offno] = true; } diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h index 405a1d9552..a1c1c9fb84 100644 --- a/contrib/pg_trgm/trgm.h +++ b/contrib/pg_trgm/trgm.h @@ -7,6 +7,7 @@ #include "access/gist.h" #include "access/itup.h" #include "access/stratnum.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/bufpage.h" /* @@ -75,7 +76,7 @@ typedef struct /* gist */ #define SIGLEN_DEFAULT (sizeof(int) * 3) -#define SIGLEN_MAX GISTMaxIndexKeySize +#define SIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define BITBYTE 8 #define SIGLENBIT(siglen) ((siglen) * BITBYTE - 1) /* see makesign */ diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c index 1fe193bb25..745fc25b8b 100644 --- a/contrib/pgstattuple/pgstatapprox.c +++ b/contrib/pgstattuple/pgstatapprox.c @@ -113,7 +113,8 @@ statapprox_heap(Relation rel, output_type *stat) if (!PageIsNew(page)) stat->free_space += PageGetHeapFreeSpace(page); else - stat->free_space += BLCKSZ - SizeOfPageHeaderData; + stat->free_space += BLCKSZ - SizeOfPageHeaderData - + MAXALIGN(tde_nonce_size); /* We may count the page as scanned even if it's new/empty */ scanned++; diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index 5368bb30f0..0405753ce7 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -649,7 +649,7 @@ pgstathashindex(PG_FUNCTION_ARGS) if (PageIsNew(page)) stats.unused_pages++; else if (PageGetSpecialSize(page) != - MAXALIGN(sizeof(HashPageOpaqueData))) + MAXALIGN(sizeof(HashPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" contains corrupted page at block %u", diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 21fdeff8af..4acec7f6e3 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ 
b/contrib/pgstattuple/pgstattuple.c @@ -460,7 +460,8 @@ pgstat_hash_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy); page = BufferGetPage(buf); - if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData))) + if (PageGetSpecialSize(page) == + MAXALIGN(sizeof(HashPageOpaqueData) + tde_nonce_size)) { HashPageOpaque opaque; diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c index 99b2543f76..6d13416b50 100644 --- a/src/backend/access/brin/brin_bloom.c +++ b/src/backend/access/brin/brin_bloom.c @@ -125,6 +125,7 @@ #include "access/stratnum.h" #include "catalog/pg_type.h" #include "catalog/pg_amop.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "utils/builtins.h" #include "utils/datum.h" #include "utils/lsyscache.h" @@ -211,11 +212,10 @@ typedef struct BloomOptions * a perfect guarantee, for a couple of reasons. For example, the row may * be larger because the index has multiple columns. */ -#define BloomMaxFilterSize \ +#define BloomMaxFilterSize(tdeNonceSize) \ MAXALIGN_DOWN(BLCKSZ - \ - (MAXALIGN(SizeOfPageHeaderData + \ - sizeof(ItemIdData)) + \ - MAXALIGN(sizeof(BrinSpecialSpace)) + \ + (MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)) + \ + MAXALIGN(sizeof(BrinSpecialSpace) + tdeNonceSize) + \ SizeOfBrinTuple)) /* @@ -307,9 +307,9 @@ bloom_init(int ndistinct, double false_positive_rate) * XXX This check is not perfect, because the index may have multiple * filters that are small individually, but too large when combined. 
*/ - if (nbytes > BloomMaxFilterSize) + if (nbytes > BloomMaxFilterSize(tde_nonce_size)) elog(ERROR, "the bloom filter is too large (%d > %zu)", nbytes, - BloomMaxFilterSize); + BloomMaxFilterSize(tde_nonce_size)); /* * round(log(2.0) * m / ndistinct), but assume round() may not be @@ -479,7 +479,7 @@ brin_bloom_get_ndistinct(BrinDesc *bdesc, BloomOptions *opts) Assert(BlockNumberIsValid(pagesPerRange)); - maxtuples = MaxHeapTuplesPerPage * pagesPerRange; + maxtuples = MaxHeapTuplesPerPage(tde_nonce_size) * pagesPerRange; /* * Similarly to n_distinct, negative values are relative - in this case to diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c index bd14184d76..c17b0a6f83 100644 --- a/src/backend/access/brin/brin_minmax_multi.c +++ b/src/backend/access/brin/brin_minmax_multi.c @@ -1975,11 +1975,13 @@ brin_minmax_multi_distance_tid(PG_FUNCTION_ARGS) * We use the no-check variants here, because user-supplied values may * have (ip_posid == 0). See ItemPointerCompare. */ - da1 = ItemPointerGetBlockNumberNoCheck(pa1) * MaxHeapTuplesPerPage + - ItemPointerGetOffsetNumberNoCheck(pa1); + da1 = ItemPointerGetBlockNumberNoCheck(pa1) * + MaxHeapTuplesPerPage(tde_nonce_size) + + ItemPointerGetOffsetNumberNoCheck(pa1); - da2 = ItemPointerGetBlockNumberNoCheck(pa2) * MaxHeapTuplesPerPage + - ItemPointerGetOffsetNumberNoCheck(pa2); + da2 = ItemPointerGetBlockNumberNoCheck(pa2) * + MaxHeapTuplesPerPage(tde_nonce_size) + + ItemPointerGetOffsetNumberNoCheck(pa2); PG_RETURN_FLOAT8(da2 - da1); } @@ -2451,7 +2453,7 @@ brin_minmax_multi_add_value(PG_FUNCTION_ARGS) * much lower, but meh. */ maxvalues = Min(target_maxvalues * MINMAX_BUFFER_FACTOR, - MaxHeapTuplesPerPage * pagesPerRange); + MaxHeapTuplesPerPage(tde_nonce_size) * pagesPerRange); /* but always at least the original value */ maxvalues = Max(maxvalues, target_maxvalues); @@ -2497,7 +2499,7 @@ brin_minmax_multi_add_value(PG_FUNCTION_ARGS) * much lower, but meh. 
*/ maxvalues = Min(serialized->maxvalues * MINMAX_BUFFER_FACTOR, - MaxHeapTuplesPerPage * pagesPerRange); + MaxHeapTuplesPerPage(tde_nonce_size) * pagesPerRange); /* but always at least the original value */ maxvalues = Max(maxvalues, serialized->maxvalues); diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c index df9ffc2fb8..ac5d26f754 100644 --- a/src/backend/access/brin/brin_pageops.c +++ b/src/backend/access/brin/brin_pageops.c @@ -26,11 +26,10 @@ * Maximum size of an entry in a BRIN_PAGETYPE_REGULAR page. We can tolerate * a single item per page, unlike other index AMs. */ -#define BrinMaxItemSize \ +#define BrinMaxItemSize(tdeNonceSize) \ MAXALIGN_DOWN(BLCKSZ - \ - (MAXALIGN(SizeOfPageHeaderData + \ - sizeof(ItemIdData)) + \ - MAXALIGN(sizeof(BrinSpecialSpace)))) + (MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)) + \ + MAXALIGN(sizeof(BrinSpecialSpace) + tdeNonceSize))) static Buffer brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, bool *extended); @@ -69,12 +68,12 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, Assert(newsz == MAXALIGN(newsz)); /* If the item is oversized, don't bother. */ - if (newsz > BrinMaxItemSize) + if (newsz > BrinMaxItemSize(tde_nonce_size)) { ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - newsz, BrinMaxItemSize, RelationGetRelationName(idxrel)))); + newsz, BrinMaxItemSize(tde_nonce_size), RelationGetRelationName(idxrel)))); return false; /* keep compiler quiet */ } @@ -355,12 +354,12 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, Assert(itemsz == MAXALIGN(itemsz)); /* If the item is oversized, don't even bother. 
*/ - if (itemsz > BrinMaxItemSize) + if (itemsz > BrinMaxItemSize(tde_nonce_size)) { ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - itemsz, BrinMaxItemSize, RelationGetRelationName(idxrel)))); + itemsz, BrinMaxItemSize(tde_nonce_size), RelationGetRelationName(idxrel)))); return InvalidOffsetNumber; /* keep compiler quiet */ } @@ -692,7 +691,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, Size freespace; /* callers must have checked */ - Assert(itemsz <= BrinMaxItemSize); + Assert(itemsz <= BrinMaxItemSize(tde_nonce_size)); if (BufferIsValid(oldbuf)) oldblk = BufferGetBlockNumber(oldbuf); @@ -805,7 +804,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, * page that has since been repurposed for the revmap.) */ freespace = *extended ? - BrinMaxItemSize : br_page_get_freespace(page); + BrinMaxItemSize(tde_nonce_size) : br_page_get_freespace(page); if (freespace >= itemsz) { RelationSetTargetBlock(irel, newblk); diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c index c574c8a06e..32423b3ea2 100644 --- a/src/backend/access/brin/brin_revmap.c +++ b/src/backend/access/brin/brin_revmap.c @@ -38,10 +38,10 @@ * find the logical revmap page number and index number of the revmap item for * the given heap block number. 
*/ -#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk) \ - ((heapBlk / pagesPerRange) / REVMAP_PAGE_MAXITEMS) -#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk) \ - ((heapBlk / pagesPerRange) % REVMAP_PAGE_MAXITEMS) +#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk, tdeNonceSize) \ + ((heapBlk / pagesPerRange) / REVMAP_PAGE_MAXITEMS(tdeNonceSize)) +#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk, tdeNonceSize) \ + ((heapBlk / pagesPerRange) % REVMAP_PAGE_MAXITEMS(tdeNonceSize)) struct BrinRevmap @@ -166,7 +166,7 @@ brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange, page = BufferGetPage(buf); contents = (RevmapContents *) PageGetContents(page); iptr = (ItemPointerData *) contents->rm_tids; - iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk); + iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk, tde_nonce_size); if (ItemPointerIsValid(&tid)) ItemPointerSet(iptr, @@ -243,7 +243,8 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk, contents = (RevmapContents *) PageGetContents(BufferGetPage(revmap->rm_currBuf)); iptr = contents->rm_tids; - iptr += HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk); + iptr += HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk, + tde_nonce_size); if (!ItemPointerIsValid(iptr)) { @@ -354,7 +355,8 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) /* Lock the revmap page, obtain the index tuple pointer from it */ revmapBuf = brinLockRevmapPageForUpdate(revmap, heapBlk); revmapPg = BufferGetPage(revmapBuf); - revmapOffset = HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk); + revmapOffset = HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk, + tde_nonce_size); contents = (RevmapContents *) PageGetContents(revmapPg); iptr = contents->rm_tids; @@ -454,7 +456,8 @@ revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk) BlockNumber targetblk; /* obtain revmap block number, skip 1 for metapage block */ - targetblk = 
HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1; + targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk, + tde_nonce_size) + 1; /* Normal case: the revmap page is already allocated */ if (targetblk <= revmap->rm_lastRevmapPage) @@ -512,7 +515,8 @@ revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk) BlockNumber targetblk; /* obtain revmap block number, skip 1 for metapage block */ - targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1; + targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk, + tde_nonce_size) + 1; /* Extend the revmap, if necessary */ while (targetblk > revmap->rm_lastRevmapPage) diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c index ee05372f79..74c366dc9a 100644 --- a/src/backend/access/brin/brin_tuple.c +++ b/src/backend/access/brin/brin_tuple.c @@ -217,7 +217,8 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple, * datatype, try to compress it in-line. */ if (!VARATT_IS_EXTENDED(DatumGetPointer(value)) && - VARSIZE(DatumGetPointer(value)) > TOAST_INDEX_TARGET && + VARSIZE(DatumGetPointer(value)) > + TOAST_INDEX_TARGET(tde_nonce_size) && (atttype->typstorage == TYPSTORAGE_EXTENDED || atttype->typstorage == TYPSTORAGE_MAIN)) { diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 5212560411..1939647f6f 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -99,7 +99,8 @@ index_form_tuple(TupleDesc tupleDescriptor, * try to compress it in-line. 
*/ if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) && - VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET && + VARSIZE(DatumGetPointer(untoasted_values[i])) > + TOAST_INDEX_TARGET(tde_nonce_size) && (att->attstorage == TYPSTORAGE_EXTENDED || att->attstorage == TYPSTORAGE_MAIN)) { diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 5554275e64..45e66ad378 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -328,7 +328,7 @@ static relopt_int intRelOpts[] = RELOPT_KIND_HEAP, ShareUpdateExclusiveLock }, - TOAST_TUPLE_TARGET, 128, TOAST_TUPLE_TARGET_MAIN + TOAST_TUPLE_TARGET_MAX_LIMIT, 128, TOAST_TUPLE_TARGET_MAIN_MAX_LIMIT }, { { @@ -569,6 +569,14 @@ initialize_reloptions(void) { Assert(DoLockModesConflict(intRelOpts[i].gen.lockmode, intRelOpts[i].gen.lockmode)); + + /* Set the actual run time values */ + if (strcmp(intRelOpts[i].gen.name, "toast_tuple_target") == 0) + { + intRelOpts[i].default_val = TOAST_TUPLE_TARGET(tde_nonce_size); + intRelOpts[i].max = TOAST_TUPLE_TARGET_MAIN(tde_nonce_size); + } + j++; } for (i = 0; realRelOpts[i].gen.name; i++) diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c index c036319a0b..5a1357a4a3 100644 --- a/src/backend/access/common/toast_internals.c +++ b/src/backend/access/common/toast_internals.c @@ -131,7 +131,7 @@ toast_save_datum(Relation rel, Datum value, { struct varlena hdr; /* this is to make the union big enough for a chunk: */ - char data[TOAST_MAX_CHUNK_SIZE + VARHDRSZ]; + char data[TOAST_MAX_CHUNK_SIZE_LIMIT + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; } chunk_data; @@ -309,7 +309,7 @@ toast_save_datum(Relation rel, Datum value, /* * Calculate the size of this chunk */ - chunk_size = Min(TOAST_MAX_CHUNK_SIZE, data_todo); + chunk_size = Min(TOAST_MAX_CHUNK_SIZE(tde_nonce_size), data_todo); /* * Build a tuple and store it 
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 06c0586543..0471d18299 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -407,7 +407,7 @@ GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset) * "standard" page layout, so that we can squeeze out the unused space * from full-page images. */ - GinDataPageSetDataSize(page, maxoff * sizeof(PostingItem)); + GinDataPageSetDataSize(page, maxoff * sizeof(PostingItem), tde_nonce_size); } /* @@ -429,7 +429,7 @@ GinPageDeletePostingItem(Page page, OffsetNumber offset) maxoff--; GinPageGetOpaque(page)->maxoff = maxoff; - GinDataPageSetDataSize(page, maxoff * sizeof(PostingItem)); + GinDataPageSetDataSize(page, maxoff * sizeof(PostingItem), tde_nonce_size); } /* @@ -535,7 +535,8 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, * a single byte, and we can use all the free space on the old page as * well as the new page. For simplicity, ignore segment overhead etc. 
*/ - maxitems = Min(maxitems, freespace + GinDataPageMaxDataSize); + maxitems = Min(maxitems, freespace + + GinDataPageMaxDataSize(tde_nonce_size)); } else { @@ -550,7 +551,7 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, int nnewsegments; nnewsegments = freespace / GinPostingListSegmentMaxSize; - nnewsegments += GinDataPageMaxDataSize / GinPostingListSegmentMaxSize; + nnewsegments += GinDataPageMaxDataSize(tde_nonce_size) / GinPostingListSegmentMaxSize; maxitems = Min(maxitems, nnewsegments * MinTuplesPerSegment); } @@ -665,8 +666,8 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, leaf->lastleft = dlist_prev_node(&leaf->segments, leaf->lastleft); } } - Assert(leaf->lsize <= GinDataPageMaxDataSize); - Assert(leaf->rsize <= GinDataPageMaxDataSize); + Assert(leaf->lsize <= GinDataPageMaxDataSize(tde_nonce_size)); + Assert(leaf->rsize <= GinDataPageMaxDataSize(tde_nonce_size)); /* * Fetch the max item in the left page's last segment; it becomes the @@ -755,7 +756,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) if (seginfo->seg) oldsegsize = SizeOfGinPostingList(seginfo->seg); else - oldsegsize = GinDataPageMaxDataSize; + oldsegsize = GinDataPageMaxDataSize(tde_nonce_size); cleaned = ginVacuumItemPointers(gvs, seginfo->items, @@ -1015,8 +1016,8 @@ dataPlaceToPageLeafRecompress(Buffer buf, disassembledLeaf *leaf) } } - Assert(newsize <= GinDataPageMaxDataSize); - GinDataPageSetDataSize(page, newsize); + Assert(newsize <= GinDataPageMaxDataSize(tde_nonce_size)); + GinDataPageSetDataSize(page, newsize, tde_nonce_size); } /* @@ -1068,7 +1069,7 @@ dataPlaceToPageLeafSplit(disassembledLeaf *leaf, } } Assert(lsize == leaf->lsize); - GinDataPageSetDataSize(lpage, lsize); + GinDataPageSetDataSize(lpage, lsize, tde_nonce_size); *GinDataPageGetRightBound(lpage) = lbound; /* Copy the segments that go to the right page */ @@ -1092,7 +1093,7 @@ dataPlaceToPageLeafSplit(disassembledLeaf 
*leaf, break; } Assert(rsize == leaf->rsize); - GinDataPageSetDataSize(rpage, rsize); + GinDataPageSetDataSize(rpage, rsize, tde_nonce_size); *GinDataPageGetRightBound(rpage) = rbound; } @@ -1121,7 +1122,7 @@ dataBeginPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack, Page page = BufferGetPage(buf); /* If it doesn't fit, deal with split case */ - if (GinNonLeafDataPageGetFreeSpace(page) < sizeof(PostingItem)) + if (GinNonLeafDataPageGetFreeSpace(page, tde_nonce_size) < sizeof(PostingItem)) { dataSplitPageInternal(btree, buf, stack, insertdata, updateblkno, newlpage, newrpage); @@ -1287,7 +1288,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, * end. This packs the index as tight as possible. */ if (btree->isBuild && GinPageRightMost(oldpage)) - separator = GinNonLeafDataPageGetFreeSpace(rpage) / sizeof(PostingItem); + separator = GinNonLeafDataPageGetFreeSpace(rpage, tde_nonce_size) / sizeof(PostingItem); else separator = nitems / 2; nleftitems = separator; @@ -1305,8 +1306,8 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, /* * Also set pd_lower for both pages, like GinDataPageAddPostingItem does. */ - GinDataPageSetDataSize(lpage, nleftitems * sizeof(PostingItem)); - GinDataPageSetDataSize(rpage, nrightitems * sizeof(PostingItem)); + GinDataPageSetDataSize(lpage, nleftitems * sizeof(PostingItem), tde_nonce_size); + GinDataPageSetDataSize(rpage, nrightitems * sizeof(PostingItem), tde_nonce_size); /* set up right bound for left page */ bound = GinDataPageGetRightBound(lpage); @@ -1684,7 +1685,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) * copying to the page. Did we exceed the size that fits on one page? 
*/ segsize = SizeOfGinPostingList(seginfo->seg); - if (pgused + segsize > GinDataPageMaxDataSize) + if (pgused + segsize > GinDataPageMaxDataSize(tde_nonce_size)) { if (!needsplit) { @@ -1724,8 +1725,8 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) else leaf->rsize = pgused; - Assert(leaf->lsize <= GinDataPageMaxDataSize); - Assert(leaf->rsize <= GinDataPageMaxDataSize); + Assert(leaf->lsize <= GinDataPageMaxDataSize(tde_nonce_size)); + Assert(leaf->rsize <= GinDataPageMaxDataSize(tde_nonce_size)); /* * Make a palloc'd copy of every segment after the first modified one, @@ -1801,7 +1802,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, GinPostingListSegmentMaxSize, &npacked); segsize = SizeOfGinPostingList(segment); - if (rootsize + segsize > GinDataPageMaxDataSize) + if (rootsize + segsize > GinDataPageMaxDataSize(tde_nonce_size)) break; memcpy(ptr, segment, segsize); @@ -1810,7 +1811,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, nrootitems += npacked; pfree(segment); } - GinDataPageSetDataSize(tmppage, rootsize); + GinDataPageSetDataSize(tmppage, rootsize, tde_nonce_size); /* * All set. Get a new physical page, and copy the in-memory page to it. 
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 29c36bc067..afa24680a0 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -102,13 +102,13 @@ GinFormTuple(GinState *ginstate, newsize = MAXALIGN(newsize); - if (newsize > GinMaxItemSize) + if (newsize > GinMaxItemSize(tde_nonce_size)) { if (errorTooBig) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - (Size) newsize, (Size) GinMaxItemSize, + (Size) newsize, (Size) GinMaxItemSize(tde_nonce_size), RelationGetRelationName(ginstate->index)))); pfree(itup); return NULL; diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index e0d9940946..94d717b719 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -38,8 +38,9 @@ /* GUC parameter */ int gin_pending_list_limit = 0; -#define GIN_PAGE_FREESIZE \ - ( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) ) +#define GIN_PAGE_FREESIZE(tdeNonceSize) \ + ( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ + MAXALIGN(sizeof(GinPageOpaqueData) + tdeNonceSize) ) typedef struct KeyArray { @@ -183,7 +184,7 @@ makeSublist(Relation index, IndexTuple *tuples, int32 ntuples, tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData); - if (size + tupsize > GinListPageSize) + if (size + tupsize > GinListPageSize(tde_nonce_size)) { /* won't fit, force a new page and reprocess */ i--; @@ -249,7 +250,8 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) */ CheckForSerializableConflictIn(index, NULL, GIN_METAPAGE_BLKNO); - if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize) + if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > + GinListPageSize(tde_nonce_size)) { /* * Total size is greater than one page => make sublist @@ -447,7 +449,8 @@ 
ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * ginInsertCleanup() should not be called inside our CRIT_SECTION. */ cleanupSize = GinGetPendingListCleanupSize(index); - if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L) + if (metadata->nPendingPages * GIN_PAGE_FREESIZE(tde_nonce_size) > + cleanupSize * 1024L) needCleanup = true; UnlockReleaseBuffer(metabuffer); diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 0e8672c9e9..41d4f10738 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -75,7 +75,8 @@ addItemPointersToLeafTuple(GinState *ginstate, /* Compress the posting list, and try to a build tuple with room for it */ res = NULL; - compressedList = ginCompressPostingList(newItems, newNPosting, GinMaxItemSize, + compressedList = ginCompressPostingList(newItems, newNPosting, + GinMaxItemSize(tde_nonce_size), NULL); pfree(newItems); if (compressedList) @@ -135,7 +136,9 @@ buildFreshLeafTuple(GinState *ginstate, GinPostingList *compressedList; /* try to build a posting list tuple with all the items */ - compressedList = ginCompressPostingList(items, nitem, GinMaxItemSize, NULL); + compressedList = ginCompressPostingList(items, nitem, + GinMaxItemSize(tde_nonce_size), + NULL); if (compressedList) { res = GinFormTuple(ginstate, attnum, key, category, diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index a276eb020b..fc2f4c957a 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -514,7 +514,9 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3 if (nitems > 0) { - plist = ginCompressPostingList(items, nitems, GinMaxItemSize, NULL); + plist = ginCompressPostingList(items, nitems, + GinMaxItemSize(tde_nonce_size), + NULL); plistsize = SizeOfGinPostingList(plist); } else diff --git a/src/backend/access/gin/ginxlog.c 
b/src/backend/access/gin/ginxlog.c index 09ce4d6a5b..d7af618c0f 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -59,7 +59,7 @@ ginRedoCreatePTree(XLogReaderState *record) /* Place page data */ memcpy(GinDataLeafPageGetPostingList(page), ptr, data->size); - GinDataPageSetDataSize(page, data->size); + GinDataPageSetDataSize(page, data->size, tde_nonce_size); PageSetLSN(page, lsn); @@ -158,7 +158,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) totalsize = 0; } - GinDataPageSetDataSize(page, totalsize); + GinDataPageSetDataSize(page, totalsize, tde_nonce_size); GinPageSetCompressed(page); GinPageGetOpaque(page)->maxoff = InvalidOffsetNumber; } @@ -312,7 +312,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) } totalsize = writePtr - (Pointer) GinDataLeafPageGetPostingList(page); - GinDataPageSetDataSize(page, totalsize); + GinDataPageSetDataSize(page, totalsize, tde_nonce_size); } static void diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 0683f42c25..9d503630a5 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1439,7 +1439,7 @@ gistSplit(Relation r, ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", - IndexTupleSize(itup[0]), GiSTPageSize, + IndexTupleSize(itup[0]), GiSTPageSize(tde_nonce_size), RelationGetRelationName(r)))); memset(v.spl_lisnull, true, @@ -1641,7 +1641,7 @@ freeGISTstate(GISTSTATE *giststate) static void gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) { - OffsetNumber deletable[MaxIndexTuplesPerPage]; + OffsetNumber deletable[MaxIndexTuplesPerPageLimit]; int ndeletable = 0; OffsetNumber offnum, maxoff; diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index f46a42197c..010d069ce5 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ 
-619,6 +619,7 @@ gistInitBuffering(GISTBuildState *buildstate) /* Calc space of index page which is available for index tuples */ pageFreeSpace = BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) + - tde_nonce_size - sizeof(ItemIdData) - buildstate->freespace; @@ -775,6 +776,7 @@ calculatePagesPerBuffer(GISTBuildState *buildstate, int levelStep) /* Calc space of index page which is available for index tuples */ pageFreeSpace = BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) + - tde_nonce_size - sizeof(ItemIdData) - buildstate->freespace; diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index c8f7e781c6..178e7f6b44 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -660,12 +660,12 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = - (OffsetNumber *) palloc(MaxIndexTuplesPerPage + (OffsetNumber *) palloc(MaxIndexTuplesPerPage(tde_nonce_size) * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } - if (so->numKilled < MaxIndexTuplesPerPage) + if (so->numKilled < MaxIndexTuplesPerPage(tde_nonce_size)) so->killedItems[so->numKilled++] = so->pageData[so->curPageData - 1].offnum; } @@ -697,12 +697,12 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = - (OffsetNumber *) palloc(MaxIndexTuplesPerPage + (OffsetNumber *) palloc(MaxIndexTuplesPerPage(tde_nonce_size) * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } - if (so->numKilled < MaxIndexTuplesPerPage) + if (so->numKilled < MaxIndexTuplesPerPage(tde_nonce_size)) so->killedItems[so->numKilled++] = so->pageData[so->curPageData - 1].offnum; } diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 8dcd53c457..ce1a5447bb 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -84,7 +84,7 @@ gistfitpage(IndexTuple 
*itvec, int len) size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData); /* TODO: Consider fillfactor */ - return (size <= GiSTPageSize); + return (size <= GiSTPageSize(tde_nonce_size)); } /* @@ -803,7 +803,8 @@ gistcheckpage(Relation rel, Buffer buf) /* * Additionally check that the special area looks sane. */ - if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData))) + if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(GISTPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" contains corrupted page at block %u", diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 0752fb38a9..2492eab5aa 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -308,9 +308,9 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir) */ if (so->killedItems == NULL) so->killedItems = (int *) - palloc(MaxIndexTuplesPerPage * sizeof(int)); + palloc(MaxIndexTuplesPerPage(tde_nonce_size) * sizeof(int)); - if (so->numKilled < MaxIndexTuplesPerPage) + if (so->numKilled < MaxIndexTuplesPerPage(tde_nonce_size)) so->killedItems[so->numKilled++] = so->currPos.itemIndex; } diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index d254a00b6a..b4fc39d9e2 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -76,11 +76,11 @@ restart_insert: * * XXX this is useless code if we are only storing hash keys. */ - if (itemsz > HashMaxItemSize(metapage)) + if (itemsz > HashMaxItemSize(metapage, tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds hash maximum %zu", - itemsz, HashMaxItemSize(metapage)), + itemsz, HashMaxItemSize(metapage, tde_nonce_size)), errhint("Values larger than a buffer page cannot be indexed."))); /* Lock the primary bucket page for the target bucket. 
*/ diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 1ff2e0c18e..c02d4e66ff 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -866,9 +866,9 @@ _hash_squeezebucket(Relation rel, OffsetNumber roffnum; OffsetNumber maxroffnum; OffsetNumber deletable[MaxOffsetNumber]; - IndexTuple itups[MaxIndexTuplesPerPage]; - Size tups_size[MaxIndexTuplesPerPage]; - OffsetNumber itup_offsets[MaxIndexTuplesPerPage]; + IndexTuple itups[MaxIndexTuplesPerPageLimit]; + Size tups_size[MaxIndexTuplesPerPageLimit]; + OffsetNumber itup_offsets[MaxIndexTuplesPerPageLimit]; uint16 ndeletable = 0; uint16 nitups = 0; Size all_tups_size = 0; diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 49a9867787..46c1597e96 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -542,7 +542,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, metap->hashm_ntuples = 0; metap->hashm_nmaps = 0; metap->hashm_ffactor = ffactor; - metap->hashm_bsize = HashGetMaxBitmapSize(page); + metap->hashm_bsize = HashGetMaxBitmapSize(page, tde_nonce_size); /* find largest bitmap array size that will fit in page size */ lshift = pg_leftmost_one_pos32(metap->hashm_bsize); @@ -1082,8 +1082,8 @@ _hash_splitbucket(Relation rel, Page npage; HashPageOpaque oopaque; HashPageOpaque nopaque; - OffsetNumber itup_offsets[MaxIndexTuplesPerPage]; - IndexTuple itups[MaxIndexTuplesPerPage]; + OffsetNumber itup_offsets[MaxIndexTuplesPerPageLimit]; + IndexTuple itups[MaxIndexTuplesPerPageLimit]; Size all_tups_size = 0; int i; uint16 nitups = 0; diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index 2ffa28e8f7..d08531f4e8 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -538,7 +538,7 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) itemIndex = 
_hash_load_qualified_items(scan, page, offnum, dir); - if (itemIndex != MaxIndexTuplesPerPage) + if (itemIndex != MaxIndexTuplesPerPage(tde_nonce_size)) break; /* @@ -577,8 +577,8 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) } so->currPos.firstItem = itemIndex; - so->currPos.lastItem = MaxIndexTuplesPerPage - 1; - so->currPos.itemIndex = MaxIndexTuplesPerPage - 1; + so->currPos.lastItem = MaxIndexTuplesPerPage(tde_nonce_size) - 1; + so->currPos.itemIndex = MaxIndexTuplesPerPage(tde_nonce_size) - 1; } if (so->currPos.buf == so->hashso_bucket_buf || @@ -658,13 +658,13 @@ _hash_load_qualified_items(IndexScanDesc scan, Page page, offnum = OffsetNumberNext(offnum); } - Assert(itemIndex <= MaxIndexTuplesPerPage); + Assert(itemIndex <= MaxIndexTuplesPerPage(tde_nonce_size)); return itemIndex; } else { /* load items[] in descending order */ - itemIndex = MaxIndexTuplesPerPage; + itemIndex = MaxIndexTuplesPerPage(tde_nonce_size); while (offnum >= FirstOffsetNumber) { diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 519872850e..eb8b3c0da0 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -17,6 +17,7 @@ #include "access/hash.h" #include "access/reloptions.h" #include "access/relscan.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "port/pg_bitutils.h" #include "storage/buf_internals.h" #include "utils/lsyscache.h" @@ -229,7 +230,8 @@ _hash_checkpage(Relation rel, Buffer buf, int flags) /* * Additionally check that the special area looks sane. 
*/ - if (PageGetSpecialSize(page) != MAXALIGN(sizeof(HashPageOpaqueData))) + if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(HashPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" contains corrupted page at block %u", diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 6ac07f2fda..af2b3f6433 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -475,7 +475,7 @@ heapgetpage(TableScanDesc sscan, BlockNumber page) LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - Assert(ntup <= MaxHeapTuplesPerPage); + Assert(ntup <= MaxHeapTuplesPerPage(tde_nonce_size)); scan->rs_ntuples = ntup; } @@ -2349,7 +2349,8 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, Assert(!HeapTupleHasExternal(tup)); return tup; } - else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) + else if (HeapTupleHasExternal(tup) || tup->t_len > + TOAST_TUPLE_THRESHOLD(tde_nonce_size)) return heap_toast_insert_or_update(relation, tup, NULL, options); else return tup; @@ -3733,7 +3734,7 @@ l2: else need_toast = (HeapTupleHasExternal(&oldtup) || HeapTupleHasExternal(newtup) || - newtup->t_len > TOAST_TUPLE_THRESHOLD); + newtup->t_len > TOAST_TUPLE_THRESHOLD(tde_nonce_size)); pagefree = PageGetHeapFreeSpace(page); @@ -8931,7 +8932,7 @@ heap_xlog_insert(XLogReaderState *record) union { HeapTupleHeaderData hdr; - char data[MaxHeapTupleSize]; + char data[MaxHeapTupleSizeLimit]; } tbuf; HeapTupleHeader htup; xl_heap_header xlhdr; @@ -8991,7 +8992,7 @@ heap_xlog_insert(XLogReaderState *record) data = XLogRecGetBlockData(record, 0, &datalen); newlen = datalen - SizeOfHeapHeader; - Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize); + Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize(tde_nonce_size)); memcpy((char *) &xlhdr, data, SizeOfHeapHeader); data += SizeOfHeapHeader; @@ -9057,7 +9058,7 @@ 
heap_xlog_multi_insert(XLogReaderState *record) union { HeapTupleHeaderData hdr; - char data[MaxHeapTupleSize]; + char data[MaxHeapTupleSizeLimit]; } tbuf; HeapTupleHeader htup; uint32 newlen; @@ -9135,7 +9136,7 @@ heap_xlog_multi_insert(XLogReaderState *record) tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple; newlen = xlhdr->datalen; - Assert(newlen <= MaxHeapTupleSize); + Assert(newlen <= MaxHeapTupleSize(tde_nonce_size)); htup = &tbuf.hdr; MemSet((char *) htup, 0, SizeofHeapTupleHeader); /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */ @@ -9214,7 +9215,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update) union { HeapTupleHeaderData hdr; - char data[MaxHeapTupleSize]; + char data[MaxHeapTupleSizeLimit]; } tbuf; xl_heap_header xlhdr; uint32 newlen; @@ -9370,7 +9371,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update) recdata += SizeOfHeapHeader; tuplen = recdata_end - recdata; - Assert(tuplen <= MaxHeapTupleSize); + Assert(tuplen <= MaxHeapTupleSize(tde_nonce_size)); htup = &tbuf.hdr; MemSet((char *) htup, 0, SizeofHeapTupleHeader); diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index 61d9044816..53dd147dbe 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -1185,7 +1185,7 @@ heapam_index_build_range_scan(Relation heapRelation, TransactionId OldestXmin; BlockNumber previous_blkno = InvalidBlockNumber; BlockNumber root_blkno = InvalidBlockNumber; - OffsetNumber root_offsets[MaxHeapTuplesPerPage]; + OffsetNumber root_offsets[MaxHeapTuplesPerPageLimit]; /* * sanity checks @@ -1748,8 +1748,8 @@ heapam_index_validate_scan(Relation heapRelation, EState *estate; ExprContext *econtext; BlockNumber root_blkno = InvalidBlockNumber; - OffsetNumber root_offsets[MaxHeapTuplesPerPage]; - bool in_index[MaxHeapTuplesPerPage]; + OffsetNumber root_offsets[MaxHeapTuplesPerPageLimit]; + bool in_index[MaxHeapTuplesPerPageLimit]; BlockNumber 
previous_blkno = InvalidBlockNumber; /* state variables for the merge */ @@ -2069,7 +2069,7 @@ heapam_relation_needs_toast_table(Relation rel) tuple_length = MAXALIGN(SizeofHeapTupleHeader + BITMAPLEN(tupdesc->natts)) + MAXALIGN(data_length); - return (tuple_length > TOAST_TUPLE_THRESHOLD); + return (tuple_length > TOAST_TUPLE_THRESHOLD(tde_nonce_size)); } /* @@ -2089,8 +2089,8 @@ heapam_relation_toast_am(Relation rel) #define HEAP_OVERHEAD_BYTES_PER_TUPLE \ (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData)) -#define HEAP_USABLE_BYTES_PER_PAGE \ - (BLCKSZ - SizeOfPageHeaderData) +#define HEAP_USABLE_BYTES_PER_PAGE(tdeNonceSize) \ + (BLCKSZ - SizeOfPageHeaderData - MAXALIGN(tdeNonceSize)) static void heapam_estimate_rel_size(Relation rel, int32 *attr_widths, @@ -2100,7 +2100,7 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths, table_block_relation_estimate_size(rel, attr_widths, pages, tuples, allvisfrac, HEAP_OVERHEAD_BYTES_PER_TUPLE, - HEAP_USABLE_BYTES_PER_PAGE); + HEAP_USABLE_BYTES_PER_PAGE(tde_nonce_size)); } @@ -2216,7 +2216,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan, LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - Assert(ntup <= MaxHeapTuplesPerPage); + Assert(ntup <= MaxHeapTuplesPerPage(tde_nonce_size)); hscan->rs_ntuples = ntup; return ntup > 0; diff --git a/src/backend/access/heap/heaptoast.c b/src/backend/access/heap/heaptoast.c index 55bbe1d584..bdad0d761a 100644 --- a/src/backend/access/heap/heaptoast.c +++ b/src/backend/access/heap/heaptoast.c @@ -174,7 +174,7 @@ heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, hoff += BITMAPLEN(numAttrs); hoff = MAXALIGN(hoff); /* now convert to a limit on the tuple data size */ - maxDataLen = RelationGetToastTupleTarget(rel, TOAST_TUPLE_TARGET) - hoff; + maxDataLen = RelationGetToastTupleTarget(rel, TOAST_TUPLE_TARGET(tde_nonce_size)) - hoff; /* * Look for attributes with attstorage EXTENDED to compress. 
Also find @@ -255,7 +255,7 @@ heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * increase the target tuple size, so that MAIN attributes aren't stored * externally unless really necessary. */ - maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff; + maxDataLen = TOAST_TUPLE_TARGET_MAIN(tde_nonce_size) - hoff; while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && @@ -634,21 +634,23 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, SysScanDesc toastscan; HeapTuple ttup; int32 expectedchunk; - int32 totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1; + int32 totalchunks; int startchunk; int endchunk; int num_indexes; int validIndex; SnapshotData SnapshotToast; + totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE(tde_nonce_size)) + 1; + /* Look for the valid index of toast relation */ validIndex = toast_open_indexes(toastrel, AccessShareLock, &toastidxs, &num_indexes); - startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE; - endchunk = (sliceoffset + slicelength - 1) / TOAST_MAX_CHUNK_SIZE; + startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE(tde_nonce_size); + endchunk = (sliceoffset + slicelength - 1) / TOAST_MAX_CHUNK_SIZE(tde_nonce_size); Assert(endchunk <= totalchunks); /* Set up a scan key to fetch from the index. */ @@ -749,8 +751,13 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, curchunk, startchunk, endchunk, valueid, RelationGetRelationName(toastrel)))); - expected_size = curchunk < totalchunks - 1 ? 
TOAST_MAX_CHUNK_SIZE - : attrsize - ((totalchunks - 1) * TOAST_MAX_CHUNK_SIZE); + + if (curchunk < totalchunks - 1) + expected_size = TOAST_MAX_CHUNK_SIZE(tde_nonce_size); + else + expected_size = attrsize - ((totalchunks - 1) * + TOAST_MAX_CHUNK_SIZE(tde_nonce_size)); + if (chunksize != expected_size) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -765,12 +772,12 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, chcpystrt = 0; chcpyend = chunksize - 1; if (curchunk == startchunk) - chcpystrt = sliceoffset % TOAST_MAX_CHUNK_SIZE; + chcpystrt = sliceoffset % TOAST_MAX_CHUNK_SIZE(tde_nonce_size); if (curchunk == endchunk) - chcpyend = (sliceoffset + slicelength - 1) % TOAST_MAX_CHUNK_SIZE; + chcpyend = (sliceoffset + slicelength - 1) % TOAST_MAX_CHUNK_SIZE(tde_nonce_size); memcpy(VARDATA(result) + - (curchunk * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt, + (curchunk * TOAST_MAX_CHUNK_SIZE(tde_nonce_size) - sliceoffset) + chcpystrt, chunkdata + chcpystrt, (chcpyend - chcpystrt) + 1); diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index d34edb4190..acc90105c9 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -354,11 +354,11 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * If we're gonna fail for oversize tuple, do it right away */ - if (len > MaxHeapTupleSize) + if (len > MaxHeapTupleSize(tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("row is too big: size %zu, maximum size %zu", - len, MaxHeapTupleSize))); + len, MaxHeapTupleSize(tde_nonce_size)))); /* Compute desired extra freespace due to fillfactor option */ saveFreeSpace = RelationGetTargetPageFreeSpace(relation, @@ -370,8 +370,8 @@ RelationGetBufferForTuple(Relation relation, Size len, * somewhat arbitrary, but it should prevent most unnecessary relation * extensions while inserting large tuples into low-fillfactor tables. 
*/ - nearlyEmptyFreeSpace = MaxHeapTupleSize - - (MaxHeapTuplesPerPage / 8 * sizeof(ItemIdData)); + nearlyEmptyFreeSpace = MaxHeapTupleSize(tde_nonce_size) - + (MaxHeapTuplesPerPage(tde_nonce_size) / 8 * sizeof(ItemIdData)); if (len + saveFreeSpace > nearlyEmptyFreeSpace) targetFreeSpace = Max(len, nearlyEmptyFreeSpace); else diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 15ca1b304a..3d1f3c7b42 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -53,11 +53,11 @@ typedef struct int ndead; int nunused; /* arrays that accumulate indexes of items to be changed */ - OffsetNumber redirected[MaxHeapTuplesPerPage * 2]; - OffsetNumber nowdead[MaxHeapTuplesPerPage]; - OffsetNumber nowunused[MaxHeapTuplesPerPage]; + OffsetNumber redirected[MaxHeapTuplesPerPageLimit * 2]; + OffsetNumber nowdead[MaxHeapTuplesPerPageLimit]; + OffsetNumber nowunused[MaxHeapTuplesPerPageLimit]; /* marked[i] is true if item i is entered in one of the above arrays */ - bool marked[MaxHeapTuplesPerPage + 1]; + bool marked[MaxHeapTuplesPerPageLimit + 1]; } PruneState; /* Local functions */ @@ -517,7 +517,7 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate) OffsetNumber latestdead = InvalidOffsetNumber, maxoff = PageGetMaxOffsetNumber(dp), offnum; - OffsetNumber chainitems[MaxHeapTuplesPerPage]; + OffsetNumber chainitems[MaxHeapTuplesPerPageLimit]; int nchain = 0, i; HeapTupleData tup; @@ -791,7 +791,7 @@ static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum) { - Assert(prstate->nredirected < MaxHeapTuplesPerPage); + Assert(prstate->nredirected < MaxHeapTuplesPerPage(tde_nonce_size)); prstate->redirected[prstate->nredirected * 2] = offnum; prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum; prstate->nredirected++; @@ -805,7 +805,7 @@ heap_prune_record_redirect(PruneState *prstate, static void heap_prune_record_dead(PruneState 
*prstate, OffsetNumber offnum) { - Assert(prstate->ndead < MaxHeapTuplesPerPage); + Assert(prstate->ndead < MaxHeapTuplesPerPage(tde_nonce_size)); prstate->nowdead[prstate->ndead] = offnum; prstate->ndead++; Assert(!prstate->marked[offnum]); @@ -816,7 +816,7 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum) static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum) { - Assert(prstate->nunused < MaxHeapTuplesPerPage); + Assert(prstate->nunused < MaxHeapTuplesPerPage(tde_nonce_size)); prstate->nowunused[prstate->nunused] = offnum; prstate->nunused++; Assert(!prstate->marked[offnum]); @@ -903,7 +903,7 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets) maxoff; MemSet(root_offsets, InvalidOffsetNumber, - MaxHeapTuplesPerPage * sizeof(OffsetNumber)); + MaxHeapTuplesPerPage(tde_nonce_size) * sizeof(OffsetNumber)); maxoff = PageGetMaxOffsetNumber(page); for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index 1aff62cd42..4e37187378 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -637,7 +637,8 @@ raw_heap_insert(RewriteState state, HeapTuple tup) Assert(!HeapTupleHasExternal(tup)); heaptup = tup; } - else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) + else if (HeapTupleHasExternal(tup) || + tup->t_len > TOAST_TUPLE_THRESHOLD(tde_nonce_size)) { int options = HEAP_INSERT_SKIP_FSM; @@ -659,11 +660,11 @@ raw_heap_insert(RewriteState state, HeapTuple tup) /* * If we're gonna fail for oversize tuple, do it right away */ - if (len > MaxHeapTupleSize) + if (len > MaxHeapTupleSize(tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("row is too big: size %zu, maximum size %zu", - len, MaxHeapTupleSize))); + len, MaxHeapTupleSize(tde_nonce_size)))); /* Compute desired extra freespace due to fillfactor option */ 
saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 9f1f8e340d..08b50e4d2f 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -130,7 +130,7 @@ * provide an upper limit to memory allocated when vacuuming small * tables. */ -#define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage +#define LAZY_ALLOC_TUPLES(tdeNonceSize) MaxHeapTuplesPerPage(tdeNonceSize) /* * Before we consider skipping a page that's marked as clean in @@ -1136,7 +1136,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * dead-tuple TIDs, pause and do a cycle of vacuuming before we tackle * this page. */ - if ((dead_tuples->max_tuples - dead_tuples->num_tuples) < MaxHeapTuplesPerPage && + if ((dead_tuples->max_tuples - dead_tuples->num_tuples) < + MaxHeapTuplesPerPage(tde_nonce_size) && dead_tuples->num_tuples > 0) { /* @@ -1686,8 +1687,8 @@ lazy_scan_prune(LVRelState *vacrel, num_tuples, live_tuples; int nfrozen; - OffsetNumber deadoffsets[MaxHeapTuplesPerPage]; - xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage]; + OffsetNumber deadoffsets[MaxHeapTuplesPerPageLimit]; + xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPageLimit]; maxoff = PageGetMaxOffsetNumber(page); @@ -2385,7 +2386,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, { LVDeadTuples *dead_tuples = vacrel->dead_tuples; Page page = BufferGetPage(buffer); - OffsetNumber unused[MaxHeapTuplesPerPage]; + OffsetNumber unused[MaxHeapTuplesPerPageLimit]; int uncnt = 0; TransactionId visibility_cutoff_xid; bool all_frozen; @@ -3449,14 +3450,14 @@ compute_max_dead_tuples(BlockNumber relblocks, bool hasindex) maxtuples = Min(maxtuples, MAXDEADTUPLES(MaxAllocSize)); /* curious coding here to ensure the multiplication can't overflow */ - if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks) - maxtuples = relblocks * LAZY_ALLOC_TUPLES; + if 
((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES(tde_nonce_size)) > relblocks) + maxtuples = relblocks * LAZY_ALLOC_TUPLES(tde_nonce_size); /* stay sane if small maintenance_work_mem */ - maxtuples = Max(maxtuples, MaxHeapTuplesPerPage); + maxtuples = Max(maxtuples, MaxHeapTuplesPerPage(tde_nonce_size)); } else - maxtuples = MaxHeapTuplesPerPage; + maxtuples = MaxHeapTuplesPerPage(tde_nonce_size); return maxtuples; } diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index e198df65d8..d896cf12d6 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -104,17 +104,20 @@ * extra headers, so the whole page minus the standard page header is * used for the bitmap. */ -#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData)) +#define MAPMAXSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData)) + +#define MAPSIZE(tdeNonceSize) (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ + MAXALIGN(tdeNonceSize)) /* Number of heap blocks we can represent in one byte */ #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK) /* Number of heap blocks we can represent in one visibility map page. */ -#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE) +#define HEAPBLOCKS_PER_PAGE(tdeNonceSize) (MAPSIZE(tdeNonceSize) * HEAPBLOCKS_PER_BYTE) /* Mapping from heap block number to the right bit in the visibility map */ -#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE) -#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE) +#define HEAPBLK_TO_MAPBLOCK(x, tdeNonceSize) ((x) / HEAPBLOCKS_PER_PAGE(tdeNonceSize)) +#define HEAPBLK_TO_MAPBYTE(x, tdeNonceSize) (((x) % HEAPBLOCKS_PER_PAGE(tdeNonceSize)) / HEAPBLOCKS_PER_BYTE) #define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK) /* Masks for counting subsets of bits in the visibility map. 
*/ @@ -138,8 +141,8 @@ static void vm_extend(Relation rel, BlockNumber vm_nblocks); bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags) { - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); - int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); + BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk, tde_nonce_size); + int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk, tde_nonce_size); int mapOffset = HEAPBLK_TO_OFFSET(heapBlk); uint8 mask = flags << mapOffset; char *map; @@ -189,7 +192,7 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags) void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf) { - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); + BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk, tde_nonce_size); /* Reuse the old pinned buffer if possible */ if (BufferIsValid(*buf)) @@ -213,7 +216,7 @@ visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf) bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf) { - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); + BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk, tde_nonce_size); return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock; } @@ -244,8 +247,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags) { - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); - uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); + BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk, tde_nonce_size); + uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk, tde_nonce_size); uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); Page page; uint8 *map; @@ -329,8 +332,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf) { - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); - uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); + BlockNumber mapBlock = 
HEAPBLK_TO_MAPBLOCK(heapBlk, tde_nonce_size); + uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk, tde_nonce_size); uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); char *map; uint8 result; @@ -406,16 +409,17 @@ visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_fro */ map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer)); - StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0, - "unsupported MAPSIZE"); + if (MAPSIZE(tde_nonce_size) % sizeof(uint64) != 0) + elog(ERROR, "unsupported MAPSIZE"); + if (all_frozen == NULL) { - for (i = 0; i < MAPSIZE / sizeof(uint64); i++) + for (i = 0; i < MAPSIZE(tde_nonce_size) / sizeof(uint64); i++) nvisible += pg_popcount64(map[i] & VISIBLE_MASK64); } else { - for (i = 0; i < MAPSIZE / sizeof(uint64); i++) + for (i = 0; i < MAPSIZE(tde_nonce_size) / sizeof(uint64); i++) { nvisible += pg_popcount64(map[i] & VISIBLE_MASK64); nfrozen += pg_popcount64(map[i] & FROZEN_MASK64); @@ -447,8 +451,8 @@ visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) BlockNumber newnblocks; /* last remaining block, byte, and bit */ - BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks); - uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks); + BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks, tde_nonce_size); + uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks, tde_nonce_size); uint8 truncOffset = HEAPBLK_TO_OFFSET(nheapblocks); #ifdef TRACE_VISIBILITYMAP @@ -495,7 +499,7 @@ visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) START_CRIT_SECTION(); /* Clear out the unwanted bytes. */ - MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1)); + MemSet(&map[truncByte + 1], 0, MAPSIZE(tde_nonce_size) - (truncByte + 1)); /*---- * Mask out the unwanted bits of the last remaining byte. 
diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c index 271994b08d..58e579b3a9 100644 --- a/src/backend/access/nbtree/nbtdedup.c +++ b/src/backend/access/nbtree/nbtdedup.c @@ -80,7 +80,7 @@ _bt_dedup_pass(Relation rel, Buffer buf, Relation heapRel, IndexTuple newitem, state = (BTDedupState) palloc(sizeof(BTDedupStateData)); state->deduplicate = true; state->nmaxitems = 0; - state->maxpostingsize = Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK); + state->maxpostingsize = Min(BTMaxItemSize(page, tde_nonce_size) / 2, INDEX_SIZE_MASK); /* Metadata about base tuple of current pending posting list */ state->base = NULL; state->baseoff = InvalidOffsetNumber; @@ -821,7 +821,7 @@ _bt_singleval_fillfactor(Page page, BTDedupState state, Size newitemsz) /* This calculation needs to match nbtsplitloc.c */ leftfree = PageGetPageSize(page) - SizeOfPageHeaderData - - MAXALIGN(sizeof(BTPageOpaqueData)); + MAXALIGN(sizeof(BTPageOpaqueData) + tde_nonce_size); /* Subtract size of new high key (includes pivot heap TID space) */ leftfree -= newitemsz + MAXALIGN(sizeof(ItemPointerData)); diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 6ac205c98e..a2976310b2 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -827,7 +827,7 @@ _bt_findinsertloc(Relation rel, opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* Check 1/3 of a page restriction */ - if (unlikely(insertstate->itemsz > BTMaxItemSize(page))) + if (unlikely(insertstate->itemsz > BTMaxItemSize(page, tde_nonce_size))) _bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page, insertstate->itup); @@ -2655,7 +2655,7 @@ _bt_delete_or_dedup_one_page(Relation rel, Relation heapRel, bool simpleonly, bool checkingunique, bool uniquedup, bool indexUnchanged) { - OffsetNumber deletable[MaxIndexTuplesPerPage]; + OffsetNumber deletable[MaxIndexTuplesPerPageLimit]; int ndeletable = 0; OffsetNumber 
offnum, minoff, diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index ebec8fa5b8..8fcd9bc426 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -812,7 +812,8 @@ _bt_checkpage(Relation rel, Buffer buf) /* * Additionally check that the special area looks sane. */ - if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData))) + if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(BTPageOpaqueData) + tde_nonce_size)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" contains corrupted page at block %u", @@ -1173,7 +1174,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, bool needswal = RelationNeedsWAL(rel); char *updatedbuf = NULL; Size updatedbuflen = 0; - OffsetNumber updatedoffsets[MaxIndexTuplesPerPage]; + OffsetNumber updatedoffsets[MaxIndexTuplesPerPageLimit]; /* Shouldn't be called unless there's something to do */ Assert(ndeletable > 0 || nupdatable > 0); @@ -1303,7 +1304,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, bool needswal = RelationNeedsWAL(rel); char *updatedbuf = NULL; Size updatedbuflen = 0; - OffsetNumber updatedoffsets[MaxIndexTuplesPerPage]; + OffsetNumber updatedoffsets[MaxIndexTuplesPerPageLimit]; /* Shouldn't be called unless there's something to do */ Assert(ndeletable > 0 || nupdatable > 0); @@ -1534,8 +1535,8 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, OffsetNumber postingidxoffnum = InvalidOffsetNumber; int ndeletable = 0, nupdatable = 0; - OffsetNumber deletable[MaxIndexTuplesPerPage]; - BTVacuumPosting updatable[MaxIndexTuplesPerPage]; + OffsetNumber deletable[MaxIndexTuplesPerPageLimit]; + BTVacuumPosting updatable[MaxIndexTuplesPerPageLimit]; /* Use tableam interface to determine which tuples to delete first */ latestRemovedXid = table_index_delete_tuples(heapRel, delstate); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 
1360ab80c1..f01bee76b5 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -1150,9 +1150,9 @@ backtrack: } else if (P_ISLEAF(opaque)) { - OffsetNumber deletable[MaxIndexTuplesPerPage]; + OffsetNumber deletable[MaxIndexTuplesPerPageLimit]; int ndeletable; - BTVacuumPosting updatable[MaxIndexTuplesPerPage]; + BTVacuumPosting updatable[MaxIndexTuplesPerPageLimit]; int nupdatable; OffsetNumber offnum, minoff, diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 2c4d7f6e25..de1e88a2bc 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -874,7 +874,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, * make use of the reserved space. This should never fail on internal * pages. */ - if (unlikely(itupsz > BTMaxItemSize(npage))) + if (unlikely(itupsz > BTMaxItemSize(npage, tde_nonce_size))) _bt_check_third_page(wstate->index, wstate->heap, isleaf, npage, itup); @@ -1346,7 +1346,8 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) */ dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) - sizeof(ItemIdData); - Assert(dstate->maxpostingsize <= BTMaxItemSize(state->btps_page) && + Assert(dstate->maxpostingsize <= + BTMaxItemSize(state->btps_page, tde_nonce_size) && dstate->maxpostingsize <= INDEX_SIZE_MASK); dstate->htids = palloc(dstate->maxpostingsize); diff --git a/src/backend/access/nbtree/nbtsplitloc.c b/src/backend/access/nbtree/nbtsplitloc.c index 3485e93ef6..9464de15ce 100644 --- a/src/backend/access/nbtree/nbtsplitloc.c +++ b/src/backend/access/nbtree/nbtsplitloc.c @@ -158,7 +158,7 @@ _bt_findsplitloc(Relation rel, /* Total free space available on a btree page, after fixed overhead */ leftspace = rightspace = PageGetPageSize(origpage) - SizeOfPageHeaderData - - MAXALIGN(sizeof(BTPageOpaqueData)); + MAXALIGN(sizeof(BTPageOpaqueData) + tde_nonce_size); /* The right page will have the same high key as 
the old page */ if (!P_RIGHTMOST(opaque)) diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index d524310723..c836ecd8d6 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -2640,7 +2640,7 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, itemsz = MAXALIGN(IndexTupleSize(newtup)); /* Double check item size against limit */ - if (itemsz <= BTMaxItemSize(page)) + if (itemsz <= BTMaxItemSize(page, tde_nonce_size)) return; /* @@ -2648,7 +2648,7 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, * index uses version 2 or version 3, or that page is an internal page, in * which case a slightly higher limit applies. */ - if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page)) + if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page, tde_nonce_size)) return; /* @@ -2665,8 +2665,8 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"", itemsz, needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION, - needheaptidspace ? BTMaxItemSize(page) : - BTMaxItemSizeNoHeapTid(page), + needheaptidspace ? 
BTMaxItemSize(page, tde_nonce_size) : + BTMaxItemSizeNoHeapTid(page, tde_nonce_size), RelationGetRelationName(rel)), errdetail("Index row references tuple (%u,%u) in relation \"%s\".", ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)), diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index c2e920f159..06642dda32 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -40,8 +40,8 @@ _bt_restore_page(Page page, char *from, int len) IndexTupleData itupdata; Size itemsz; char *end = from + len; - Item items[MaxIndexTuplesPerPage]; - uint16 itemsizes[MaxIndexTuplesPerPage]; + Item items[MaxIndexTuplesPerPageLimit]; + uint16 itemsizes[MaxIndexTuplesPerPageLimit]; int i; int nitems; @@ -486,7 +486,7 @@ btree_xlog_dedup(XLogReaderState *record) state->deduplicate = true; /* unused */ state->nmaxitems = 0; /* unused */ /* Conservatively use larger maxpostingsize than primary */ - state->maxpostingsize = BTMaxItemSize(page); + state->maxpostingsize = BTMaxItemSize(page, tde_nonce_size); state->base = NULL; state->baseoff = InvalidOffsetNumber; state->basetupsize = 0; diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 70557bcf3d..83f5fab5b5 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -134,7 +134,7 @@ spgPageIndexMultiDelete(SpGistState *state, Page page, BlockNumber blkno, OffsetNumber offnum) { OffsetNumber firstItem; - OffsetNumber sortednos[MaxIndexTuplesPerPage]; + OffsetNumber sortednos[MaxIndexTuplesPerPageLimit]; SpGistDeadTuple tuple = NULL; int i; @@ -897,7 +897,7 @@ doPickSplit(Relation index, SpGistState *state, * fit on one page. 
*/ allTheSame = checkAllTheSame(&in, &out, - totalLeafSizes > SPGIST_PAGE_CAPACITY, + totalLeafSizes > SPGIST_PAGE_CAPACITY(tde_nonce_size), &includeNew); /* @@ -1028,7 +1028,8 @@ doPickSplit(Relation index, SpGistState *state, for (i = 0; i < nToInsert; i++) leafPageSelect[i] = 0; /* signifies current page */ } - else if (in.nTuples == 1 && totalLeafSizes > SPGIST_PAGE_CAPACITY) + else if (in.nTuples == 1 && totalLeafSizes > + SPGIST_PAGE_CAPACITY(tde_nonce_size)) { /* * We're trying to split up a long value by repeated suffixing, but @@ -1049,7 +1050,7 @@ doPickSplit(Relation index, SpGistState *state, newLeafBuffer = SpGistGetBuffer(index, GBUF_LEAF | (isNulls ? GBUF_NULLS : 0), Min(totalLeafSizes, - SPGIST_PAGE_CAPACITY), + SPGIST_PAGE_CAPACITY(tde_nonce_size)), &xlrec.initDest); /* @@ -1993,13 +1994,13 @@ spgdoinsert(Relation index, SpGistState *state, * If it isn't gonna fit, and the opclass can't reduce the datum size by * suffixing, bail out now rather than doing a lot of useless work. */ - if (leafSize > SPGIST_PAGE_CAPACITY && + if (leafSize > SPGIST_PAGE_CAPACITY(tde_nonce_size) && (isnull || !state->config.longValuesOK)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", leafSize - sizeof(ItemIdData), - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData), + SPGIST_PAGE_CAPACITY(tde_nonce_size) - sizeof(ItemIdData), RelationGetRelationName(index)), errhint("Values larger than a buffer page cannot be indexed."))); bestLeafSize = leafSize; @@ -2057,7 +2058,7 @@ spgdoinsert(Relation index, SpGistState *state, current.buffer = SpGistGetBuffer(index, GBUF_LEAF | (isnull ? 
GBUF_NULLS : 0), - Min(leafSize, SPGIST_PAGE_CAPACITY), + Min(leafSize, SPGIST_PAGE_CAPACITY(tde_nonce_size)), &isNew); current.blkno = BufferGetBlockNumber(current.buffer); } @@ -2118,11 +2119,12 @@ spgdoinsert(Relation index, SpGistState *state, &current, &parent, isnull, isNew); break; } - else if ((sizeToSplit = - checkSplitConditions(index, state, &current, - &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 && + else if ((sizeToSplit = checkSplitConditions(index, state, &current, + &nToSplit)) < + SPGIST_PAGE_CAPACITY(tde_nonce_size) / 2 && nToSplit < 64 && - leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY + leafTuple->size + sizeof(ItemIdData) + + sizeToSplit <= SPGIST_PAGE_CAPACITY(tde_nonce_size)) { /* * the amount of data is pretty small, so just move the whole @@ -2254,7 +2256,7 @@ spgdoinsert(Relation index, SpGistState *state, * than MAXALIGN, to accommodate opclasses that trim one * byte from the leaf datum per pass.) */ - if (leafSize > SPGIST_PAGE_CAPACITY) + if (leafSize > SPGIST_PAGE_CAPACITY(tde_nonce_size)) { bool ok = false; @@ -2274,7 +2276,7 @@ spgdoinsert(Relation index, SpGistState *state, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", leafSize - sizeof(ItemIdData), - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData), + SPGIST_PAGE_CAPACITY(tde_nonce_size) - sizeof(ItemIdData), RelationGetRelationName(index)), errhint("Values larger than a buffer page cannot be indexed."))); } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index e14b9fa573..d0375a98f9 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -958,7 +958,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, SpGistLeafTuple leafTuple, bool recheck, bool recheckDistances, double *nonNullDistances) { - Assert(so->nPtrs < MaxIndexTuplesPerPage); + Assert(so->nPtrs < MaxIndexTuplesPerPage(tde_nonce_size)); so->heapPtrs[so->nPtrs] = *heapPtr;
so->recheck[so->nPtrs] = recheck; so->recheckDistances[so->nPtrs] = recheckDistances; diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 8d99c9b762..3e1e67cc16 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -540,7 +540,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) SpGistLastUsedPage *lup; /* Bail out if even an empty page wouldn't meet the demand */ - if (needSpace > SPGIST_PAGE_CAPACITY) + if (needSpace > SPGIST_PAGE_CAPACITY(tde_nonce_size)) elog(ERROR, "desired SPGiST tuple size is too big"); /* @@ -551,7 +551,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) * error for requests that would otherwise be legal. */ needSpace += SpGistGetTargetPageFreeSpace(index); - needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY); + needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY(tde_nonce_size)); /* Get the cache entry for this flags setting */ lup = GET_LUP(cache, flags); @@ -999,12 +999,12 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix, /* * Inner tuple should be small enough to fit on a page */ - if (size > SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)) + if (size > SPGIST_PAGE_CAPACITY(tde_nonce_size) - sizeof(ItemIdData)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("SP-GiST inner tuple size %zu exceeds maximum %zu", (Size) size, - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)), + SPGIST_PAGE_CAPACITY(tde_nonce_size) - sizeof(ItemIdData)), errhint("Values larger than a buffer page cannot be indexed."))); /* diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 76fb0374c4..5e8b349be5 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -128,14 +128,14 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, { Page page = BufferGetPage(buffer); spgxlogVacuumLeaf xlrec; - OffsetNumber 
toDead[MaxIndexTuplesPerPage]; - OffsetNumber toPlaceholder[MaxIndexTuplesPerPage]; - OffsetNumber moveSrc[MaxIndexTuplesPerPage]; - OffsetNumber moveDest[MaxIndexTuplesPerPage]; - OffsetNumber chainSrc[MaxIndexTuplesPerPage]; - OffsetNumber chainDest[MaxIndexTuplesPerPage]; - OffsetNumber predecessor[MaxIndexTuplesPerPage + 1]; - bool deletable[MaxIndexTuplesPerPage + 1]; + OffsetNumber toDead[MaxIndexTuplesPerPageLimit]; + OffsetNumber toPlaceholder[MaxIndexTuplesPerPageLimit]; + OffsetNumber moveSrc[MaxIndexTuplesPerPageLimit]; + OffsetNumber moveDest[MaxIndexTuplesPerPageLimit]; + OffsetNumber chainSrc[MaxIndexTuplesPerPageLimit]; + OffsetNumber chainDest[MaxIndexTuplesPerPageLimit]; + OffsetNumber predecessor[MaxIndexTuplesPerPageLimit + 1]; + bool deletable[MaxIndexTuplesPerPageLimit + 1]; int nDeletable; OffsetNumber i, max = PageGetMaxOffsetNumber(page); @@ -408,7 +408,7 @@ vacuumLeafRoot(spgBulkDeleteState *bds, Relation index, Buffer buffer) { Page page = BufferGetPage(buffer); spgxlogVacuumRoot xlrec; - OffsetNumber toDelete[MaxIndexTuplesPerPage]; + OffsetNumber toDelete[MaxIndexTuplesPerPageLimit]; OffsetNumber i, max = PageGetMaxOffsetNumber(page); @@ -498,8 +498,8 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) firstPlaceholder = InvalidOffsetNumber; bool hasNonPlaceholder = false; bool hasUpdate = false; - OffsetNumber itemToPlaceholder[MaxIndexTuplesPerPage]; - OffsetNumber itemnos[MaxIndexTuplesPerPage]; + OffsetNumber itemToPlaceholder[MaxIndexTuplesPerPageLimit]; + OffsetNumber itemnos[MaxIndexTuplesPerPageLimit]; spgxlogVacuumRedirect xlrec; GlobalVisState *vistest; diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 218aad275b..3a3e5f9282 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -4685,7 +4685,7 @@ WriteControlFile(void) ControlFile->nameDataLen = NAMEDATALEN; ControlFile->indexMaxKeys = INDEX_MAX_KEYS; - ControlFile->toast_max_chunk_size = 
TOAST_MAX_CHUNK_SIZE; + ControlFile->toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE(tde_nonce_size); ControlFile->loblksize = LOBLKSIZE; ControlFile->float8ByVal = FLOAT8PASSBYVAL; @@ -4879,12 +4879,12 @@ ReadControlFile(void) " but the server was compiled with INDEX_MAX_KEYS %d.", ControlFile->indexMaxKeys, INDEX_MAX_KEYS), errhint("It looks like you need to recompile or initdb."))); - if (ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE) + if (ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE(tde_nonce_size)) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d," " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.", - ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE), + ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE(tde_nonce_size)), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->loblksize != LOBLKSIZE) ereport(FATAL, diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index c5feacbff4..5086c149ac 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -53,7 +53,8 @@ * the per-page bitmaps variable size. 
We just legislate that the size * is this: */ -#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage +#define MAX_TUPLES_PER_PAGE_LIMIT MaxHeapTuplesPerPageLimit +#define MAX_TUPLES_PER_PAGE(tdeNonceSize) MaxHeapTuplesPerPage(tdeNonceSize) /* * When we have to switch over to lossy storage, we use a data structure @@ -78,7 +79,9 @@ #define BITNUM(x) ((x) % BITS_PER_BITMAPWORD) /* number of active words for an exact page: */ -#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1) +#define WORDS_PER_PAGE_LIMIT ((MAX_TUPLES_PER_PAGE_LIMIT - 1) / BITS_PER_BITMAPWORD + 1) +#define WORDS_PER_PAGE(tdeNonceSize) ((MAX_TUPLES_PER_PAGE(tdeNonceSize) - 1) / BITS_PER_BITMAPWORD + 1) + /* number of active words for a lossy chunk: */ #define WORDS_PER_CHUNK ((PAGES_PER_CHUNK - 1) / BITS_PER_BITMAPWORD + 1) @@ -102,7 +105,7 @@ typedef struct PagetableEntry char status; /* hash entry status */ bool ischunk; /* T = lossy storage, F = exact */ bool recheck; /* should the tuples be rechecked? 
*/ - bitmapword words[Max(WORDS_PER_PAGE, WORDS_PER_CHUNK)]; + bitmapword words[Max(WORDS_PER_PAGE_LIMIT, WORDS_PER_CHUNK)]; } PagetableEntry; /* @@ -389,7 +392,7 @@ tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids, bitnum; /* safety check to ensure we don't overrun bit array bounds */ - if (off < 1 || off > MAX_TUPLES_PER_PAGE) + if (off < 1 || off > MAX_TUPLES_PER_PAGE(tde_nonce_size)) elog(ERROR, "tuple offset out of range: %u", off); /* @@ -520,7 +523,7 @@ tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage) else { /* Both pages are exact, merge at the bit level */ - for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++) + for (wordnum = 0; wordnum < WORDS_PER_PAGE(tde_nonce_size); wordnum++) apage->words[wordnum] |= bpage->words[wordnum]; apage->recheck |= bpage->recheck; } @@ -649,7 +652,7 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) { /* Both pages are exact, merge at the bit level */ Assert(!bpage->ischunk); - for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++) + for (wordnum = 0; wordnum < WORDS_PER_PAGE(tde_nonce_size); wordnum++) { apage->words[wordnum] &= bpage->words[wordnum]; if (apage->words[wordnum] != 0) @@ -696,7 +699,8 @@ tbm_begin_iterate(TIDBitmap *tbm) * needs of the TBMIterateResult sub-struct. */ iterator = (TBMIterator *) palloc(sizeof(TBMIterator) + - MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber)); + MAX_TUPLES_PER_PAGE(tde_nonce_size) * + sizeof(OffsetNumber)); iterator->tbm = tbm; /* @@ -912,7 +916,7 @@ tbm_extract_page_tuple(PagetableEntry *page, TBMIterateResult *output) int wordnum; int ntuples = 0; - for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++) + for (wordnum = 0; wordnum < WORDS_PER_PAGE(tde_nonce_size); wordnum++) { bitmapword w = page->words[wordnum]; @@ -1471,7 +1475,8 @@ tbm_attach_shared_iterate(dsa_area *dsa, dsa_pointer dp) * serve the needs of the TBMIterateResult sub-struct. 
*/ iterator = (TBMSharedIterator *) palloc0(sizeof(TBMSharedIterator) + - MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber)); + MAX_TUPLES_PER_PAGE(tde_nonce_size) * + sizeof(OffsetNumber)); istate = (TBMSharedIteratorState *) dsa_get_address(dsa, dp); diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index b0ab91cc71..54a8123b30 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -4711,7 +4711,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn, * the tuplebuf because attrs[] will point back into the current content. */ tmphtup = heap_form_tuple(desc, attrs, isnull); - Assert(newtup->tuple.t_len <= MaxHeapTupleSize); + Assert(newtup->tuple.t_len <= MaxHeapTupleSize(tde_nonce_size)); Assert(ReorderBufferTupleBufData(newtup) == newtup->tuple.t_data); memcpy(newtup->tuple.t_data, tmphtup->t_data, tmphtup->t_len); diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index 8c12dda238..d3d61baec3 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -62,7 +62,7 @@ */ #define FSM_CATEGORIES 256 #define FSM_CAT_STEP (BLCKSZ / FSM_CATEGORIES) -#define MaxFSMRequestSize MaxHeapTupleSize +#define MaxFSMRequestSize(tdeNonceSize) MaxHeapTupleSize(tdeNonceSize) /* * Depth of the on-disk tree. We need to be able to address 2^32-1 blocks, @@ -71,9 +71,9 @@ * this means that 4096 bytes is the smallest BLCKSZ that we can get away * with a 3-level tree, and 512 is the smallest we support. */ -#define FSM_TREE_DEPTH ((SlotsPerFSMPage >= 1626) ? 3 : 4) +#define FSM_TREE_DEPTH(tdeNonceSize) ((SlotsPerFSMPage(tdeNonceSize) >= 1626) ? 
3 : 4) -#define FSM_ROOT_LEVEL (FSM_TREE_DEPTH - 1) +#define FSM_ROOT_LEVEL(tdeNonceSize) (FSM_TREE_DEPTH(tdeNonceSize) - 1) #define FSM_BOTTOM_LEVEL 0 /* @@ -87,7 +87,7 @@ typedef struct } FSMAddress; /* Address of the root page. */ -static const FSMAddress FSM_ROOT_ADDRESS = {FSM_ROOT_LEVEL, 0}; +static FSMAddress FSM_ROOT_ADDRESS = {0, 0}; /* functions to navigate the tree */ static FSMAddress fsm_get_child(FSMAddress parent, uint16 slot); @@ -336,6 +336,8 @@ FreeSpaceMapVacuum(Relation rel) { bool dummy; + FSM_ROOT_ADDRESS.level = FSM_ROOT_LEVEL(tde_nonce_size); + /* Recursively scan the tree, starting at the root */ (void) fsm_vacuum_page(rel, FSM_ROOT_ADDRESS, (BlockNumber) 0, InvalidBlockNumber, @@ -357,7 +359,11 @@ FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, BlockNumber end) /* Recursively scan the tree, starting at the root */ if (end > start) + { + FSM_ROOT_ADDRESS.level = FSM_ROOT_LEVEL(tde_nonce_size); + (void) fsm_vacuum_page(rel, FSM_ROOT_ADDRESS, start, end, &dummy); + } } /******** Internal routines ********/ @@ -372,7 +378,7 @@ fsm_space_avail_to_cat(Size avail) Assert(avail < BLCKSZ); - if (avail >= MaxFSMRequestSize) + if (avail >= MaxFSMRequestSize(tde_nonce_size)) return 255; cat = avail / FSM_CAT_STEP; @@ -396,7 +402,7 @@ fsm_space_cat_to_avail(uint8 cat) { /* The highest category represents exactly MaxFSMRequestSize bytes. 
*/ if (cat == 255) - return MaxFSMRequestSize; + return MaxFSMRequestSize(tde_nonce_size); else return cat * FSM_CAT_STEP; } @@ -411,7 +417,7 @@ fsm_space_needed_to_cat(Size needed) int cat; /* Can't ask for more space than the highest category represents */ - if (needed > MaxFSMRequestSize) + if (needed > MaxFSMRequestSize(tde_nonce_size)) elog(ERROR, "invalid FSM request size %zu", needed); if (needed == 0) @@ -441,14 +447,14 @@ fsm_logical_to_physical(FSMAddress addr) */ leafno = addr.logpageno; for (l = 0; l < addr.level; l++) - leafno *= SlotsPerFSMPage; + leafno *= SlotsPerFSMPage(tde_nonce_size); /* Count upper level nodes required to address the leaf page */ pages = 0; - for (l = 0; l < FSM_TREE_DEPTH; l++) + for (l = 0; l < FSM_TREE_DEPTH(tde_nonce_size); l++) { pages += leafno + 1; - leafno /= SlotsPerFSMPage; + leafno /= SlotsPerFSMPage(tde_nonce_size); } /* @@ -470,8 +476,8 @@ fsm_get_location(BlockNumber heapblk, uint16 *slot) FSMAddress addr; addr.level = FSM_BOTTOM_LEVEL; - addr.logpageno = heapblk / SlotsPerFSMPage; - *slot = heapblk % SlotsPerFSMPage; + addr.logpageno = heapblk / SlotsPerFSMPage(tde_nonce_size); + *slot = heapblk % SlotsPerFSMPage(tde_nonce_size); return addr; } @@ -483,7 +489,7 @@ static BlockNumber fsm_get_heap_blk(FSMAddress addr, uint16 slot) { Assert(addr.level == FSM_BOTTOM_LEVEL); - return ((unsigned int) addr.logpageno) * SlotsPerFSMPage + slot; + return ((unsigned int) addr.logpageno) * SlotsPerFSMPage(tde_nonce_size) + slot; } /* @@ -495,11 +501,11 @@ fsm_get_parent(FSMAddress child, uint16 *slot) { FSMAddress parent; - Assert(child.level < FSM_ROOT_LEVEL); + Assert(child.level < FSM_ROOT_LEVEL(tde_nonce_size)); parent.level = child.level + 1; - parent.logpageno = child.logpageno / SlotsPerFSMPage; - *slot = child.logpageno % SlotsPerFSMPage; + parent.logpageno = child.logpageno / SlotsPerFSMPage(tde_nonce_size); + *slot = child.logpageno % SlotsPerFSMPage(tde_nonce_size); return parent; } @@ -516,7 +522,7 @@ 
fsm_get_child(FSMAddress parent, uint16 slot) Assert(parent.level > FSM_BOTTOM_LEVEL); child.level = parent.level - 1; - child.logpageno = parent.logpageno * SlotsPerFSMPage + slot; + child.logpageno = parent.logpageno * SlotsPerFSMPage(tde_nonce_size) + slot; return child; } @@ -690,7 +696,10 @@ static BlockNumber fsm_search(Relation rel, uint8 min_cat) { int restarts = 0; - FSMAddress addr = FSM_ROOT_ADDRESS; + FSMAddress addr; + + FSM_ROOT_ADDRESS.level = FSM_ROOT_LEVEL(tde_nonce_size); + addr = FSM_ROOT_ADDRESS; for (;;) { @@ -726,7 +735,7 @@ fsm_search(Relation rel, uint8 min_cat) addr = fsm_get_child(addr, slot); } - else if (addr.level == FSM_ROOT_LEVEL) + else if (addr.level == FSM_ROOT_LEVEL(tde_nonce_size)) { /* * At the root, failure means there's no page with enough free @@ -840,14 +849,14 @@ fsm_vacuum_page(Relation rel, FSMAddress addr, if (fsm_start.logpageno == addr.logpageno) start_slot = fsm_start_slot; else if (fsm_start.logpageno > addr.logpageno) - start_slot = SlotsPerFSMPage; /* shouldn't get here... */ + start_slot = SlotsPerFSMPage(tde_nonce_size); /* shouldn't get here... */ else start_slot = 0; if (fsm_end.logpageno == addr.logpageno) end_slot = fsm_end_slot; else if (fsm_end.logpageno > addr.logpageno) - end_slot = SlotsPerFSMPage - 1; + end_slot = SlotsPerFSMPage(tde_nonce_size) - 1; else end_slot = -1; /* shouldn't get here... 
*/ diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c index 88ae51e526..c1b5315f3b 100644 --- a/src/backend/storage/freespace/fsmpage.c +++ b/src/backend/storage/freespace/fsmpage.c @@ -22,6 +22,7 @@ */ #include "postgres.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/bufmgr.h" #include "storage/fsm_internals.h" @@ -66,7 +67,7 @@ fsm_set_avail(Page page, int slot, uint8 value) FSMPage fsmpage = (FSMPage) PageGetContents(page); uint8 oldvalue; - Assert(slot < LeafNodesPerPage); + Assert(slot < LeafNodesPerPage(tde_nonce_size)); oldvalue = fsmpage->fp_nodes[nodeno]; @@ -91,7 +92,7 @@ fsm_set_avail(Page page, int slot, uint8 value) rchild = lchild + 1; newvalue = fsmpage->fp_nodes[lchild]; - if (rchild < NodesPerPage) + if (rchild < NodesPerPage(tde_nonce_size)) newvalue = Max(newvalue, fsmpage->fp_nodes[rchild]); @@ -123,7 +124,7 @@ fsm_get_avail(Page page, int slot) { FSMPage fsmpage = (FSMPage) PageGetContents(page); - Assert(slot < LeafNodesPerPage); + Assert(slot < LeafNodesPerPage(tde_nonce_size)); return fsmpage->fp_nodes[NonLeafNodesPerPage + slot]; } @@ -179,7 +180,7 @@ restart: * the last slot on the page.) 
*/ target = fsmpage->fp_next_slot; - if (target < 0 || target >= LeafNodesPerPage) + if (target < 0 || target >= LeafNodesPerPage(tde_nonce_size)) target = 0; target += NonLeafNodesPerPage; @@ -246,14 +247,14 @@ restart: { int childnodeno = leftchild(nodeno); - if (childnodeno < NodesPerPage && + if (childnodeno < NodesPerPage(tde_nonce_size) && fsmpage->fp_nodes[childnodeno] >= minvalue) { nodeno = childnodeno; continue; } childnodeno++; /* point to right child */ - if (childnodeno < NodesPerPage && + if (childnodeno < NodesPerPage(tde_nonce_size) && fsmpage->fp_nodes[childnodeno] >= minvalue) { nodeno = childnodeno; @@ -316,11 +317,11 @@ fsm_truncate_avail(Page page, int nslots) uint8 *ptr; bool changed = false; - Assert(nslots >= 0 && nslots < LeafNodesPerPage); + Assert(nslots >= 0 && nslots < LeafNodesPerPage(tde_nonce_size)); /* Clear all truncated leaf nodes */ ptr = &fsmpage->fp_nodes[NonLeafNodesPerPage + nslots]; - for (; ptr < &fsmpage->fp_nodes[NodesPerPage]; ptr++) + for (; ptr < &fsmpage->fp_nodes[NodesPerPage(tde_nonce_size)]; ptr++) { if (*ptr != 0) changed = true; @@ -356,10 +357,10 @@ fsm_rebuild_page(Page page) uint8 newvalue = 0; /* The first few nodes we examine might have zero or one child. 
*/ - if (lchild < NodesPerPage) + if (lchild < NodesPerPage(tde_nonce_size)) newvalue = fsmpage->fp_nodes[lchild]; - if (rchild < NodesPerPage) + if (rchild < NodesPerPage(tde_nonce_size)) newvalue = Max(newvalue, fsmpage->fp_nodes[rchild]); diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index 82ca91f597..fe52500a5a 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -43,6 +43,8 @@ PageInit(Page page, Size pageSize, Size specialSize) { PageHeader p = (PageHeader) page; + specialSize = specialSize + tde_nonce_size; + specialSize = MAXALIGN(specialSize); Assert(pageSize == BLCKSZ); @@ -295,7 +297,7 @@ PageAddItemExtended(Page page, } /* Reject placing items beyond heap boundary, if heap */ - if ((flags & PAI_IS_HEAP) != 0 && offsetNumber > MaxHeapTuplesPerPage) + if ((flags & PAI_IS_HEAP) != 0 && offsetNumber > MaxHeapTuplesPerPage(tde_nonce_size)) { elog(WARNING, "can't put more than MaxHeapTuplesPerPage items in a heap page"); return InvalidOffsetNumber; @@ -403,11 +405,22 @@ PageGetTempPageCopySpecial(Page page) { Size pageSize; Page temp; + int prev_tde_nonce_size; pageSize = PageGetPageSize(page); temp = (Page) palloc(pageSize); + /* + * TDE nonce bytes are copied to temporary page since the source page + * special space is passed to PageInit. 
+ */ + prev_tde_nonce_size = tde_nonce_size; + tde_nonce_size = 0; + PageInit(temp, pageSize, PageGetSpecialSize(page)); + + tde_nonce_size = prev_tde_nonce_size; + memcpy(PageGetSpecialPointer(temp), PageGetSpecialPointer(page), PageGetSpecialSize(page)); @@ -712,7 +725,7 @@ PageRepairFragmentation(Page page) Offset pd_upper = ((PageHeader) page)->pd_upper; Offset pd_special = ((PageHeader) page)->pd_special; Offset last_offset; - itemIdCompactData itemidbase[MaxHeapTuplesPerPage]; + itemIdCompactData itemidbase[MaxHeapTuplesPerPageLimit]; itemIdCompact itemidptr; ItemId lp; int nline, @@ -995,7 +1008,7 @@ PageGetHeapFreeSpace(Page page) * Are there already MaxHeapTuplesPerPage line pointers in the page? */ nline = PageGetMaxOffsetNumber(page); - if (nline >= MaxHeapTuplesPerPage) + if (nline >= MaxHeapTuplesPerPage(tde_nonce_size)) { if (PageHasFreeLinePointers((PageHeader) page)) { @@ -1158,8 +1171,8 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) Offset pd_upper = phdr->pd_upper; Offset pd_special = phdr->pd_special; Offset last_offset; - itemIdCompactData itemidbase[MaxIndexTuplesPerPage]; - ItemIdData newitemids[MaxIndexTuplesPerPage]; + itemIdCompactData itemidbase[MaxIndexTuplesPerPageLimit]; + ItemIdData newitemids[MaxIndexTuplesPerPageLimit]; itemIdCompact itemidptr; ItemId lp; int nline, @@ -1171,7 +1184,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) OffsetNumber offnum; bool presorted = true; /* For now */ - Assert(nitems <= MaxIndexTuplesPerPage); + Assert(nitems <= MaxIndexTuplesPerPage(tde_nonce_size)); /* * If there aren't very many items to delete, then retail diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 37ddda7724..e1c277429d 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -7739,7 +7739,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, BRIN_DEFAULT_PAGES_PER_RANGE), 1.0); 
statsData.pagesPerRange = BRIN_DEFAULT_PAGES_PER_RANGE; - statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS) + 1; + statsData.revmapNumPages = (indexRanges / REVMAP_PAGE_MAXITEMS(tde_nonce_size)) + 1; } /* diff --git a/src/backend/utils/adt/tsgistidx.c b/src/backend/utils/adt/tsgistidx.c index c09eefdda2..3d5dfbf442 100644 --- a/src/backend/utils/adt/tsgistidx.c +++ b/src/backend/utils/adt/tsgistidx.c @@ -32,7 +32,7 @@ typedef struct } GistTsVectorOptions; #define SIGLEN_DEFAULT (31 * 4) -#define SIGLEN_MAX GISTMaxIndexKeySize +#define SIGLEN_MAX GISTMaxIndexKeySize(tde_nonce_size) #define GET_SIGLEN() (PG_HAS_OPCLASS_OPTIONS() ? \ ((GistTsVectorOptions *) PG_GET_OPCLASS_OPTIONS())->siglen : \ SIGLEN_DEFAULT) @@ -207,7 +207,7 @@ gtsvector_compress(PG_FUNCTION_ARGS) } /* make signature, if array is too long */ - if (VARSIZE(res) > TOAST_INDEX_TARGET) + if (VARSIZE(res) > TOAST_INDEX_TARGET(tde_nonce_size)) { SignTSVector *ressign = gtsvector_alloc(SIGNKEY, siglen, NULL); diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c index 805dafef07..0be0d53e17 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -56,6 +56,7 @@ #include "common/restricted_token.h" #include "common/string.h" #include "getopt_long.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "pg_getopt.h" #include "storage/large_object.h" @@ -720,9 +721,10 @@ GuessControlValues(void) ControlFile.xlog_seg_size = DEFAULT_XLOG_SEG_SIZE; ControlFile.nameDataLen = NAMEDATALEN; ControlFile.indexMaxKeys = INDEX_MAX_KEYS; - ControlFile.toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE; + ControlFile.toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE(0); ControlFile.loblksize = LOBLKSIZE; ControlFile.float8ByVal = FLOAT8PASSBYVAL; + ControlFile.tde_nonce_size = 0; /* * XXX eventually, should try to grovel through old XLOG to develop more diff --git a/src/include/access/brin_page.h b/src/include/access/brin_page.h index 
75de538ed8..8a877ddc83 100644 --- a/src/include/access/brin_page.h +++ b/src/include/access/brin_page.h @@ -85,12 +85,12 @@ typedef struct RevmapContents ItemPointerData rm_tids[1]; } RevmapContents; -#define REVMAP_CONTENT_SIZE \ +#define REVMAP_CONTENT_SIZE(tdeNonceSize) \ (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ offsetof(RevmapContents, rm_tids) - \ - MAXALIGN(sizeof(BrinSpecialSpace))) + MAXALIGN(sizeof(BrinSpecialSpace) + tdeNonceSize)) /* max num of items in the array */ -#define REVMAP_PAGE_MAXITEMS \ - (REVMAP_CONTENT_SIZE / sizeof(ItemPointerData)) +#define REVMAP_PAGE_MAXITEMS(tdeNonceSize) \ + (REVMAP_CONTENT_SIZE(tdeNonceSize) / sizeof(ItemPointerData)) #endif /* BRIN_PAGE_H */ diff --git a/src/include/access/ginblock.h b/src/include/access/ginblock.h index 37d650ac2a..b0fca2e9ed 100644 --- a/src/include/access/ginblock.h +++ b/src/include/access/ginblock.h @@ -11,6 +11,7 @@ #define GINBLOCK_H #include "access/transam.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/block.h" #include "storage/bufpage.h" #include "storage/itemptr.h" @@ -246,11 +247,11 @@ typedef signed char GinNullCategory; * currently store the high key explicitly, we just use the rightmost item on * the page, so it would actually be enough to fit two items.) */ -#define GinMaxItemSize \ +#define GinMaxItemSize(tdeNonceSize) \ Min(INDEX_SIZE_MASK, \ MAXALIGN_DOWN(((BLCKSZ - \ MAXALIGN(SizeOfPageHeaderData + 3 * sizeof(ItemIdData)) - \ - MAXALIGN(sizeof(GinPageOpaqueData))) / 3))) + MAXALIGN(sizeof(GinPageOpaqueData) + tdeNonceSize)) / 3))) /* * Access macros for non-leaf entry tuples @@ -307,26 +308,27 @@ typedef signed char GinNullCategory; * pages are new in 9.4, however, so we can trust them; see * GinDataLeafPageGetPostingListSize. 
*/ -#define GinDataPageSetDataSize(page, size) \ +#define GinDataPageSetDataSize(page, size, tdeNonceSize) \ { \ - Assert(size <= GinDataPageMaxDataSize); \ + Assert(size <= GinDataPageMaxDataSize(tdeNonceSize)); \ ((PageHeader) page)->pd_lower = (size) + MAXALIGN(SizeOfPageHeaderData) + MAXALIGN(sizeof(ItemPointerData)); \ } -#define GinNonLeafDataPageGetFreeSpace(page) \ - (GinDataPageMaxDataSize - \ +#define GinNonLeafDataPageGetFreeSpace(page, tdeNonceSize) \ + (GinDataPageMaxDataSize(tdeNonceSize) - \ GinPageGetOpaque(page)->maxoff * sizeof(PostingItem)) -#define GinDataPageMaxDataSize \ +#define GinDataPageMaxDataSize(tdeNonceSize) \ (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ - MAXALIGN(sizeof(ItemPointerData)) \ - - MAXALIGN(sizeof(GinPageOpaqueData))) + - MAXALIGN(sizeof(GinPageOpaqueData) + tdeNonceSize)) /* * List pages */ -#define GinListPageSize \ - ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GinPageOpaqueData)) ) +#define GinListPageSize(tdeNonceSize) \ + ( BLCKSZ - SizeOfPageHeaderData - \ + MAXALIGN(sizeof(GinPageOpaqueData) + tdeNonceSize) ) /* * A compressed posting list. diff --git a/src/include/access/gist.h b/src/include/access/gist.h index 4b06575d98..30af09234f 100644 --- a/src/include/access/gist.h +++ b/src/include/access/gist.h @@ -95,12 +95,13 @@ typedef GISTPageOpaqueData *GISTPageOpaque; * makes sense at all. For multicolumn indexes, user might be able to tune * key size using opclass parameters. 
*/ -#define GISTMaxIndexTupleSize \ - MAXALIGN_DOWN((BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData)) / \ +#define GISTMaxIndexTupleSize(tdeNonceSize) \ + MAXALIGN_DOWN((BLCKSZ - SizeOfPageHeaderData - \ + sizeof(GISTPageOpaqueData) - tdeNonceSize) / \ 4 - sizeof(ItemIdData)) -#define GISTMaxIndexKeySize \ - (GISTMaxIndexTupleSize - MAXALIGN(sizeof(IndexTupleData))) +#define GISTMaxIndexKeySize(tdeNonceSize) \ + (GISTMaxIndexTupleSize(tdeNonceSize) - MAXALIGN(sizeof(IndexTupleData))) /* * The page ID is for the convenience of pg_filedump and similar utilities, diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 553d364e2d..b85cc8d1ef 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -18,6 +18,7 @@ #include "access/gist.h" #include "access/itup.h" #include "lib/pairingheap.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/bufmgr.h" #include "storage/buffile.h" #include "utils/hsearch.h" @@ -472,8 +473,9 @@ extern void gistadjustmembers(Oid opfamilyoid, /* gistutil.c */ -#define GiSTPageSize \ - ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GISTPageOpaqueData)) ) +#define GiSTPageSize(tdeNonceSize) \ + ( BLCKSZ - SizeOfPageHeaderData - \ + MAXALIGN(sizeof(GISTPageOpaqueData) + tdeNonceSize) ) #define GIST_MIN_FILLFACTOR 10 #define GIST_DEFAULT_FILLFACTOR 90 diff --git a/src/include/access/hash.h b/src/include/access/hash.h index 1cce865be2..9dbc9d16c4 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ -122,7 +122,7 @@ typedef struct HashScanPosData int lastItem; /* last valid index in items[] */ int itemIndex; /* current index in items[] */ - HashScanPosItem items[MaxIndexTuplesPerPage]; /* MUST BE LAST */ + HashScanPosItem items[MaxIndexTuplesPerPageLimit]; /* MUST BE LAST */ } HashScanPosData; #define HashScanPosIsPinned(scanpos) \ @@ -282,11 +282,11 @@ typedef struct HashOptions /* * Maximum size of a hash index item (it's okay to 
have only one per page) */ -#define HashMaxItemSize(page) \ +#define HashMaxItemSize(page, tdeNonceSize) \ MAXALIGN_DOWN(PageGetPageSize(page) - \ SizeOfPageHeaderData - \ sizeof(ItemIdData) - \ - MAXALIGN(sizeof(HashPageOpaqueData))) + MAXALIGN(sizeof(HashPageOpaqueData) + tdeNonceSize)) #define INDEX_MOVED_BY_SPLIT_MASK INDEX_AM_RESERVED_BIT @@ -314,9 +314,10 @@ typedef struct HashOptions #define HashPageGetBitmap(page) \ ((uint32 *) PageGetContents(page)) -#define HashGetMaxBitmapSize(page) \ +#define HashGetMaxBitmapSize(page, tdeNonceSize) \ (PageGetPageSize((Page) page) - \ - (MAXALIGN(SizeOfPageHeaderData) + MAXALIGN(sizeof(HashPageOpaqueData)))) + (MAXALIGN(SizeOfPageHeaderData) + \ + MAXALIGN(sizeof(HashPageOpaqueData) + tdeNonceSize))) #define HashPageGetMeta(page) \ ((HashMetaPage) PageGetContents(page)) diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index e63b49fc38..835e6df5da 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -74,7 +74,7 @@ typedef struct HeapScanDescData /* these fields only used in page-at-a-time mode and for bitmap scans */ int rs_cindex; /* current tuple's index in vistuples */ int rs_ntuples; /* number of visible tuples on page */ - OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */ + OffsetNumber rs_vistuples[MaxHeapTuplesPerPageLimit]; /* their offsets */ } HeapScanDescData; typedef struct HeapScanDescData *HeapScanDesc; diff --git a/src/include/access/heaptoast.h b/src/include/access/heaptoast.h index 8b29f1a986..186b8da682 100644 --- a/src/include/access/heaptoast.h +++ b/src/include/access/heaptoast.h @@ -14,17 +14,24 @@ #define HEAPTOAST_H #include "access/htup_details.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/lockdefs.h" #include "utils/relcache.h" /* * Find the maximum size of a tuple if there are to be N tuples per page. 
*/ -#define MaximumBytesPerTuple(tuplesPerPage) \ +#define MaximumBytesPerTupleLimit(tuplesPerPage) \ MAXALIGN_DOWN((BLCKSZ - \ MAXALIGN(SizeOfPageHeaderData + (tuplesPerPage) * sizeof(ItemIdData))) \ / (tuplesPerPage)) +#define MaximumBytesPerTuple(tuplesPerPage, tdeNonceSize) \ + MAXALIGN_DOWN((BLCKSZ - \ + MAXALIGN(SizeOfPageHeaderData + (tuplesPerPage) * sizeof(ItemIdData)) - \ + MAXALIGN(tdeNonceSize)) \ + / (tuplesPerPage)) + /* * These symbols control toaster activation. If a tuple is larger than * TOAST_TUPLE_THRESHOLD, we will try to toast it down to no more than @@ -45,9 +52,14 @@ */ #define TOAST_TUPLES_PER_PAGE 4 -#define TOAST_TUPLE_THRESHOLD MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE) +#define TOAST_TUPLE_THRESHOLD(tdeNonceSize) MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE, tdeNonceSize) + +#define TOAST_TUPLE_THRESHOLD_MAX_LIMIT MaximumBytesPerTupleLimit(TOAST_TUPLES_PER_PAGE) + +#define TOAST_TUPLE_TARGET_MAX_LIMIT TOAST_TUPLE_THRESHOLD_MAX_LIMIT + +#define TOAST_TUPLE_TARGET(tdeNonceSize) TOAST_TUPLE_THRESHOLD(tdeNonceSize) -#define TOAST_TUPLE_TARGET TOAST_TUPLE_THRESHOLD /* * The code will also consider moving MAIN data out-of-line, but only as a @@ -58,14 +70,16 @@ */ #define TOAST_TUPLES_PER_PAGE_MAIN 1 -#define TOAST_TUPLE_TARGET_MAIN MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN) +#define TOAST_TUPLE_TARGET_MAIN_MAX_LIMIT MaximumBytesPerTupleLimit(TOAST_TUPLES_PER_PAGE_MAIN) + +#define TOAST_TUPLE_TARGET_MAIN(tdeNonceSize) MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN, tdeNonceSize) /* * If an index value is larger than TOAST_INDEX_TARGET, we will try to * compress it (we can't move it out-of-line, however). Note that this * number is per-datum, not per-tuple, for simplicity in index_form_tuple(). 
*/ -#define TOAST_INDEX_TARGET (MaxHeapTupleSize / 16) +#define TOAST_INDEX_TARGET(tdeNonceSize) (MaxHeapTupleSize(tdeNonceSize) / 16) /* * When we store an oversize datum externally, we divide it into chunks @@ -79,10 +93,19 @@ */ #define EXTERN_TUPLES_PER_PAGE 4 /* tweak only this */ -#define EXTERN_TUPLE_MAX_SIZE MaximumBytesPerTuple(EXTERN_TUPLES_PER_PAGE) +#define EXTERN_TUPLE_MAX_SIZE(tdeNonceSize) MaximumBytesPerTuple(EXTERN_TUPLES_PER_PAGE, tdeNonceSize) + +#define EXTERN_TUPLE_MAX_SIZE_LIMIT MaximumBytesPerTupleLimit(EXTERN_TUPLES_PER_PAGE) + +#define TOAST_MAX_CHUNK_SIZE_LIMIT \ + (EXTERN_TUPLE_MAX_SIZE_LIMIT - \ + MAXALIGN(SizeofHeapTupleHeader) - \ + sizeof(Oid) - \ + sizeof(int32) - \ + VARHDRSZ) -#define TOAST_MAX_CHUNK_SIZE \ - (EXTERN_TUPLE_MAX_SIZE - \ +#define TOAST_MAX_CHUNK_SIZE(tdeNonceSize) \ + (EXTERN_TUPLE_MAX_SIZE(tdeNonceSize) - \ MAXALIGN(SizeofHeapTupleHeader) - \ sizeof(Oid) - \ sizeof(int32) - \ diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h index 960772f76b..5a800fe971 100644 --- a/src/include/access/htup_details.h +++ b/src/include/access/htup_details.h @@ -18,6 +18,7 @@ #include "access/transam.h" #include "access/tupdesc.h" #include "access/tupmacs.h" +#include "miscadmin.h" /* for tde_nonce_size */ #include "storage/bufpage.h" /* @@ -556,7 +557,10 @@ do { \ * ItemIds and tuples have different alignment requirements, don't assume that * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page. 
*/ -#define MaxHeapTupleSize (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData))) +#define MaxHeapTupleSizeLimit (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + \ + sizeof(ItemIdData))) +#define MaxHeapTupleSize(tdeNonceSize) (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + \ + sizeof(ItemIdData)) - MAXALIGN(tdeNonceSize)) #define MinHeapTupleSize MAXALIGN(SizeofHeapTupleHeader) /* @@ -570,10 +574,14 @@ do { \ * pointers to this anyway, to avoid excessive line-pointer bloat and not * require increases in the size of work arrays. */ -#define MaxHeapTuplesPerPage \ +#define MaxHeapTuplesPerPageLimit \ ((int) ((BLCKSZ - SizeOfPageHeaderData) / \ (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData)))) +#define MaxHeapTuplesPerPage(tdeNonceSize) \ + ((int) ((BLCKSZ - SizeOfPageHeaderData - MAXALIGN(tdeNonceSize)) / \ + (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData)))) + /* * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of * data fields of char(n) and similar types. It need not have anything diff --git a/src/include/access/itup.h b/src/include/access/itup.h index 1917375cde..c18544e9a2 100644 --- a/src/include/access/itup.h +++ b/src/include/access/itup.h @@ -142,10 +142,13 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap; * estimated here, seemingly allowing one more tuple than estimated here. * But such a page always has at least MAXALIGN special space, so we're safe. 
*/ -#define MaxIndexTuplesPerPage \ +#define MaxIndexTuplesPerPageLimit \ ((int) ((BLCKSZ - SizeOfPageHeaderData) / \ (MAXALIGN(sizeof(IndexTupleData) + 1) + sizeof(ItemIdData)))) +#define MaxIndexTuplesPerPage(tdeNonceSize) \ + ((int) ((BLCKSZ - SizeOfPageHeaderData - MAXALIGN(tdeNonceSize)) / \ + (MAXALIGN(sizeof(IndexTupleData) + 1) + sizeof(ItemIdData)))) /* routines in indextuple.c */ extern IndexTuple index_form_tuple(TupleDesc tupleDescriptor, diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 42c66fac57..e04c000236 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -159,16 +159,16 @@ typedef struct BTMetaPageData * a heap index tuple to make space for a tiebreaker heap TID * attribute, which we account for here. */ -#define BTMaxItemSize(page) \ +#define BTMaxItemSize(page, tdeNonceSize) \ MAXALIGN_DOWN((PageGetPageSize(page) - \ MAXALIGN(SizeOfPageHeaderData + \ 3*sizeof(ItemIdData) + \ 3*sizeof(ItemPointerData)) - \ - MAXALIGN(sizeof(BTPageOpaqueData))) / 3) -#define BTMaxItemSizeNoHeapTid(page) \ + MAXALIGN(sizeof(BTPageOpaqueData) + tdeNonceSize)) / 3) +#define BTMaxItemSizeNoHeapTid(page, tdeNonceSize) \ MAXALIGN_DOWN((PageGetPageSize(page) - \ MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \ - MAXALIGN(sizeof(BTPageOpaqueData))) / 3) + MAXALIGN(sizeof(BTPageOpaqueData) + tdeNonceSize)) / 3) /* * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs tuples @@ -885,7 +885,7 @@ typedef struct BTDedupStateData * are implicitly unchanged by deduplication pass). 
*/ int nintervals; /* current number of intervals in array */ - BTDedupInterval intervals[MaxIndexTuplesPerPage]; + BTDedupInterval intervals[MaxIndexTuplesPerPageLimit]; } BTDedupStateData; typedef BTDedupStateData *BTDedupState; diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index ba3da5b540..481737ac5e 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -226,14 +226,14 @@ typedef struct SpGistScanOpaqueData TupleDesc reconTupDesc; /* if so, descriptor for reconstructed tuples */ int nPtrs; /* number of TIDs found on current page */ int iPtr; /* index for scanning through same */ - ItemPointerData heapPtrs[MaxIndexTuplesPerPage]; /* TIDs from cur page */ - bool recheck[MaxIndexTuplesPerPage]; /* their recheck flags */ - bool recheckDistances[MaxIndexTuplesPerPage]; /* distance recheck + ItemPointerData heapPtrs[MaxIndexTuplesPerPageLimit]; /* TIDs from cur page */ + bool recheck[MaxIndexTuplesPerPageLimit]; /* their recheck flags */ + bool recheckDistances[MaxIndexTuplesPerPageLimit]; /* distance recheck * flags */ - HeapTuple reconTups[MaxIndexTuplesPerPage]; /* reconstructed tuples */ + HeapTuple reconTups[MaxIndexTuplesPerPageLimit]; /* reconstructed tuples */ /* distances (for recheck) */ - IndexOrderByDistance *distances[MaxIndexTuplesPerPage]; + IndexOrderByDistance *distances[MaxIndexTuplesPerPageLimit]; /* * Note: using MaxIndexTuplesPerPage above is a bit hokey since @@ -444,10 +444,10 @@ typedef SpGistDeadTupleData *SpGistDeadTuple; */ /* Page capacity after allowing for fixed header and special space */ -#define SPGIST_PAGE_CAPACITY \ +#define SPGIST_PAGE_CAPACITY(tdeNonceSize) \ MAXALIGN_DOWN(BLCKSZ - \ SizeOfPageHeaderData - \ - MAXALIGN(sizeof(SpGistPageOpaqueData))) + MAXALIGN(sizeof(SpGistPageOpaqueData) + tdeNonceSize)) /* * Compute free space on page, assuming that up to n placeholders can be diff --git a/src/include/storage/fsm_internals.h 
b/src/include/storage/fsm_internals.h index 09749769b3..090619601f 100644 --- a/src/include/storage/fsm_internals.h +++ b/src/include/storage/fsm_internals.h @@ -48,17 +48,18 @@ typedef FSMPageData *FSMPage; * Number of non-leaf and leaf nodes, and nodes in total, on an FSM page. * These definitions are internal to fsmpage.c. */ -#define NodesPerPage (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ +#define NodesPerPage(tdeNonceSize) (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ + MAXALIGN(tdeNonceSize) - \ offsetof(FSMPageData, fp_nodes)) #define NonLeafNodesPerPage (BLCKSZ / 2 - 1) -#define LeafNodesPerPage (NodesPerPage - NonLeafNodesPerPage) +#define LeafNodesPerPage(tdeNonceSize) (NodesPerPage(tdeNonceSize) - NonLeafNodesPerPage) /* * Number of FSM "slots" on a FSM page. This is what should be used * outside fsmpage.c. */ -#define SlotsPerFSMPage LeafNodesPerPage +#define SlotsPerFSMPage(tdeNonceSize) LeafNodesPerPage(tdeNonceSize) /* Prototypes for functions in fsmpage.c */ extern int fsm_search_avail(Buffer buf, uint8 min_cat, bool advancenext, diff --git a/src/test/modules/test_ginpostinglist/test_ginpostinglist.c b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c index 7ce515fcb7..e24ee54de2 100644 --- a/src/test/modules/test_ginpostinglist/test_ginpostinglist.c +++ b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c @@ -88,9 +88,9 @@ Datum test_ginpostinglist(PG_FUNCTION_ARGS) { test_itemptr_pair(0, 2, 14); - test_itemptr_pair(0, MaxHeapTuplesPerPage, 14); - test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage, 14); - test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage, 16); + test_itemptr_pair(0, MaxHeapTuplesPerPage(tde_nonce_size), 14); + test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage(tde_nonce_size), 14); + test_itemptr_pair(MaxBlockNumber, MaxHeapTuplesPerPage(tde_nonce_size), 16); PG_RETURN_VOID(); } -- 2.25.1