From 6f94908b0649956e1d1abbbd5c362a57282c2c26 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Wed, 17 Sep 2025 17:42:54 -0400 Subject: [PATCH v14 18/24] Remove XLOG_HEAP2_VISIBLE entirely There are now no users of this, so eliminate it entirely. This includes the xl_heap_visible struct as well as all of the functions used to emit and replay XLOG_HEAP2_VISIBLE records. ci-os-only: --- src/backend/access/common/bufmask.c | 4 +- src/backend/access/heap/heapam.c | 40 ++-------- src/backend/access/heap/heapam_xlog.c | 96 +++--------------------- src/backend/access/heap/pruneheap.c | 4 +- src/backend/access/heap/vacuumlazy.c | 14 ++-- src/backend/access/heap/visibilitymap.c | 83 +------------------- src/backend/access/rmgrdesc/heapdesc.c | 10 --- src/backend/replication/logical/decode.c | 1 - src/backend/storage/ipc/standby.c | 12 +-- src/include/access/heapam_xlog.h | 19 ----- src/include/access/visibilitymap.h | 11 +-- src/include/access/visibilitymapdefs.h | 9 --- src/tools/pgindent/typedefs.list | 1 - 13 files changed, 36 insertions(+), 268 deletions(-) diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c index bb260cffa68..5f07f179415 100644 --- a/src/backend/access/common/bufmask.c +++ b/src/backend/access/common/bufmask.c @@ -56,8 +56,8 @@ mask_page_hint_bits(Page page) /* * During replay, if the page LSN has advanced past our XLOG record's LSN, - * we don't mark the page all-visible. See heap_xlog_visible() for - * details. + * we don't mark the page all-visible. See heap_xlog_prune_freeze() + * for more details. 
*/ PageClearAllVisible(page); } diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0323e2df409..ab514ce65ec 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2524,11 +2524,11 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, else if (all_frozen_set) { PageSetAllVisible(page); - visibilitymap_set_vmbits(relation, - BufferGetBlockNumber(buffer), - vmbuffer, - VISIBILITYMAP_ALL_VISIBLE | - VISIBILITYMAP_ALL_FROZEN); + visibilitymap_set(relation, + BufferGetBlockNumber(buffer), + vmbuffer, + VISIBILITYMAP_ALL_VISIBLE | + VISIBILITYMAP_ALL_FROZEN); } /* @@ -8799,36 +8799,6 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) return nblocksfavorable; } -/* - * Perform XLogInsert for a heap-visible operation. 'block' is the block - * being marked all-visible, and vm_buffer is the buffer containing the - * corresponding visibility map block. Both should have already been modified - * and dirtied. - * - * snapshotConflictHorizon comes from the largest xmin on the page being - * marked all-visible. REDO routine uses it to generate recovery conflicts. - */ -XLogRecPtr -log_heap_visible(Relation rel, Buffer vm_buffer, - TransactionId snapshotConflictHorizon, uint8 vmflags) -{ - xl_heap_visible xlrec; - XLogRecPtr recptr; - - Assert(BufferIsValid(vm_buffer)); - - xlrec.snapshotConflictHorizon = snapshotConflictHorizon; - xlrec.flags = vmflags; - if (RelationIsAccessibleInLogicalDecoding(rel)) - xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL; - XLogBeginInsert(); - XLogRegisterData(&xlrec, SizeOfHeapVisible); - XLogRegisterBuffer(0, vm_buffer, 0); - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE); - - return recptr; -} - /* * Perform XLogInsert for a heap-update operation. Caller must already * have modified the buffer(s) and marked them dirty. 
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c index c1f332f7a9a..a8908373067 100644 --- a/src/backend/access/heap/heapam_xlog.c +++ b/src/backend/access/heap/heapam_xlog.c @@ -251,8 +251,8 @@ heap_xlog_prune_freeze(XLogReaderState *record) * * In recovery, we expect no other writers, so writing to the VM page * without holding a lock on the heap page is considered safe enough. It - * is done this way when replaying xl_heap_visible records (see - * heap_xlog_visible()). + * is also done this way when replaying COPY FREEZE records (see + * heap_xlog_multi_insert()). */ if (vmflags & VISIBILITYMAP_VALID_BITS && XLogReadBufferForRedoExtended(record, 1, @@ -268,7 +268,7 @@ heap_xlog_prune_freeze(XLogReaderState *record) if (PageIsNew(vmpage)) PageInit(vmpage, BLCKSZ, 0); - old_vmbits = visibilitymap_set_vmbits(reln, blkno, vmbuffer, vmflags); + old_vmbits = visibilitymap_set(reln, blkno, vmbuffer, vmflags); /* Only set VM page LSN if we modified the page */ if (old_vmbits != vmflags) @@ -287,81 +287,6 @@ heap_xlog_prune_freeze(XLogReaderState *record) XLogRecordPageWithFreeSpace(rlocator, blkno, freespace); } -/* - * Replay XLOG_HEAP2_VISIBLE records. - * - * It is imperative that the previously emitted record set PD_ALL_VISIBLE on - * the heap page. We must never end up with a situation where the visibility - * map bit is set, and the page-level PD_ALL_VISIBLE bit is clear. If that - * were to occur, then a subsequent page modification would fail to clear the - * visibility map bit. 
- */ -static void -heap_xlog_visible(XLogReaderState *record) -{ - XLogRecPtr lsn = record->EndRecPtr; - xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record); - Buffer vmbuffer = InvalidBuffer; - RelFileLocator rlocator; - BlockNumber blkno; - - Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags); - - XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno); - - /* - * If there are any Hot Standby transactions running that have an xmin - * horizon old enough that this page isn't all-visible for them, they - * might incorrectly decide that an index-only scan can skip a heap fetch. - * - * NB: It might be better to throw some kind of "soft" conflict here that - * forces any index-only scan that is in flight to perform heap fetches, - * rather than killing the transaction outright. - */ - if (InHotStandby) - ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon, - xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL, - rlocator); - - /* - * Even if the heap relation was dropped or truncated and the previously - * emitted record skipped the heap page update due to this LSN interlock, - * it's still safe to update the visibility map. Any WAL record that - * clears the visibility map bit does so before checking the page LSN, so - * any bits that need to be cleared will still be cleared. - */ - if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false, - &vmbuffer) == BLK_NEEDS_REDO) - { - Page vmpage = BufferGetPage(vmbuffer); - Relation reln; - uint8 vmbits; - - /* initialize the page if it was read as zeros */ - if (PageIsNew(vmpage)) - PageInit(vmpage, BLCKSZ, 0); - - /* remove VISIBILITYMAP_XLOG_* */ - vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS; - - /* - * XLogReadBufferForRedoExtended locked the buffer. But - * visibilitymap_set will handle locking itself. 
- */ - LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK); - - reln = CreateFakeRelcacheEntry(rlocator); - - visibilitymap_set(reln, blkno, lsn, vmbuffer, - xlrec->snapshotConflictHorizon, vmbits); - - ReleaseBuffer(vmbuffer); - FreeFakeRelcacheEntry(reln); - } - else if (BufferIsValid(vmbuffer)) - UnlockReleaseBuffer(vmbuffer); -} - /* * Given an "infobits" field from an XLog record, set the correct bits in the * given infomask and infomask2 for the tuple touched by the record. @@ -739,8 +664,8 @@ heap_xlog_multi_insert(XLogReaderState *record) * * In recovery, we expect no other writers, so writing to the VM page * without holding a lock on the heap page is considered safe enough. It - * is done this way when replaying xl_heap_visible records (see - * heap_xlog_visible()). + * is done this way when replaying xl_heap_prune records (see + * heap_xlog_prune_freeze()). */ if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET && XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_ON_ERROR, false, @@ -753,10 +678,10 @@ heap_xlog_multi_insert(XLogReaderState *record) if (PageIsNew(vmpage)) PageInit(vmpage, BLCKSZ, 0); - visibilitymap_set_vmbits(reln, blkno, - vmbuffer, - VISIBILITYMAP_ALL_VISIBLE | - VISIBILITYMAP_ALL_FROZEN); + visibilitymap_set(reln, blkno, + vmbuffer, + VISIBILITYMAP_ALL_VISIBLE | + VISIBILITYMAP_ALL_FROZEN); /* * It is not possible that the VM was already set for this heap page, @@ -1342,9 +1267,6 @@ heap2_redo(XLogReaderState *record) case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP: heap_xlog_prune_freeze(record); break; - case XLOG_HEAP2_VISIBLE: - heap_xlog_visible(record); - break; case XLOG_HEAP2_MULTI_INSERT: heap_xlog_multi_insert(record); break; diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index a14c793da7e..39d59a43ff7 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -1026,8 +1026,8 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer, { Assert(PageIsAllVisible(page)); - 
old_vmbits = visibilitymap_set_vmbits(relation, blockno, - vmbuffer, new_vmbits); + old_vmbits = visibilitymap_set(relation, blockno, + vmbuffer, new_vmbits); if (old_vmbits == new_vmbits) { LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK); diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 8ec0476a0d4..28436389d63 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1887,10 +1887,10 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, PageSetAllVisible(page); MarkBufferDirty(buf); - visibilitymap_set_vmbits(vacrel->rel, blkno, - vmbuffer, - VISIBILITYMAP_ALL_VISIBLE | - VISIBILITYMAP_ALL_FROZEN); + visibilitymap_set(vacrel->rel, blkno, + vmbuffer, + VISIBILITYMAP_ALL_VISIBLE | + VISIBILITYMAP_ALL_FROZEN); if (RelationNeedsWAL(vacrel->rel)) { @@ -2775,9 +2775,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, if ((vmflags & VISIBILITYMAP_VALID_BITS) != 0) { PageSetAllVisible(page); - visibilitymap_set_vmbits(vacrel->rel, - blkno, - vmbuffer, vmflags); + visibilitymap_set(vacrel->rel, + blkno, + vmbuffer, vmflags); conflict_xid = visibility_cutoff_xid; } diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 75fcb3f067a..38d3131e56b 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -14,8 +14,7 @@ * visibilitymap_clear - clear bits for one page in the visibility map * visibilitymap_pin - pin a map page for setting a bit * visibilitymap_pin_ok - check whether correct map page is already pinned - * visibilitymap_set - set bit(s) in a previously pinned page and log - * visibilitymap_set_vmbits - set bit(s) in a pinned page + * visibilitymap_set - set bit(s) in a previously pinned page * visibilitymap_get_status - get status of bits * visibilitymap_count - count number of bits set in visibility map * visibilitymap_prepare_truncate - @@ -220,82 
+219,6 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf) return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock; } -/* - * visibilitymap_set - set bit(s) on a previously pinned page - * - * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, - * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the - * one provided; in normal running, we generate a new XLOG record and set the - * page LSN to that value (though the heap page's LSN may *not* be updated; - * see below). cutoff_xid is the largest xmin on the page being marked - * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId - * if the page contains no tuples. It can also be set to InvalidTransactionId - * when a page that is already all-visible is being marked all-frozen. - * - * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling - * this function. - * - * You must pass a buffer containing the correct map page to this function. - * Call visibilitymap_pin first to pin the right one. This function doesn't do - * any I/O. - * - * Returns the state of the page's VM bits before setting flags. 
- */ -uint8 -visibilitymap_set(Relation rel, BlockNumber heapBlk, - XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, - uint8 flags) -{ - BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); - uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); - uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); - Page page; - uint8 *map; - uint8 status; - -#ifdef TRACE_VISIBILITYMAP - elog(DEBUG1, "vm_set flags 0x%02X for %s %d", - flags, RelationGetRelationName(rel), heapBlk); -#endif - - Assert(InRecovery || XLogRecPtrIsInvalid(recptr)); - Assert((flags & VISIBILITYMAP_VALID_BITS) == flags); - - /* Must never set all_frozen bit without also setting all_visible bit */ - Assert(flags != VISIBILITYMAP_ALL_FROZEN); - - /* Check that we have the right VM page pinned */ - if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock) - elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); - - page = BufferGetPage(vmBuf); - map = (uint8 *) PageGetContents(page); - LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); - - status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS; - if (flags != status) - { - START_CRIT_SECTION(); - - map[mapByte] |= (flags << mapOffset); - MarkBufferDirty(vmBuf); - - if (RelationNeedsWAL(rel)) - { - if (XLogRecPtrIsInvalid(recptr)) - { - Assert(!InRecovery); - recptr = log_heap_visible(rel, vmBuf, cutoff_xid, flags); - } - PageSetLSN(page, recptr); - } - - END_CRIT_SECTION(); - } - - LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK); - return status; -} /* * Set flags in the VM block contained in the passed in vmBuf. @@ -318,8 +241,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, * is pinned and exclusive locked. 
*/ uint8 -visibilitymap_set_vmbits(Relation rel, BlockNumber heapBlk, - Buffer vmBuf, uint8 flags) +visibilitymap_set(Relation rel, BlockNumber heapBlk, + Buffer vmBuf, uint8 flags) { BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c index 1cb44ca32d3..93505cb8c56 100644 --- a/src/backend/access/rmgrdesc/heapdesc.c +++ b/src/backend/access/rmgrdesc/heapdesc.c @@ -349,13 +349,6 @@ heap2_desc(StringInfo buf, XLogReaderState *record) } } } - else if (info == XLOG_HEAP2_VISIBLE) - { - xl_heap_visible *xlrec = (xl_heap_visible *) rec; - - appendStringInfo(buf, "snapshotConflictHorizon: %u, flags: 0x%02X", - xlrec->snapshotConflictHorizon, xlrec->flags); - } else if (info == XLOG_HEAP2_MULTI_INSERT) { xl_heap_multi_insert *xlrec = (xl_heap_multi_insert *) rec; @@ -460,9 +453,6 @@ heap2_identify(uint8 info) case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP: id = "PRUNE_VACUUM_CLEANUP"; break; - case XLOG_HEAP2_VISIBLE: - id = "VISIBLE"; - break; case XLOG_HEAP2_MULTI_INSERT: id = "MULTI_INSERT"; break; diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index cc03f0706e9..2fdd4af90a8 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -454,7 +454,6 @@ heap2_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_HEAP2_PRUNE_ON_ACCESS: case XLOG_HEAP2_PRUNE_VACUUM_SCAN: case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP: - case XLOG_HEAP2_VISIBLE: case XLOG_HEAP2_LOCK_UPDATED: break; default: diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 4222bdab078..c619643e121 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -475,12 +475,12 @@ ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, * If we get passed InvalidTransactionId then we do nothing (no 
conflict). * * This can happen when replaying already-applied WAL records after a - * standby crash or restart, or when replaying an XLOG_HEAP2_VISIBLE - * record that marks as frozen a page which was already all-visible. It's - * also quite common with records generated during index deletion - * (original execution of the deletion can reason that a recovery conflict - * which is sufficient for the deletion operation must take place before - * replay of the deletion record itself). + * standby crash or restart, or when replaying a record that marks as + * frozen a page which was already marked all-visible in the visibility + * map. It's also quite common with records generated during index + * deletion (original execution of the deletion can reason that a recovery + * conflict which is sufficient for the deletion operation must take place + * before replay of the deletion record itself). */ if (!TransactionIdIsValid(snapshotConflictHorizon)) return; diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index 833114e0a6e..61ceaf2a98b 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -60,7 +60,6 @@ #define XLOG_HEAP2_PRUNE_ON_ACCESS 0x10 #define XLOG_HEAP2_PRUNE_VACUUM_SCAN 0x20 #define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP 0x30 -#define XLOG_HEAP2_VISIBLE 0x40 #define XLOG_HEAP2_MULTI_INSERT 0x50 #define XLOG_HEAP2_LOCK_UPDATED 0x60 #define XLOG_HEAP2_NEW_CID 0x70 @@ -451,19 +450,6 @@ typedef struct xl_heap_inplace #define MinSizeOfHeapInplace (offsetof(xl_heap_inplace, nmsgs) + sizeof(int)) -/* - * This is what we need to know about setting a visibility map bit - * - * Backup blk 0: visibility map buffer - */ -typedef struct xl_heap_visible -{ - TransactionId snapshotConflictHorizon; - uint8 flags; -} xl_heap_visible; - -#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8)) - typedef struct xl_heap_new_cid { /* @@ -507,11 +493,6 @@ extern void heap2_desc(StringInfo buf, 
XLogReaderState *record); extern const char *heap2_identify(uint8 info); extern void heap_xlog_logical_rewrite(XLogReaderState *r); -extern XLogRecPtr log_heap_visible(Relation rel, - Buffer vm_buffer, - TransactionId snapshotConflictHorizon, - uint8 vmflags); - /* in heapdesc.c, so it can be shared between frontend/backend code */ extern void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h index 302adf4856a..c5b1e1f7adb 100644 --- a/src/include/access/visibilitymap.h +++ b/src/include/access/visibilitymap.h @@ -15,7 +15,6 @@ #define VISIBILITYMAP_H #include "access/visibilitymapdefs.h" -#include "access/xlogdefs.h" #include "storage/block.h" #include "storage/buf.h" #include "utils/relcache.h" @@ -31,14 +30,8 @@ extern bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf); -extern uint8 visibilitymap_set(Relation rel, - BlockNumber heapBlk, - XLogRecPtr recptr, - Buffer vmBuf, - TransactionId cutoff_xid, - uint8 flags); -extern uint8 visibilitymap_set_vmbits(Relation rel, BlockNumber heapBlk, - Buffer vmBuf, uint8 flags); +extern uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, + Buffer vmBuf, uint8 flags); extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen); extern BlockNumber visibilitymap_prepare_truncate(Relation rel, diff --git a/src/include/access/visibilitymapdefs.h b/src/include/access/visibilitymapdefs.h index 5ad5c020877..e01bce4c99f 100644 --- a/src/include/access/visibilitymapdefs.h +++ b/src/include/access/visibilitymapdefs.h @@ -21,14 +21,5 @@ #define VISIBILITYMAP_ALL_FROZEN 0x02 #define VISIBILITYMAP_VALID_BITS 
0x03 /* OR of all valid visibilitymap * flags bits */ -/* - * To detect recovery conflicts during logical decoding on a standby, we need - * to know if a table is a user catalog table. For that we add an additional - * bit into xl_heap_visible.flags, in addition to the above. - * - * NB: VISIBILITYMAP_XLOG_* may not be passed to visibilitymap_set(). - */ -#define VISIBILITYMAP_XLOG_CATALOG_REL 0x04 -#define VISIBILITYMAP_XLOG_VALID_BITS (VISIBILITYMAP_VALID_BITS | VISIBILITYMAP_XLOG_CATALOG_REL) #endif /* VISIBILITYMAPDEFS_H */ diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index e90af5b2ad3..32c0f4719c3 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -4268,7 +4268,6 @@ xl_heap_prune xl_heap_rewrite_mapping xl_heap_truncate xl_heap_update -xl_heap_visible xl_invalid_page xl_invalid_page_key xl_invalidations -- 2.43.0