From 9e36bb144fa2bc050f1ad2eb203fe089f6603255 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Sat, 19 Nov 2022 16:37:53 -0800 Subject: [PATCH v8 1/6] Refactor how VACUUM passes around its XID cutoffs. Use a dedicated struct for the XID/MXID cutoffs used by VACUUM, such as FreezeLimit and OldestXmin. This state is initialized in vacuum.c, and then passed around (via const pointers) by code from vacuumlazy.c to external freezing related routines like heap_prepare_freeze_tuple. Also simplify some of the logic for dealing with frozen xmin in heap_prepare_freeze_tuple: add dedicated "xmin_already_frozen" state to clearly distinguish xmin XIDs that we're going to freeze from those that were already frozen from before. This makes its xmin handling code symmetrical with its xmax handling code. This is preparation for an upcoming commit that adds page level freezing. Also refactor the control flow within FreezeMultiXactId(), while adding stricter sanity checks. We now test OldestXmin directly (instead of using FreezeLimit as an inexact proxy for OldestXmin). Also promote an assertion to detect multiple updater XIDs within a single multi into a new "can't happen" error. This is also in preparation for the page level freezing commit, which will need to cede control of page level freezing to FreezeMultiXactId() with pages that have MultiXactIds that might need to be frozen. This helps to preserve the historic eager freezing behavior used when processing MultiXactIds, while still doing lazy processing for MultiXactIds where eager processing happens to be expensive. 
--- src/include/access/heapam.h | 18 +- src/include/commands/vacuum.h | 44 ++- src/backend/access/heap/heapam.c | 497 +++++++++++++-------------- src/backend/access/heap/vacuumlazy.c | 125 +++---- src/backend/commands/cluster.c | 25 +- src/backend/commands/vacuum.c | 78 ++--- 6 files changed, 395 insertions(+), 392 deletions(-) diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 810baaf9d..abc3a1f34 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -38,6 +38,7 @@ typedef struct BulkInsertStateData *BulkInsertState; struct TupleTableSlot; +struct VacuumCutoffs; #define MaxLockTupleMode LockTupleExclusive @@ -178,21 +179,20 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, extern void heap_inplace_update(Relation relation, HeapTuple tuple); extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, - TransactionId relfrozenxid, TransactionId relminmxid, - TransactionId cutoff_xid, TransactionId cutoff_multi, + const struct VacuumCutoffs *cutoffs, HeapTupleFreeze *frz, bool *totally_frozen, - TransactionId *relfrozenxid_out, - MultiXactId *relminmxid_out); + TransactionId *NewRelFrozenXid, + MultiXactId *NewRelminMxid); extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId FreezeLimit, HeapTupleFreeze *tuples, int ntuples); extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, - TransactionId cutoff_xid, TransactionId cutoff_multi); -extern bool heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, - MultiXactId cutoff_multi, - TransactionId *relfrozenxid_out, - MultiXactId *relminmxid_out); + TransactionId FreezeLimit, TransactionId MultiXactCutoff); +extern bool heap_tuple_should_freeze(HeapTupleHeader tuple, + const struct VacuumCutoffs *cutoffs, + TransactionId *NewRelfrozenXid, + MultiXactId *NewRelminMxid); extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple); extern void 
simple_heap_insert(Relation relation, HeapTuple tup); diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index b63751c46..43ee24b12 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -235,6 +235,45 @@ typedef struct VacuumParams int nworkers; } VacuumParams; +/* + * VacuumCutoffs is immutable state that describes the cutoffs used by VACUUM. + * Established at the beginning of each VACUUM operation. + */ +struct VacuumCutoffs +{ + /* + * Existing pg_class fields at start of VACUUM (used for sanity checks) + */ + TransactionId relfrozenxid; + MultiXactId relminmxid; + + /* + * OldestXmin is the Xid below which tuples deleted by any xact (that + * committed) should be considered DEAD, not just RECENTLY_DEAD. + * + * OldestMxact is the Mxid below which MultiXacts are definitely not seen + * as visible by any running transaction. + * + * OldestXmin and OldestMxact are also the most recent values that can + * ever be passed to vac_update_relstats() as frozenxid and minmulti + * arguments at the end of VACUUM. These same values should be passed + * when it turns out that VACUUM will leave no unfrozen XIDs/MXIDs behind + * in the table. + */ + TransactionId OldestXmin; + MultiXactId OldestMxact; + + /* + * FreezeLimit is the Xid below which all Xids are definitely replaced by + * FrozenTransactionId in heap pages that VACUUM can cleanup lock. + * + * MultiXactCutoff is the value below which all MultiXactIds are + * definitely removed from Xmax in heap pages VACUUM can cleanup lock. + */ + TransactionId FreezeLimit; + MultiXactId MultiXactCutoff; +}; + /* * VacDeadItems stores TIDs whose index tuples are deleted by index vacuuming. 
*/ @@ -287,10 +326,7 @@ extern void vac_update_relstats(Relation relation, bool *minmulti_updated, bool in_outer_xact); extern bool vacuum_set_xid_limits(Relation rel, const VacuumParams *params, - TransactionId *OldestXmin, - MultiXactId *OldestMxact, - TransactionId *FreezeLimit, - MultiXactId *MultiXactCutoff); + struct VacuumCutoffs *cutoffs); extern bool vacuum_xid_failsafe_check(TransactionId relfrozenxid, MultiXactId relminmxid); extern void vac_update_datfrozenxid(void); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 747db5037..74b3a459e 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -52,6 +52,7 @@ #include "access/xloginsert.h" #include "access/xlogutils.h" #include "catalog/catalog.h" +#include "commands/vacuum.h" #include "miscadmin.h" #include "pgstat.h" #include "port/atomics.h" @@ -6125,12 +6126,10 @@ heap_inplace_update(Relation relation, HeapTuple tuple) */ static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, - TransactionId relfrozenxid, TransactionId relminmxid, - TransactionId cutoff_xid, MultiXactId cutoff_multi, - uint16 *flags, TransactionId *mxid_oldest_xid_out) + const struct VacuumCutoffs *cutoffs, uint16 *flags, + TransactionId *mxid_oldest_xid_out) { - TransactionId xid = InvalidTransactionId; - int i; + TransactionId newxmax = InvalidTransactionId; MultiXactMember *members; int nmembers; bool need_replace; @@ -6153,12 +6152,12 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, *flags |= FRM_INVALIDATE_XMAX; return InvalidTransactionId; } - else if (MultiXactIdPrecedes(multi, relminmxid)) + else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("found multixact %u from before relminmxid %u", - multi, relminmxid))); - else if (MultiXactIdPrecedes(multi, cutoff_multi)) + multi, cutoffs->relminmxid))); + else if (MultiXactIdPrecedes(multi, 
cutoffs->MultiXactCutoff)) { /* * This old multi cannot possibly have members still running, but @@ -6171,39 +6170,39 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("multixact %u from before cutoff %u found to be still running", - multi, cutoff_multi))); + multi, cutoffs->MultiXactCutoff))); if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)) { *flags |= FRM_INVALIDATE_XMAX; - xid = InvalidTransactionId; + newxmax = InvalidTransactionId; } else { - /* replace multi by update xid */ - xid = MultiXactIdGetUpdateXid(multi, t_infomask); + /* replace multi with single XID for its updater */ + newxmax = MultiXactIdGetUpdateXid(multi, t_infomask); /* wasn't only a lock, xid needs to be valid */ - Assert(TransactionIdIsValid(xid)); + Assert(TransactionIdIsValid(newxmax)); - if (TransactionIdPrecedes(xid, relfrozenxid)) + if (TransactionIdPrecedes(newxmax, cutoffs->relfrozenxid)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("found update xid %u from before relfrozenxid %u", - xid, relfrozenxid))); + newxmax, cutoffs->relfrozenxid))); /* - * If the xid is older than the cutoff, it has to have aborted, - * otherwise the tuple would have gotten pruned away. 
+ * If the new xmax xid is older than OldestXmin, it has to have + * aborted, otherwise the tuple would have been pruned away */ - if (TransactionIdPrecedes(xid, cutoff_xid)) + if (TransactionIdPrecedes(newxmax, cutoffs->OldestXmin)) { - if (TransactionIdDidCommit(xid)) + if (TransactionIdDidCommit(newxmax)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_internal("cannot freeze committed update xid %u", xid))); + errmsg_internal("cannot freeze committed update xid %u", newxmax))); *flags |= FRM_INVALIDATE_XMAX; - xid = InvalidTransactionId; + newxmax = InvalidTransactionId; } else { @@ -6215,17 +6214,14 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * Don't push back mxid_oldest_xid_out using FRM_RETURN_IS_XID Xid, or * when no Xids will remain */ - return xid; + return newxmax; } /* - * This multixact might have or might not have members still running, but - * we know it's valid and is newer than the cutoff point for multis. - * However, some member(s) of it may be below the cutoff for Xids, so we + * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we * need to walk the whole members array to figure out what to do, if * anything. */ - nmembers = GetMultiXactIdMembers(multi, &members, false, HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)); @@ -6236,12 +6232,15 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, return InvalidTransactionId; } - /* is there anything older than the cutoff? 
*/ need_replace = false; temp_xid_out = *mxid_oldest_xid_out; /* init for FRM_NOOP */ - for (i = 0; i < nmembers; i++) + for (int i = 0; i < nmembers; i++) { - if (TransactionIdPrecedes(members[i].xid, cutoff_xid)) + TransactionId xid = members[i].xid; + + Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid)); + + if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit)) { need_replace = true; break; @@ -6251,7 +6250,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, } /* - * In the simplest case, there is no member older than the cutoff; we can + * In the simplest case, there is no member older than FreezeLimit; we can * keep the existing MultiXactId as-is, avoiding a more expensive second * pass over the multi */ @@ -6279,110 +6278,98 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, update_committed = false; temp_xid_out = *mxid_oldest_xid_out; /* init for FRM_RETURN_IS_MULTI */ - for (i = 0; i < nmembers; i++) + /* + * Determine whether to keep each member txid, or to ignore it instead + */ + for (int i = 0; i < nmembers; i++) { - /* - * Determine whether to keep this member or ignore it. - */ - if (ISUPDATE_from_mxstatus(members[i].status)) + TransactionId xid = members[i].xid; + MultiXactStatus mstatus = members[i].status; + + Assert(TransactionIdIsValid(xid)); + Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid)); + + if (!ISUPDATE_from_mxstatus(mstatus)) { - TransactionId txid = members[i].xid; - - Assert(TransactionIdIsValid(txid)); - if (TransactionIdPrecedes(txid, relfrozenxid)) - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_internal("found update xid %u from before relfrozenxid %u", - txid, relfrozenxid))); - /* - * It's an update; should we keep it? If the transaction is known - * aborted or crashed then it's okay to ignore it, otherwise not. 
- * Note that an updater older than cutoff_xid cannot possibly be - * committed, because HeapTupleSatisfiesVacuum would have returned - * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple. - * - * As with all tuple visibility routines, it's critical to test - * TransactionIdIsInProgress before TransactionIdDidCommit, - * because of race conditions explained in detail in - * heapam_visibility.c. + * Locker XID (not updater XID). We only keep lockers that are + * still running. */ - if (TransactionIdIsCurrentTransactionId(txid) || - TransactionIdIsInProgress(txid)) - { - Assert(!TransactionIdIsValid(update_xid)); - update_xid = txid; - } - else if (TransactionIdDidCommit(txid)) - { - /* - * The transaction committed, so we can tell caller to set - * HEAP_XMAX_COMMITTED. (We can only do this because we know - * the transaction is not running.) - */ - Assert(!TransactionIdIsValid(update_xid)); - update_committed = true; - update_xid = txid; - } - else - { - /* - * Not in progress, not committed -- must be aborted or - * crashed; we can ignore it. - */ - } - - /* - * Since the tuple wasn't totally removed when vacuum pruned, the - * update Xid cannot possibly be older than the xid cutoff. The - * presence of such a tuple would cause corruption, so be paranoid - * and check. - */ - if (TransactionIdIsValid(update_xid) && - TransactionIdPrecedes(update_xid, cutoff_xid)) - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_internal("found update xid %u from before xid cutoff %u", - update_xid, cutoff_xid))); - - /* - * We determined that this is an Xid corresponding to an update - * that must be retained -- add it to new members list for later. - * - * Also consider pushing back temp_xid_out, which is needed when - * we later conclude that a new multi is required (i.e. when we go - * on to set FRM_RETURN_IS_MULTI for our caller because we also - * need to retain a locker that's still running). 
- */ - if (TransactionIdIsValid(update_xid)) + if (TransactionIdIsCurrentTransactionId(xid) || + TransactionIdIsInProgress(xid)) { newmembers[nnewmembers++] = members[i]; - if (TransactionIdPrecedes(members[i].xid, temp_xid_out)) - temp_xid_out = members[i].xid; + has_lockers = true; + + /* + * Cannot possibly be older than VACUUM's OldestXmin, so we + * don't need a NewRelfrozenXid step here + */ + Assert(TransactionIdPrecedesOrEquals(cutoffs->OldestXmin, xid)); } + + continue; + } + + /* + * Updater XID (not locker XID). Should we keep it? + * + * Since the tuple wasn't totally removed when vacuum pruned, the + * update Xid cannot possibly be older than OldestXmin cutoff. The + * presence of such a tuple would cause corruption, so be paranoid and + * check. + */ + if (TransactionIdPrecedes(xid, cutoffs->OldestXmin)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found update xid %u from before removable cutoff %u", + xid, cutoffs->OldestXmin))); + if (TransactionIdIsValid(update_xid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("multixact %u has two or more updating members", + multi), + errdetail_internal("First updater XID=%u second updater XID=%u.", + update_xid, xid))); + + /* + * If the transaction is known aborted or crashed then it's okay to + * ignore it, otherwise not. + * + * As with all tuple visibility routines, it's critical to test + * TransactionIdIsInProgress before TransactionIdDidCommit, because of + * race conditions explained in detail in heapam_visibility.c. + */ + if (TransactionIdIsCurrentTransactionId(xid) || + TransactionIdIsInProgress(xid)) + update_xid = xid; + else if (TransactionIdDidCommit(xid)) + { + /* + * The transaction committed, so we can tell caller to set + * HEAP_XMAX_COMMITTED. (We can only do this because we know the + * transaction is not running.) 
+ */ + update_committed = true; + update_xid = xid; } else { - /* We only keep lockers if they are still running */ - if (TransactionIdIsCurrentTransactionId(members[i].xid) || - TransactionIdIsInProgress(members[i].xid)) - { - /* - * Running locker cannot possibly be older than the cutoff. - * - * The cutoff is <= VACUUM's OldestXmin, which is also the - * initial value used for top-level relfrozenxid_out tracking - * state. A running locker cannot be older than VACUUM's - * OldestXmin, either, so we don't need a temp_xid_out step. - */ - Assert(TransactionIdIsNormal(members[i].xid)); - Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid)); - Assert(!TransactionIdPrecedes(members[i].xid, - *mxid_oldest_xid_out)); - newmembers[nnewmembers++] = members[i]; - has_lockers = true; - } + /* + * Not in progress, not committed -- must be aborted or crashed; + * we can ignore it. + */ + continue; } + + /* + * We determined that this is an Xid corresponding to an update that + * must be retained -- add it to new members list for later. Also + * consider pushing back mxid_oldest_xid_out. + */ + newmembers[nnewmembers++] = members[i]; + if (TransactionIdPrecedes(xid, temp_xid_out)) + temp_xid_out = xid; } pfree(members); @@ -6395,7 +6382,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, { /* nothing worth keeping!? 
Tell caller to remove the whole thing */ *flags |= FRM_INVALIDATE_XMAX; - xid = InvalidTransactionId; + newxmax = InvalidTransactionId; /* Don't push back mxid_oldest_xid_out -- no Xids will remain */ } else if (TransactionIdIsValid(update_xid) && !has_lockers) @@ -6411,7 +6398,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, *flags |= FRM_RETURN_IS_XID; if (update_committed) *flags |= FRM_MARK_COMMITTED; - xid = update_xid; + newxmax = update_xid; /* Don't push back mxid_oldest_xid_out using FRM_RETURN_IS_XID Xid */ } else @@ -6421,14 +6408,14 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * one, to set as new Xmax in the tuple. The oldest surviving member * might push back mxid_oldest_xid_out. */ - xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers); + newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers); *flags |= FRM_RETURN_IS_MULTI; *mxid_oldest_xid_out = temp_xid_out; } pfree(newmembers); - return xid; + return newxmax; } /* @@ -6450,19 +6437,13 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD * (else we should be removing the tuple, not freezing it). * - * The *relfrozenxid_out and *relminmxid_out arguments are the current target + * The *NewRelFrozenXid and *NewRelminMxid arguments are the current target * relfrozenxid and relminmxid for VACUUM caller's heap rel. Any and all * unfrozen XIDs or MXIDs that remain in caller's rel after VACUUM finishes * _must_ have values >= the final relfrozenxid/relminmxid values in pg_class. * This includes XIDs that remain as MultiXact members from any tuple's xmax. - * Each call here pushes back *relfrozenxid_out and/or *relminmxid_out as - * needed to avoid unsafe final values in rel's authoritative pg_class tuple. - * - * NB: cutoff_xid *must* be <= VACUUM's OldestXmin, to ensure that any - * XID older than it could neither be running nor seen as running by any - * open transaction. 
This ensures that the replacement will not change - * anyone's idea of the tuple state. - * Similarly, cutoff_multi must be <= VACUUM's OldestMxact. + * Each call here pushes back *NewRelFrozenXid and/or *NewRelminMxid as needed + * to avoid unsafe final values in rel's authoritative pg_class tuple. * * NB: This function has side effects: it might allocate a new MultiXactId. * It will be set as tuple's new xmax when our *frz output is processed within @@ -6471,16 +6452,16 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, */ bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, - TransactionId relfrozenxid, TransactionId relminmxid, - TransactionId cutoff_xid, TransactionId cutoff_multi, + const struct VacuumCutoffs *cutoffs, HeapTupleFreeze *frz, bool *totally_frozen, - TransactionId *relfrozenxid_out, - MultiXactId *relminmxid_out) + TransactionId *NewRelFrozenXid, + MultiXactId *NewRelminMxid) { - bool changed = false; - bool xmax_already_frozen = false; - bool xmin_frozen; - bool freeze_xmax; + bool frzplan_set = false; + bool xmin_already_frozen = false, + xmax_already_frozen = false; + bool freeze_xmin, + freeze_xmax; TransactionId xid; frz->frzflags = 0; @@ -6489,54 +6470,51 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, frz->xmax = HeapTupleHeaderGetRawXmax(tuple); /* - * Process xmin. xmin_frozen has two slightly different meanings: in the - * !XidIsNormal case, it means "the xmin doesn't need any freezing" (it's - * already a permanent value), while in the block below it is set true to - * mean "xmin won't need freezing after what we do to it here" (false - * otherwise). In both cases we're allowed to set totally_frozen, as far - * as xmin is concerned. Both cases also don't require relfrozenxid_out - * handling, since either way the tuple's xmin will be a permanent value - * once we're done with it. 
+ * Process xmin, while keeping track of whether it's already frozen, or + * will become frozen when our freeze plan is executed by caller (could be + * neither). */ xid = HeapTupleHeaderGetXmin(tuple); if (!TransactionIdIsNormal(xid)) - xmin_frozen = true; + { + freeze_xmin = false; + xmin_already_frozen = true; + /* No need for NewRelfrozenXid handling for already-frozen xmin */ + } else { - if (TransactionIdPrecedes(xid, relfrozenxid)) + if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("found xmin %u from before relfrozenxid %u", - xid, relfrozenxid))); + xid, cutoffs->relfrozenxid))); - xmin_frozen = TransactionIdPrecedes(xid, cutoff_xid); - if (xmin_frozen) + freeze_xmin = TransactionIdPrecedes(xid, cutoffs->FreezeLimit); + if (freeze_xmin) { if (!TransactionIdDidCommit(xid)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen", - xid, cutoff_xid))); + xid, cutoffs->FreezeLimit))); frz->t_infomask |= HEAP_XMIN_FROZEN; - changed = true; + frzplan_set = true; } else { - /* xmin to remain unfrozen. Could push back relfrozenxid_out. */ - if (TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; + /* xmin to remain unfrozen. Could push back NewRelfrozenXid. */ + if (TransactionIdPrecedes(xid, *NewRelFrozenXid)) + *NewRelFrozenXid = xid; } } /* * Process xmax. To thoroughly examine the current Xmax value we need to * resolve a MultiXactId to its member Xids, in case some of them are - * below the given cutoff for Xids. In that case, those values might need + * below the given FreezeLimit. In that case, those values might need * freezing, too. Also, if a multi needs freezing, we cannot simply take * it out --- if there's a live updater Xid, it needs to be kept. - * - * Make sure to keep heap_tuple_would_freeze in sync with this. 
*/ xid = HeapTupleHeaderGetRawXmax(tuple); @@ -6545,11 +6523,9 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, /* Raw xmax is a MultiXactId */ TransactionId newxmax; uint16 flags; - TransactionId mxid_oldest_xid_out = *relfrozenxid_out; + TransactionId mxid_oldest_xid_out = *NewRelFrozenXid; - newxmax = FreezeMultiXactId(xid, tuple->t_infomask, - relfrozenxid, relminmxid, - cutoff_xid, cutoff_multi, + newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs, &flags, &mxid_oldest_xid_out); freeze_xmax = (flags & FRM_INVALIDATE_XMAX); @@ -6559,13 +6535,13 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, /* * xmax will become an updater Xid (original MultiXact's updater * member Xid will be carried forward as a simple Xid in Xmax). - * Might have to ratchet back relfrozenxid_out here, though never - * relminmxid_out. + * Might have to ratchet back NewRelfrozenXid here, though never + * NewRelminMxid. */ Assert(!freeze_xmax); Assert(TransactionIdIsValid(newxmax)); - if (TransactionIdPrecedes(newxmax, *relfrozenxid_out)) - *relfrozenxid_out = newxmax; + if (TransactionIdPrecedes(newxmax, *NewRelFrozenXid)) + *NewRelFrozenXid = newxmax; /* * NB -- some of these transformations are only valid because we @@ -6578,7 +6554,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, frz->xmax = newxmax; if (flags & FRM_MARK_COMMITTED) frz->t_infomask |= HEAP_XMAX_COMMITTED; - changed = true; + frzplan_set = true; } else if (flags & FRM_RETURN_IS_MULTI) { @@ -6588,15 +6564,15 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, /* * xmax is an old MultiXactId that we have to replace with a new * MultiXactId, to carry forward two or more original member XIDs. - * Might have to ratchet back relfrozenxid_out here, though never - * relminmxid_out. + * Might have to ratchet back NewRelfrozenXid here, though never + * NewRelminMxid. 
*/ Assert(!freeze_xmax); Assert(MultiXactIdIsValid(newxmax)); - Assert(!MultiXactIdPrecedes(newxmax, *relminmxid_out)); + Assert(!MultiXactIdPrecedes(newxmax, *NewRelminMxid)); Assert(TransactionIdPrecedesOrEquals(mxid_oldest_xid_out, - *relfrozenxid_out)); - *relfrozenxid_out = mxid_oldest_xid_out; + *NewRelFrozenXid)); + *NewRelFrozenXid = mxid_oldest_xid_out; /* * We can't use GetMultiXactIdHintBits directly on the new multi @@ -6612,28 +6588,28 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, frz->xmax = newxmax; - changed = true; + frzplan_set = true; } else if (flags & FRM_NOOP) { /* * xmax is a MultiXactId, and nothing about it changes for now. - * Might have to ratchet back relminmxid_out, relfrozenxid_out, or + * Might have to ratchet back NewRelminMxid, NewRelfrozenXid, or * both together. */ Assert(!freeze_xmax); Assert(MultiXactIdIsValid(newxmax) && xid == newxmax); Assert(TransactionIdPrecedesOrEquals(mxid_oldest_xid_out, - *relfrozenxid_out)); - if (MultiXactIdPrecedes(xid, *relminmxid_out)) - *relminmxid_out = xid; - *relfrozenxid_out = mxid_oldest_xid_out; + *NewRelFrozenXid)); + if (MultiXactIdPrecedes(xid, *NewRelminMxid)) + *NewRelminMxid = xid; + *NewRelFrozenXid = mxid_oldest_xid_out; } else { /* * Keeping nothing (neither an Xid nor a MultiXactId) in xmax. - * Won't have to ratchet back relminmxid_out or relfrozenxid_out. + * Won't have to ratchet back NewRelminMxid or NewRelfrozenXid. 
*/ Assert(freeze_xmax); Assert(!TransactionIdIsValid(newxmax)); @@ -6642,13 +6618,13 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, else if (TransactionIdIsNormal(xid)) { /* Raw xmax is normal XID */ - if (TransactionIdPrecedes(xid, relfrozenxid)) + if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg_internal("found xmax %u from before relfrozenxid %u", - xid, relfrozenxid))); + xid, cutoffs->relfrozenxid))); - if (TransactionIdPrecedes(xid, cutoff_xid)) + if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit)) { /* * If we freeze xmax, make absolutely sure that it's not an XID @@ -6663,13 +6639,13 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, errmsg_internal("cannot freeze committed xmax %u", xid))); freeze_xmax = true; - /* No need for relfrozenxid_out handling, since we'll freeze xmax */ + /* No need for NewRelfrozenXid handling, since we'll freeze xmax */ } else { freeze_xmax = false; - if (TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; + if (TransactionIdPrecedes(xid, *NewRelFrozenXid)) + *NewRelFrozenXid = xid; } } else if (!TransactionIdIsValid(xid)) @@ -6678,14 +6654,19 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0); freeze_xmax = false; xmax_already_frozen = true; - /* No need for relfrozenxid_out handling for already-frozen xmax */ + /* No need for NewRelfrozenXid handling for already-frozen xmax */ } else ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_internal("found xmax %u (infomask 0x%04x) not frozen, not multi, not normal", + errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi", xid, tuple->t_infomask))); + if (freeze_xmin) + { + Assert(!xmin_already_frozen); + Assert(frzplan_set); + } if (freeze_xmax) { Assert(!xmax_already_frozen); @@ -6701,7 +6682,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, frz->t_infomask |= HEAP_XMAX_INVALID; frz->t_infomask2 &= 
~HEAP_HOT_UPDATED; frz->t_infomask2 &= ~HEAP_KEYS_UPDATED; - changed = true; + frzplan_set = true; } /* @@ -6713,17 +6694,14 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, xid = HeapTupleHeaderGetXvac(tuple); /* - * For Xvac, we ignore the cutoff_xid and just always perform the - * freeze operation. The oldest release in which such a value can - * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL - * was removed in PostgreSQL 9.0. Note that if we were to respect - * cutoff_xid here, we'd need to make surely to clear totally_frozen - * when we skipped freezing on that basis. - * - * No need for relfrozenxid_out handling, since we always freeze xvac. + * For Xvac, we always freeze proactively. This allows totally_frozen + * tracking to ignore xvac. */ if (TransactionIdIsNormal(xid)) { + Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); + Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin)); + /* * If a MOVED_OFF tuple is not dead, the xvac transaction must * have failed; whereas a non-dead MOVED_IN tuple must mean the @@ -6734,19 +6712,21 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, else frz->frzflags |= XLH_FREEZE_XVAC; - /* - * Might as well fix the hint bits too; usually XMIN_COMMITTED - * will already be set here, but there's a small chance not. 
- */ + /* Set XMIN_COMMITTED defensively */ Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID)); frz->t_infomask |= HEAP_XMIN_COMMITTED; - changed = true; + frzplan_set = true; } } - *totally_frozen = (xmin_frozen && + /* + * Determine if this tuple is already totally frozen, or will become + * totally frozen + */ + *totally_frozen = ((freeze_xmin || xmin_already_frozen) && (freeze_xmax || xmax_already_frozen)); - return changed; + + return frzplan_set; } /* @@ -6865,19 +6845,25 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer, bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, - TransactionId cutoff_xid, TransactionId cutoff_multi) + TransactionId FreezeLimit, TransactionId MultiXactCutoff) { HeapTupleFreeze frz; bool do_freeze; - bool tuple_totally_frozen; - TransactionId relfrozenxid_out = cutoff_xid; - MultiXactId relminmxid_out = cutoff_multi; + bool totally_frozen; + struct VacuumCutoffs cutoffs; + TransactionId NewRelfrozenXid = FreezeLimit; + MultiXactId NewRelminMxid = MultiXactCutoff; - do_freeze = heap_prepare_freeze_tuple(tuple, - relfrozenxid, relminmxid, - cutoff_xid, cutoff_multi, - &frz, &tuple_totally_frozen, - &relfrozenxid_out, &relminmxid_out); + cutoffs.relfrozenxid = relfrozenxid; + cutoffs.relminmxid = relminmxid; + cutoffs.OldestXmin = FreezeLimit; + cutoffs.OldestMxact = MultiXactCutoff; + cutoffs.FreezeLimit = FreezeLimit; + cutoffs.MultiXactCutoff = MultiXactCutoff; + + do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs, + &frz, &totally_frozen, + &NewRelfrozenXid, &NewRelminMxid); /* * Note that because this is not a WAL-logged operation, we don't need to @@ -7300,35 +7286,41 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple) } /* - * heap_tuple_would_freeze + * heap_tuple_should_freeze * * Return value indicates if heap_prepare_freeze_tuple sibling function would * freeze any of the XID/MXID fields from the tuple, given the same cutoffs. 
* We must also deal with dead tuples here, since (xmin, xmax, xvac) fields * could be processed by pruning away the whole tuple instead of freezing. * - * The *relfrozenxid_out and *relminmxid_out input/output arguments work just + * The *NewRelfrozenXid and *NewRelminMxid input/output arguments work just * like the heap_prepare_freeze_tuple arguments that they're based on. We * never freeze here, which makes tracking the oldest extant XID/MXID simple. */ bool -heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, - MultiXactId cutoff_multi, - TransactionId *relfrozenxid_out, - MultiXactId *relminmxid_out) +heap_tuple_should_freeze(HeapTupleHeader tuple, + const struct VacuumCutoffs *cutoffs, + TransactionId *NewRelfrozenXid, + MultiXactId *NewRelminMxid) { + TransactionId MustFreezeLimit; + MultiXactId MustFreezeMultiLimit; TransactionId xid; MultiXactId multi; - bool would_freeze = false; + bool freeze = false; + + MustFreezeLimit = cutoffs->FreezeLimit; + MustFreezeMultiLimit = cutoffs->MultiXactCutoff; /* First deal with xmin */ xid = HeapTupleHeaderGetXmin(tuple); if (TransactionIdIsNormal(xid)) { - if (TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; - if (TransactionIdPrecedes(xid, cutoff_xid)) - would_freeze = true; + Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); + if (TransactionIdPrecedes(xid, *NewRelfrozenXid)) + *NewRelfrozenXid = xid; + if (TransactionIdPrecedes(xid, MustFreezeLimit)) + freeze = true; } /* Now deal with xmax */ @@ -7341,11 +7333,12 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, if (TransactionIdIsNormal(xid)) { + Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); /* xmax is a non-permanent XID */ - if (TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; - if (TransactionIdPrecedes(xid, cutoff_xid)) - would_freeze = true; + if (TransactionIdPrecedes(xid, *NewRelfrozenXid)) + *NewRelfrozenXid = xid; + if 
(TransactionIdPrecedes(xid, MustFreezeLimit)) + freeze = true; } else if (!MultiXactIdIsValid(multi)) { @@ -7354,10 +7347,10 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask)) { /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */ - if (MultiXactIdPrecedes(multi, *relminmxid_out)) - *relminmxid_out = multi; + if (MultiXactIdPrecedes(multi, *NewRelminMxid)) + *NewRelminMxid = multi; /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */ - would_freeze = true; + freeze = true; } else { @@ -7365,10 +7358,11 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactMember *members; int nmembers; - if (MultiXactIdPrecedes(multi, *relminmxid_out)) - *relminmxid_out = multi; - if (MultiXactIdPrecedes(multi, cutoff_multi)) - would_freeze = true; + Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi)); + if (MultiXactIdPrecedes(multi, *NewRelminMxid)) + *NewRelminMxid = multi; + if (MultiXactIdPrecedes(multi, MustFreezeMultiLimit)) + freeze = true; /* need to check whether any member of the mxact is old */ nmembers = GetMultiXactIdMembers(multi, &members, false, @@ -7377,11 +7371,11 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, for (int i = 0; i < nmembers; i++) { xid = members[i].xid; - Assert(TransactionIdIsNormal(xid)); - if (TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; - if (TransactionIdPrecedes(xid, cutoff_xid)) - would_freeze = true; + Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); + if (TransactionIdPrecedes(xid, *NewRelfrozenXid)) + *NewRelfrozenXid = xid; + if (TransactionIdPrecedes(xid, MustFreezeLimit)) + freeze = true; } if (nmembers > 0) pfree(members); @@ -7392,14 +7386,15 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, xid = HeapTupleHeaderGetXvac(tuple); if (TransactionIdIsNormal(xid)) { - if 
(TransactionIdPrecedes(xid, *relfrozenxid_out)) - *relfrozenxid_out = xid; - /* heap_prepare_freeze_tuple always freezes xvac */ - would_freeze = true; + Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); + if (TransactionIdPrecedes(xid, *NewRelfrozenXid)) + *NewRelfrozenXid = xid; + /* heap_prepare_freeze_tuple forces xvac freezing */ + freeze = true; } } - return would_freeze; + return freeze; } /* diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index d59711b7e..b3668e57b 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -144,6 +144,10 @@ typedef struct LVRelState Relation *indrels; int nindexes; + /* Buffer access strategy and parallel vacuum state */ + BufferAccessStrategy bstrategy; + ParallelVacuumState *pvs; + /* Aggressive VACUUM? (must set relfrozenxid >= FreezeLimit) */ bool aggressive; /* Use visibility map to skip? (disabled by DISABLE_PAGE_SKIPPING) */ @@ -158,21 +162,9 @@ typedef struct LVRelState bool do_index_cleanup; bool do_rel_truncate; - /* Buffer access strategy and parallel vacuum state */ - BufferAccessStrategy bstrategy; - ParallelVacuumState *pvs; - - /* rel's initial relfrozenxid and relminmxid */ - TransactionId relfrozenxid; - MultiXactId relminmxid; - double old_live_tuples; /* previous value of pg_class.reltuples */ - /* VACUUM operation's cutoffs for freezing and pruning */ - TransactionId OldestXmin; + struct VacuumCutoffs cutoffs; GlobalVisState *vistest; - /* VACUUM operation's target cutoffs for freezing XIDs and MultiXactIds */ - TransactionId FreezeLimit; - MultiXactId MultiXactCutoff; /* Tracks oldest extant XID/MXID for setting relfrozenxid/relminmxid */ TransactionId NewRelfrozenXid; MultiXactId NewRelminMxid; @@ -318,10 +310,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, skipwithvm, frozenxid_updated, minmulti_updated; - TransactionId OldestXmin, - FreezeLimit; - MultiXactId OldestMxact, - MultiXactCutoff; + 
struct VacuumCutoffs cutoffs; BlockNumber orig_rel_pages, new_rel_pages, new_rel_allvisible; @@ -354,14 +343,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, RelationGetRelid(rel)); /* - * Get OldestXmin cutoff, which is used to determine which deleted tuples - * are considered DEAD, not just RECENTLY_DEAD. Also get related cutoffs - * used to determine which XIDs/MultiXactIds will be frozen. If this is - * an aggressive VACUUM then lazy_scan_heap cannot leave behind unfrozen - * XIDs < FreezeLimit (all MXIDs < MultiXactCutoff also need to go away). + * Get cutoffs that determine which deleted tuples are considered DEAD, + * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze */ - aggressive = vacuum_set_xid_limits(rel, params, &OldestXmin, &OldestMxact, - &FreezeLimit, &MultiXactCutoff); + aggressive = vacuum_set_xid_limits(rel, params, &cutoffs); skipwithvm = true; if (params->options & VACOPT_DISABLE_PAGE_SKIPPING) @@ -415,6 +400,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, vacrel->rel = rel; vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes, &vacrel->indrels); + vacrel->bstrategy = bstrategy; if (instrument && vacrel->nindexes > 0) { /* Copy index names used by instrumentation (not error reporting) */ @@ -459,11 +445,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, Assert(params->index_cleanup == VACOPTVALUE_AUTO); } - vacrel->bstrategy = bstrategy; - vacrel->relfrozenxid = rel->rd_rel->relfrozenxid; - vacrel->relminmxid = rel->rd_rel->relminmxid; - vacrel->old_live_tuples = rel->rd_rel->reltuples; - /* Initialize page counters explicitly (be tidy) */ vacrel->scanned_pages = 0; vacrel->removed_pages = 0; @@ -505,15 +486,11 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * frozen) during its scan. 
*/ vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel); - vacrel->OldestXmin = OldestXmin; + vacrel->cutoffs = cutoffs; vacrel->vistest = GlobalVisTestFor(rel); - /* FreezeLimit controls XID freezing (always <= OldestXmin) */ - vacrel->FreezeLimit = FreezeLimit; - /* MultiXactCutoff controls MXID freezing (always <= OldestMxact) */ - vacrel->MultiXactCutoff = MultiXactCutoff; /* Initialize state used to track oldest extant XID/MXID */ - vacrel->NewRelfrozenXid = OldestXmin; - vacrel->NewRelminMxid = OldestMxact; + vacrel->NewRelfrozenXid = cutoffs.OldestXmin; + vacrel->NewRelminMxid = cutoffs.OldestMxact; vacrel->skippedallvis = false; /* @@ -569,13 +546,13 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * value >= FreezeLimit, and relminmxid to a value >= MultiXactCutoff. * Non-aggressive VACUUMs may advance them by any amount, or not at all. */ - Assert(vacrel->NewRelfrozenXid == OldestXmin || - TransactionIdPrecedesOrEquals(aggressive ? FreezeLimit : - vacrel->relfrozenxid, + Assert(vacrel->NewRelfrozenXid == cutoffs.OldestXmin || + TransactionIdPrecedesOrEquals(aggressive ? cutoffs.FreezeLimit : + vacrel->cutoffs.relfrozenxid, vacrel->NewRelfrozenXid)); - Assert(vacrel->NewRelminMxid == OldestMxact || - MultiXactIdPrecedesOrEquals(aggressive ? MultiXactCutoff : - vacrel->relminmxid, + Assert(vacrel->NewRelminMxid == cutoffs.OldestMxact || + MultiXactIdPrecedesOrEquals(aggressive ? 
cutoffs.MultiXactCutoff : + vacrel->cutoffs.relminmxid, vacrel->NewRelminMxid)); if (vacrel->skippedallvis) { @@ -702,20 +679,22 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, _("tuples missed: %lld dead from %u pages not removed due to cleanup lock contention\n"), (long long) vacrel->missed_dead_tuples, vacrel->missed_dead_pages); - diff = (int32) (ReadNextTransactionId() - OldestXmin); + diff = (int32) (ReadNextTransactionId() - cutoffs.OldestXmin); appendStringInfo(&buf, _("removable cutoff: %u, which was %d XIDs old when operation ended\n"), - OldestXmin, diff); + cutoffs.OldestXmin, diff); if (frozenxid_updated) { - diff = (int32) (vacrel->NewRelfrozenXid - vacrel->relfrozenxid); + diff = (int32) (vacrel->NewRelfrozenXid - + vacrel->cutoffs.relfrozenxid); appendStringInfo(&buf, _("new relfrozenxid: %u, which is %d XIDs ahead of previous value\n"), vacrel->NewRelfrozenXid, diff); } if (minmulti_updated) { - diff = (int32) (vacrel->NewRelminMxid - vacrel->relminmxid); + diff = (int32) (vacrel->NewRelminMxid - + vacrel->cutoffs.relminmxid); appendStringInfo(&buf, _("new relminmxid: %u, which is %d MXIDs ahead of previous value\n"), vacrel->NewRelminMxid, diff); @@ -1610,7 +1589,7 @@ retry: offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - bool tuple_totally_frozen; + bool totally_frozen; /* * Set the offset number so that we can display it along with any @@ -1666,7 +1645,8 @@ retry: * since heap_page_prune() looked. Handle that here by restarting. * (See comments at the top of function for a full explanation.) */ - res = HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf); + res = HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin, + buf); if (unlikely(res == HEAPTUPLE_DEAD)) goto retry; @@ -1723,7 +1703,8 @@ retry: * that everyone sees it as committed? 
*/ xmin = HeapTupleHeaderGetXmin(tuple.t_data); - if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin)) + if (!TransactionIdPrecedes(xmin, + vacrel->cutoffs.OldestXmin)) { prunestate->all_visible = false; break; @@ -1774,13 +1755,8 @@ retry: prunestate->hastup = true; /* page makes rel truncation unsafe */ /* Tuple with storage -- consider need to freeze */ - if (heap_prepare_freeze_tuple(tuple.t_data, - vacrel->relfrozenxid, - vacrel->relminmxid, - vacrel->FreezeLimit, - vacrel->MultiXactCutoff, - &frozen[tuples_frozen], - &tuple_totally_frozen, + if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, + &frozen[tuples_frozen], &totally_frozen, &NewRelfrozenXid, &NewRelminMxid)) { /* Save prepared freeze plan for later */ @@ -1791,7 +1767,7 @@ retry: * If tuple is not frozen (and not about to become frozen) then caller * had better not go on to set this page's VM bit */ - if (!tuple_totally_frozen) + if (!totally_frozen) prunestate->all_frozen = false; } @@ -1817,7 +1793,8 @@ retry: vacrel->frozen_pages++; /* Execute all freeze plans for page as a single atomic action */ - heap_freeze_execute_prepared(vacrel->rel, buf, vacrel->FreezeLimit, + heap_freeze_execute_prepared(vacrel->rel, buf, + vacrel->cutoffs.FreezeLimit, frozen, tuples_frozen); } @@ -1972,10 +1949,8 @@ lazy_scan_noprune(LVRelState *vacrel, *hastup = true; /* page prevents rel truncation */ tupleheader = (HeapTupleHeader) PageGetItem(page, itemid); - if (heap_tuple_would_freeze(tupleheader, - vacrel->FreezeLimit, - vacrel->MultiXactCutoff, - &NewRelfrozenXid, &NewRelminMxid)) + if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs, + &NewRelfrozenXid, &NewRelminMxid)) { /* Tuple with XID < FreezeLimit (or MXID < MultiXactCutoff) */ if (vacrel->aggressive) @@ -2010,7 +1985,8 @@ lazy_scan_noprune(LVRelState *vacrel, tuple.t_len = ItemIdGetLength(itemid); tuple.t_tableOid = RelationGetRelid(vacrel->rel); - switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf)) + switch 
(HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin, + buf)) { case HEAPTUPLE_DELETE_IN_PROGRESS: case HEAPTUPLE_LIVE: @@ -2274,6 +2250,7 @@ static bool lazy_vacuum_all_indexes(LVRelState *vacrel) { bool allindexes = true; + double old_live_tuples = vacrel->rel->rd_rel->reltuples; Assert(vacrel->nindexes > 0); Assert(vacrel->do_index_vacuuming); @@ -2297,9 +2274,9 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) Relation indrel = vacrel->indrels[idx]; IndexBulkDeleteResult *istat = vacrel->indstats[idx]; - vacrel->indstats[idx] = - lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples, - vacrel); + vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat, + old_live_tuples, + vacrel); if (lazy_check_wraparound_failsafe(vacrel)) { @@ -2312,7 +2289,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) else { /* Outsource everything to parallel variant */ - parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, vacrel->old_live_tuples, + parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples, vacrel->num_index_scans); /* @@ -2581,15 +2558,15 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, static bool lazy_check_wraparound_failsafe(LVRelState *vacrel) { - Assert(TransactionIdIsNormal(vacrel->relfrozenxid)); - Assert(MultiXactIdIsValid(vacrel->relminmxid)); + Assert(TransactionIdIsNormal(vacrel->cutoffs.relfrozenxid)); + Assert(MultiXactIdIsValid(vacrel->cutoffs.relminmxid)); /* Don't warn more than once per VACUUM */ if (vacrel->failsafe_active) return true; - if (unlikely(vacuum_xid_failsafe_check(vacrel->relfrozenxid, - vacrel->relminmxid))) + if (unlikely(vacuum_xid_failsafe_check(vacrel->cutoffs.relfrozenxid, + vacrel->cutoffs.relminmxid))) { vacrel->failsafe_active = true; @@ -3246,7 +3223,8 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, tuple.t_len = ItemIdGetLength(itemid); tuple.t_tableOid = RelationGetRelid(vacrel->rel); - switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf)) + switch 
(HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin, + buf)) { case HEAPTUPLE_LIVE: { @@ -3265,7 +3243,8 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, * that everyone sees it as committed? */ xmin = HeapTupleHeaderGetXmin(tuple.t_data); - if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin)) + if (!TransactionIdPrecedes(xmin, + vacrel->cutoffs.OldestXmin)) { all_visible = false; *all_frozen = false; diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 07e091bb8..6cfea04a9 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -824,10 +824,7 @@ copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, TupleDesc oldTupDesc PG_USED_FOR_ASSERTS_ONLY; TupleDesc newTupDesc PG_USED_FOR_ASSERTS_ONLY; VacuumParams params; - TransactionId OldestXmin, - FreezeXid; - MultiXactId OldestMxact, - MultiXactCutoff; + struct VacuumCutoffs cutoffs; bool use_sort; double num_tuples = 0, tups_vacuumed = 0, @@ -916,23 +913,24 @@ copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, * not to be aggressive about this. */ memset(¶ms, 0, sizeof(VacuumParams)); - vacuum_set_xid_limits(OldHeap, ¶ms, &OldestXmin, &OldestMxact, - &FreezeXid, &MultiXactCutoff); + vacuum_set_xid_limits(OldHeap, ¶ms, &cutoffs); /* * FreezeXid will become the table's new relfrozenxid, and that mustn't go * backwards, so take the max. */ if (TransactionIdIsValid(OldHeap->rd_rel->relfrozenxid) && - TransactionIdPrecedes(FreezeXid, OldHeap->rd_rel->relfrozenxid)) - FreezeXid = OldHeap->rd_rel->relfrozenxid; + TransactionIdPrecedes(cutoffs.FreezeLimit, + OldHeap->rd_rel->relfrozenxid)) + cutoffs.FreezeLimit = OldHeap->rd_rel->relfrozenxid; /* * MultiXactCutoff, similarly, shouldn't go backwards either. 
*/ if (MultiXactIdIsValid(OldHeap->rd_rel->relminmxid) && - MultiXactIdPrecedes(MultiXactCutoff, OldHeap->rd_rel->relminmxid)) - MultiXactCutoff = OldHeap->rd_rel->relminmxid; + MultiXactIdPrecedes(cutoffs.MultiXactCutoff, + OldHeap->rd_rel->relminmxid)) + cutoffs.MultiXactCutoff = OldHeap->rd_rel->relminmxid; /* * Decide whether to use an indexscan or seqscan-and-optional-sort to scan @@ -971,13 +969,14 @@ copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, * values (e.g. because the AM doesn't use freezing). */ table_relation_copy_for_cluster(OldHeap, NewHeap, OldIndex, use_sort, - OldestXmin, &FreezeXid, &MultiXactCutoff, + cutoffs.OldestXmin, &cutoffs.FreezeLimit, + &cutoffs.MultiXactCutoff, &num_tuples, &tups_vacuumed, &tups_recently_dead); /* return selected values to caller, get set as relfrozenxid/minmxid */ - *pFreezeXid = FreezeXid; - *pCutoffMulti = MultiXactCutoff; + *pFreezeXid = cutoffs.FreezeLimit; + *pCutoffMulti = cutoffs.MultiXactCutoff; /* Reset rd_toastoid just to be tidy --- it shouldn't be looked at again */ NewHeap->rd_toastoid = InvalidOid; diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index b5d0ac161..0fb211845 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -933,30 +933,11 @@ get_all_vacuum_rels(int options) * * The target relation and VACUUM parameters are our inputs. * - * Our output parameters are: - * - OldestXmin is the Xid below which tuples deleted by any xact (that - * committed) should be considered DEAD, not just RECENTLY_DEAD. - * - OldestMxact is the Mxid below which MultiXacts are definitely not - * seen as visible by any running transaction. - * - FreezeLimit is the Xid below which all Xids are definitely frozen or - * removed during aggressive vacuums. - * - MultiXactCutoff is the value below which all MultiXactIds are definitely - * removed from Xmax during aggressive vacuums. 
- * - * Return value indicates if vacuumlazy.c caller should make its VACUUM - * operation aggressive. An aggressive VACUUM must advance relfrozenxid up to - * FreezeLimit (at a minimum), and relminmxid up to MultiXactCutoff (at a - * minimum). - * - * OldestXmin and OldestMxact are the most recent values that can ever be - * passed to vac_update_relstats() as frozenxid and minmulti arguments by our - * vacuumlazy.c caller later on. These values should be passed when it turns - * out that VACUUM will leave no unfrozen XIDs/MXIDs behind in the table. + * Output parameters are the cutoffs that VACUUM caller should use. */ bool vacuum_set_xid_limits(Relation rel, const VacuumParams *params, - TransactionId *OldestXmin, MultiXactId *OldestMxact, - TransactionId *FreezeLimit, MultiXactId *MultiXactCutoff) + struct VacuumCutoffs *cutoffs) { int freeze_min_age, multixact_freeze_min_age, @@ -970,6 +951,10 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, safeOldestMxact, aggressiveMXIDCutoff; + /* Determining table age details */ + cutoffs->relfrozenxid = rel->rd_rel->relfrozenxid; + cutoffs->relminmxid = rel->rd_rel->relminmxid; + /* Use mutable copies of freeze age parameters */ freeze_min_age = params->freeze_min_age; multixact_freeze_min_age = params->multixact_freeze_min_age; @@ -987,14 +972,14 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, * that only one vacuum process can be working on a particular table at * any time, and that each vacuum is always an independent transaction. 
*/ - *OldestXmin = GetOldestNonRemovableTransactionId(rel); + cutoffs->OldestXmin = GetOldestNonRemovableTransactionId(rel); if (OldSnapshotThresholdActive()) { TransactionId limit_xmin; TimestampTz limit_ts; - if (TransactionIdLimitedForOldSnapshots(*OldestXmin, rel, + if (TransactionIdLimitedForOldSnapshots(cutoffs->OldestXmin, rel, &limit_xmin, &limit_ts)) { /* @@ -1004,15 +989,15 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, * frequency), but would still be a significant improvement. */ SetOldSnapshotThresholdTimestamp(limit_ts, limit_xmin); - *OldestXmin = limit_xmin; + cutoffs->OldestXmin = limit_xmin; } } - Assert(TransactionIdIsNormal(*OldestXmin)); + Assert(TransactionIdIsNormal(cutoffs->OldestXmin)); /* Acquire OldestMxact */ - *OldestMxact = GetOldestMultiXactId(); - Assert(MultiXactIdIsValid(*OldestMxact)); + cutoffs->OldestMxact = GetOldestMultiXactId(); + Assert(MultiXactIdIsValid(cutoffs->OldestMxact)); /* Acquire next XID/next MXID values used to apply age-based settings */ nextXID = ReadNextTransactionId(); @@ -1030,12 +1015,12 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, Assert(freeze_min_age >= 0); /* Compute FreezeLimit, being careful to generate a normal XID */ - *FreezeLimit = nextXID - freeze_min_age; - if (!TransactionIdIsNormal(*FreezeLimit)) - *FreezeLimit = FirstNormalTransactionId; + cutoffs->FreezeLimit = nextXID - freeze_min_age; + if (!TransactionIdIsNormal(cutoffs->FreezeLimit)) + cutoffs->FreezeLimit = FirstNormalTransactionId; /* FreezeLimit must always be <= OldestXmin */ - if (TransactionIdPrecedes(*OldestXmin, *FreezeLimit)) - *FreezeLimit = *OldestXmin; + if (TransactionIdPrecedes(cutoffs->OldestXmin, cutoffs->FreezeLimit)) + cutoffs->FreezeLimit = cutoffs->OldestXmin; /* * Compute the multixact age for which freezing is urgent. 
This is @@ -1057,16 +1042,16 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, Assert(multixact_freeze_min_age >= 0); /* Compute MultiXactCutoff, being careful to generate a valid value */ - *MultiXactCutoff = nextMXID - multixact_freeze_min_age; - if (*MultiXactCutoff < FirstMultiXactId) - *MultiXactCutoff = FirstMultiXactId; + cutoffs->MultiXactCutoff = nextMXID - multixact_freeze_min_age; + if (cutoffs->MultiXactCutoff < FirstMultiXactId) + cutoffs->MultiXactCutoff = FirstMultiXactId; /* MultiXactCutoff must always be <= OldestMxact */ - if (MultiXactIdPrecedes(*OldestMxact, *MultiXactCutoff)) - *MultiXactCutoff = *OldestMxact; + if (MultiXactIdPrecedes(cutoffs->OldestMxact, cutoffs->MultiXactCutoff)) + cutoffs->MultiXactCutoff = cutoffs->OldestMxact; /* - * Done setting output parameters; check if OldestXmin or OldestMxact are - * held back to an unsafe degree in passing + * Check if OldestXmin or OldestMxact are held back to an unsafe degree in + * passing */ safeOldestXmin = nextXID - autovacuum_freeze_max_age; if (!TransactionIdIsNormal(safeOldestXmin)) @@ -1074,20 +1059,29 @@ vacuum_set_xid_limits(Relation rel, const VacuumParams *params, safeOldestMxact = nextMXID - effective_multixact_freeze_max_age; if (safeOldestMxact < FirstMultiXactId) safeOldestMxact = FirstMultiXactId; - if (TransactionIdPrecedes(*OldestXmin, safeOldestXmin)) + if (TransactionIdPrecedes(cutoffs->OldestXmin, safeOldestXmin)) ereport(WARNING, (errmsg("cutoff for removing and freezing tuples is far in the past"), errhint("Close open transactions soon to avoid wraparound problems.\n" "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); - if (MultiXactIdPrecedes(*OldestMxact, safeOldestMxact)) + if (MultiXactIdPrecedes(cutoffs->OldestMxact, safeOldestMxact)) ereport(WARNING, (errmsg("cutoff for freezing multixacts is far in the past"), errhint("Close open transactions soon to avoid wraparound problems.\n" "You might 
also need to commit or roll back old prepared transactions, or drop stale replication slots."))); /* - * Finally, figure out if caller needs to do an aggressive VACUUM or not. + * Assert that all cutoff invariants hold. * + * We omit relfrozenxid and relminmxid assertions here because there are + * edge cases that allow OldestXmin to go slightly backwards. This is + * okay because vac_update_relstats() won't allow either to go backwards. + */ + Assert(TransactionIdPrecedesOrEquals(cutoffs->FreezeLimit, + cutoffs->OldestXmin)); + Assert(MultiXactIdPrecedesOrEquals(cutoffs->MultiXactCutoff, + cutoffs->OldestMxact)); + /* * Determine the table freeze age to use: as specified by the caller, or * the value of the vacuum_freeze_table_age GUC, but in any case not more * than autovacuum_freeze_max_age * 0.95, so that if you have e.g nightly -- 2.38.1