diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 38bba16..5d5c673 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -34,6 +34,28 @@
  * the POSTGRES heap access method used for all POSTGRES
  * relations.
  *
+ * WAL CONSIDERATIONS
+ * All heap operations are normally WAL-logged, but there are a few
+ * exceptions. Temporary and unlogged relations never need to be
+ * WAL-logged, but we can also skip WAL-logging for a table that was
+ * created in the same transaction, if we don't need WAL for PITR or
+ * WAL archival purposes (i.e. if wal_level=minimal), and we fsync()
+ * the file to disk at COMMIT instead.
+ *
+ * The same-relation optimization is not employed automatically on all
+ * updates to a table that was created in the same transaction, because
+ * for a small number of changes, it's cheaper to just create the WAL
+ * records than to fsync() the whole relation at COMMIT. It is only
+ * worthwhile for (presumably) large operations like COPY, CLUSTER,
+ * or VACUUM FULL. Use heap_register_sync() to initiate such an
+ * operation; it will cause any subsequent updates to the table to skip
+ * WAL-logging, if possible, and cause the heap to be synced to disk at
+ * COMMIT.
+ *
+ * To make that work, all modifications to the heap must use
+ * HeapNeedsWAL() to check if WAL-logging is needed in this transaction
+ * for the given block.
+ *
 *-------------------------------------------------------------------------
 */
 #include "postgres.h"
@@ -55,6 +77,7 @@
 #include "access/xlogutils.h"
 #include "catalog/catalog.h"
 #include "catalog/namespace.h"
+#include "catalog/storage.h"
 #include "miscadmin.h"
 #include "pgstat.h"
 #include "storage/bufmgr.h"
@@ -2331,12 +2354,6 @@ FreeBulkInsertState(BulkInsertState bistate)
 * The new tuple is stamped with current transaction ID and the specified
 * command ID.
 *
- * If the HEAP_INSERT_SKIP_WAL option is specified, the new tuple is not
- * logged in WAL, even for a non-temp relation. Safe usage of this behavior
- * requires that we arrange that all new tuples go into new pages not
- * containing any tuples from other transactions, and that the relation gets
- * fsync'd before commit. (See also heap_sync() comments)
- *
 * The HEAP_INSERT_SKIP_FSM option is passed directly to
 * RelationGetBufferForTuple, which see for more info.
 *
@@ -2440,7 +2457,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer))
 	{
 		xl_heap_insert xlrec;
 		xl_heap_header xlhdr;
@@ -2639,12 +2656,10 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 	int			ndone;
 	char	   *scratch = NULL;
 	Page		page;
-	bool		needwal;
 	Size		saveFreeSpace;
 	bool		need_tuple_data = RelationIsLogicallyLogged(relation);
 	bool		need_cids = RelationIsAccessibleInLogicalDecoding(relation);
 
-	needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation);
 	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
 												   HEAP_DEFAULT_FILLFACTOR);
 
@@ -2659,7 +2674,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 	 * palloc() within a critical section is not safe, so we allocate this
 	 * beforehand.
 	 */
-	if (needwal)
+	if (RelationNeedsWAL(relation))
 		scratch = palloc(BLCKSZ);
 
 	/*
@@ -2694,6 +2709,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 		Buffer		vmbuffer = InvalidBuffer;
 		bool		all_visible_cleared = false;
 		int			nthispage;
+		bool		needwal;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -2705,6 +2721,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 										   InvalidBuffer, options, bistate,
 										   &vmbuffer, NULL);
 		page = BufferGetPage(buffer);
+		needwal = HeapNeedsWAL(relation, buffer);
 
 		/* NO EREPORT(ERROR) from here till changes are logged */
 		START_CRIT_SECTION();
@@ -3261,7 +3278,7 @@ l1:
 	 * NB: heap_abort_speculative() uses the same xlog record and replay
 	 * routines.
 	 */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer))
 	{
 		xl_heap_delete xlrec;
 		XLogRecPtr	recptr;
@@ -4194,7 +4211,8 @@ l2:
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer) ||
+		HeapNeedsWAL(relation, newbuf))
 	{
 		XLogRecPtr	recptr;
 
@@ -5148,7 +5166,7 @@ failed:
 	 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
 	 * entries for everything anyway.)
 	 */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, *buffer))
 	{
 		xl_heap_lock xlrec;
 		XLogRecPtr	recptr;
@@ -5825,7 +5843,7 @@ l4:
 		MarkBufferDirty(buf);
 
 		/* XLOG stuff */
-		if (RelationNeedsWAL(rel))
+		if (HeapNeedsWAL(rel, buf))
 		{
 			xl_heap_lock_updated xlrec;
 			XLogRecPtr	recptr;
@@ -5980,7 +5998,7 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
 	htup->t_ctid = tuple->t_self;
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer))
 	{
 		xl_heap_confirm xlrec;
 		XLogRecPtr	recptr;
@@ -6112,7 +6130,7 @@ heap_abort_speculative(Relation relation, HeapTuple tuple)
 	 * The WAL records generated here match heap_delete(). The same recovery
 	 * routines are used.
 	 */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer))
 	{
 		xl_heap_delete xlrec;
 		XLogRecPtr	recptr;
@@ -6218,7 +6236,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(relation))
+	if (HeapNeedsWAL(relation, buffer))
 	{
 		xl_heap_inplace xlrec;
 		XLogRecPtr	recptr;
@@ -7331,7 +7349,7 @@ log_heap_clean(Relation reln, Buffer buffer,
 	XLogRecPtr	recptr;
 
 	/* Caller should not call me on a non-WAL-logged relation */
-	Assert(RelationNeedsWAL(reln));
+	Assert(HeapNeedsWAL(reln, buffer));
 
 	xlrec.latestRemovedXid = latestRemovedXid;
 	xlrec.nredirected = nredirected;
@@ -7379,7 +7397,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
 	XLogRecPtr	recptr;
 
 	/* Caller should not call me on a non-WAL-logged relation */
-	Assert(RelationNeedsWAL(reln));
+	Assert(HeapNeedsWAL(reln, buffer));
 	/* nor when there are no tuples to freeze */
 	Assert(ntuples > 0);
 
@@ -7464,7 +7482,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	int			bufflags;
 
 	/* Caller should not call me on a non-WAL-logged relation */
-	Assert(RelationNeedsWAL(reln));
+	Assert(HeapNeedsWAL(reln, newbuf) || HeapNeedsWAL(reln, oldbuf));
 
 	XLogBeginInsert();
 
@@ -7567,76 +7585,86 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
 	xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
 
+	XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
+
 	bufflags = REGBUF_STANDARD;
 	if (init)
 		bufflags |= REGBUF_WILL_INIT;
 	if (need_tuple_data)
 		bufflags |= REGBUF_KEEP_DATA;
 
-	XLogRegisterBuffer(0, newbuf, bufflags);
-	if (oldbuf != newbuf)
-		XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
-
-	XLogRegisterData((char *) &xlrec, SizeOfHeapUpdate);
-
 	/*
 	 * Prepare WAL data for the new tuple.
 	 */
-	if (prefixlen > 0 || suffixlen > 0)
+	if (HeapNeedsWAL(reln, newbuf))
 	{
-		if (prefixlen > 0 && suffixlen > 0)
-		{
-			prefix_suffix[0] = prefixlen;
-			prefix_suffix[1] = suffixlen;
-			XLogRegisterBufData(0, (char *) &prefix_suffix, sizeof(uint16) * 2);
-		}
-		else if (prefixlen > 0)
-		{
-			XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
-		}
-		else
+		XLogRegisterBuffer(0, newbuf, bufflags);
+
+		if (prefixlen > 0 || suffixlen > 0)
 		{
-			XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
+			if (prefixlen > 0 && suffixlen > 0)
+			{
+				prefix_suffix[0] = prefixlen;
+				prefix_suffix[1] = suffixlen;
+				XLogRegisterBufData(0, (char *) &prefix_suffix,
+									sizeof(uint16) * 2);
+			}
+			else if (prefixlen > 0)
+			{
+				XLogRegisterBufData(0, (char *) &prefixlen, sizeof(uint16));
+			}
+			else
+			{
+				XLogRegisterBufData(0, (char *) &suffixlen, sizeof(uint16));
+			}
 		}
-	}
 
-	xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
-	xlhdr.t_infomask = newtup->t_data->t_infomask;
-	xlhdr.t_hoff = newtup->t_data->t_hoff;
-	Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
+		xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
+		xlhdr.t_infomask = newtup->t_data->t_infomask;
+		xlhdr.t_hoff = newtup->t_data->t_hoff;
+		Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
 
-	/*
-	 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
-	 *
-	 * The 'data' doesn't include the common prefix or suffix.
-	 */
-	XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
-	if (prefixlen == 0)
-	{
-		XLogRegisterBufData(0,
-							((char *) newtup->t_data) + SizeofHeapTupleHeader,
-							newtup->t_len - SizeofHeapTupleHeader - suffixlen);
-	}
-	else
-	{
 		/*
-		 * Have to write the null bitmap and data after the common prefix as
-		 * two separate rdata entries.
+		 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
+		 *
+		 * The 'data' doesn't include the common prefix or suffix.
 		 */
-		/* bitmap [+ padding] [+ oid] */
-		if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
+		XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
+		if (prefixlen == 0)
 		{
 			XLogRegisterBufData(0,
 								((char *) newtup->t_data) + SizeofHeapTupleHeader,
-								newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+								newtup->t_len - SizeofHeapTupleHeader - suffixlen);
 		}
+		else
+		{
+			/*
+			 * Have to write the null bitmap and data after the common prefix
+			 * as two separate rdata entries.
+			 */
+			/* bitmap [+ padding] [+ oid] */
+			if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
+			{
+				XLogRegisterBufData(0,
+									((char *) newtup->t_data) + SizeofHeapTupleHeader,
+									newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+			}
 
-		/* data after common prefix */
-		XLogRegisterBufData(0,
+			/* data after common prefix */
+			XLogRegisterBufData(0,
 							((char *) newtup->t_data) + newtup->t_data->t_hoff + prefixlen,
 							newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
+		}
 	}
 
+	/*
+	 * If the old and new tuple are on different pages, also register the old
+	 * page, so that a full-page image is created for it if necessary. We
+	 * don't need any extra information to replay changes to it.
+	 */
+	if (oldbuf != newbuf && HeapNeedsWAL(reln, oldbuf))
+		XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
+
 	/* We need to log a tuple identity */
 	if (need_tuple_data && old_key_tuple)
 	{
@@ -8555,8 +8583,13 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 	 */
 
 	/* Deal with old tuple version */
-	oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
-									  &obuffer);
+	if (oldblk == newblk)
+		oldaction = XLogReadBufferForRedo(record, 0, &obuffer);
+	else if (XLogRecHasBlockRef(record, 1))
+		oldaction = XLogReadBufferForRedo(record, 1, &obuffer);
+	else
+		oldaction = BLK_DONE;
+
 	if (oldaction == BLK_NEEDS_REDO)
 	{
 		page = BufferGetPage(obuffer);
@@ -8610,6 +8643,8 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 		PageInit(page, BufferGetPageSize(nbuffer), 0);
 		newaction = BLK_NEEDS_REDO;
 	}
+	else if (!XLogRecHasBlockRef(record, 0))
+		newaction = BLK_DONE;
 	else
 		newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
 
@@ -9046,9 +9081,16 @@ heap2_redo(XLogReaderState *record)
 * heap_sync - sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
- * If we skipped using WAL, and WAL is otherwise needed, we must force the
- * relation down to disk before it's safe to commit the transaction. This
- * requires writing out any dirty buffers and then doing a forced fsync.
+ * If we made any changes to the heap bypassing the buffer manager, we must
+ * force the relation down to disk before it's safe to commit the
+ * transaction, because the direct modifications will not be flushed by
+ * the next checkpoint.
+ *
+ * We used to also use this after batch operations like COPY and CLUSTER,
+ * if we skipped using WAL and WAL is otherwise needed, but there were
+ * corner cases involving other WAL-logged operations to the same relation,
+ * where that was not enough.
+ * heap_register_sync() should be used for that purpose instead.
 *
 * Indexes are not touched. (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 *
@@ -9081,3 +9123,75 @@ heap_sync(Relation rel)
 		heap_close(toastrel, AccessShareLock);
 	}
 }
+
+/*
+ * heap_register_sync - register a heap to be synced to disk at commit
+ *
+ * This can be used to skip WAL-logging changes on a relation file that has
+ * been created in the same transaction. This makes note of the current size of
+ * the relation, and ensures that when the relation is extended, any changes
+ * to the new blocks in the heap, in the same transaction, will not be
+ * WAL-logged. Instead, the heap contents are flushed to disk at commit,
+ * like heap_sync() does.
+ *
+ * This does the same for the TOAST heap, if any. Indexes are not affected.
+ */
+void
+heap_register_sync(Relation rel)
+{
+	/* non-WAL-logged tables never need fsync */
+	if (!RelationNeedsWAL(rel))
+		return;
+
+	smgrRegisterPendingSync(rel->rd_node, RelationGetNumberOfBlocks(rel));
+	if (OidIsValid(rel->rd_rel->reltoastrelid))
+	{
+		Relation	toastrel;
+
+		toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
+		smgrRegisterPendingSync(toastrel->rd_node,
+								RelationGetNumberOfBlocks(toastrel));
+		heap_close(toastrel, AccessShareLock);
+	}
+}
+
+/*
+ * Do changes to the given heap page need to be WAL-logged?
+ *
+ * This takes into account any previous heap_register_sync() requests.
+ *
+ * Note that it is required to check this before creating any WAL records
+ * for heap pages - it is not merely an optimization! WAL-logging a record
+ * when we have already skipped a previous WAL record for the same page
+ * could lead to failure at WAL replay, as the "before" state expected
+ * by the record might not match what's on disk. Also, if the heap was
+ * truncated earlier, we must WAL-log any changes to the once-truncated
+ * blocks, because replaying the truncation record will destroy them.
+ * (smgrIsSyncPending() figures out all that.)
+ */
+bool
+HeapNeedsWAL(Relation rel, Buffer buf)
+{
+	/* Temporary and unlogged relations never need WAL */
+	if (!RelationNeedsWAL(rel))
+		return false;
+
+	/*
+	 * If we are going to fsync() the relation at COMMIT, and we have not
+	 * truncated the block away previously, and we have not emitted any WAL
+	 * records for this block yet, we can skip WAL-logging it.
+	 */
+	if (smgrIsSyncPending(rel->rd_node, BufferGetBlockNumber(buf)))
+	{
+		/*
+		 * If a pending fsync() will handle this page, its LSN should be
+		 * invalid. If it's not, we've already emitted a WAL record for this
+		 * block, and all subsequent changes to the block must be WAL-logged
+		 * too.
+ */ + Assert(PageGetLSN(BufferGetPage(buf)) == InvalidXLogRecPtr); + return false; + } + + return true; +} diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 6ff9251..3207134 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -260,7 +260,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin, /* * Emit a WAL HEAP_CLEAN record showing what we did */ - if (RelationNeedsWAL(relation)) + if (HeapNeedsWAL(relation, buffer)) { XLogRecPtr recptr; diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index f9ce986..36ba62a 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -649,9 +649,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup) } else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL, - HEAP_INSERT_SKIP_FSM | - (state->rs_use_wal ? - 0 : HEAP_INSERT_SKIP_WAL)); + HEAP_INSERT_SKIP_FSM); else heaptup = tup; diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 3ad4a9f..4b82f3d 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -307,7 +307,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, map[mapByte] |= (flags << mapOffset); MarkBufferDirty(vmBuf); - if (RelationNeedsWAL(rel)) + if (HeapNeedsWAL(rel, heapBuf)) { if (XLogRecPtrIsInvalid(recptr)) { diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 23f36ea..f66d9ab 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -2007,6 +2007,9 @@ CommitTransaction(void) /* close large objects before lower-level cleanup */ AtEOXact_LargeObject(true); + /* Flush updates to relations that we didn't WAL-logged */ + smgrDoPendingSyncs(true); + /* * Mark serializable transaction as complete for predicate locking * purposes. This should be done as late as we can put it and still allow @@ -2237,6 +2240,9 @@ PrepareTransaction(void) /* close large objects before lower-level cleanup */ AtEOXact_LargeObject(true); + /* Flush updates to relations that we didn't WAL-logged */ + smgrDoPendingSyncs(true); + /* * Mark serializable transaction as complete for predicate locking * purposes. 
@@ -2541,6 +2547,7 @@ AbortTransaction(void)
 	AtAbort_Notify();
 	AtEOXact_RelationMap(false);
 	AtAbort_Twophase();
+	smgrDoPendingSyncs(false);
 
 	/*
 	 * Advertise the fact that we aborted in pg_clog (assuming that we got as
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 0d8311c..0a685be 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -19,6 +19,7 @@
 
 #include "postgres.h"
 
+#include "access/transam.h"
 #include "access/visibilitymap.h"
 #include "access/xact.h"
 #include "access/xlog.h"
@@ -29,6 +30,7 @@
 #include "catalog/storage_xlog.h"
 #include "storage/freespace.h"
 #include "storage/smgr.h"
+#include "utils/hsearch.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -64,6 +66,49 @@ typedef struct PendingRelDelete
 static PendingRelDelete *pendingDeletes = NULL; /* head of linked list */
 
 /*
+ * We also track relation files (RelFileNode values) that have been created
+ * in the same transaction, and that have been modified without WAL-logging
+ * the action (an optimization possible with wal_level=minimal). When we are
+ * about to skip WAL-logging, a PendingRelSync entry is created, and
+ * 'sync_above' is set to the current size of the relation. Any operations
+ * on blocks < sync_above need to be WAL-logged as usual, but for operations
+ * on higher blocks, WAL-logging is skipped.
+ *
+ * NB: after WAL-logging has been skipped for a block, we must not WAL-log
+ * any subsequent actions on the same block either. Replaying the WAL record
+ * of the subsequent action might fail otherwise, as the "before" state of
+ * the block might not match, because the earlier actions were not WAL-logged.
+ * Likewise, after we have WAL-logged an operation for a block, we must
+ * WAL-log any subsequent operations on the same page as well. Replaying
+ * a possible full-page image from the earlier WAL record would otherwise
+ * revert the page to the old state, even if we sync the relation at end
+ * of transaction.
+ *
+ * If a relation is truncated (without creating a new relfilenode), and we
+ * emit a WAL record of the truncation, we can't skip WAL-logging for any
+ * of the truncated blocks anymore, as replaying the truncation record will
+ * destroy all the data inserted after that. But if we have already decided
+ * to skip WAL-logging changes to a relation, and the relation is truncated,
+ * we don't need to WAL-log the truncation either.
+ *
+ * This mechanism is currently only used by heaps. Indexes are always
+ * WAL-logged. Also, this only applies for wal_level=minimal; with higher
+ * WAL levels we need the WAL for PITR/replication anyway.
+ */
+typedef struct PendingRelSync
+{
+	RelFileNode relnode;		/* relation created in same xact */
+	BlockNumber sync_above;		/* WAL-logging skipped for blocks >=
+								 * sync_above */
+	BlockNumber truncated_to;	/* truncation record written for blocks >= this */
+} PendingRelSync;
+
+/* Relations that need to be fsync'd at commit */
+static HTAB *pendingSyncs = NULL;
+
+static void createPendingSyncsHash(void);
+
+/*
 * RelationCreateStorage
 *		Create physical storage for a relation.
 *
@@ -226,6 +271,8 @@ RelationPreserveStorage(RelFileNode rnode, bool atCommit)
 void
 RelationTruncate(Relation rel, BlockNumber nblocks)
 {
+	PendingRelSync *pending = NULL;
+	bool		found;
 	bool		fsm;
 	bool		vm;
 
@@ -260,31 +307,53 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
 	 */
 	if (RelationNeedsWAL(rel))
 	{
-		/*
-		 * Make an XLOG entry reporting the file truncation.
- */ - XLogRecPtr lsn; - xl_smgr_truncate xlrec; - - xlrec.blkno = nblocks; - xlrec.rnode = rel->rd_node; - xlrec.flags = SMGR_TRUNCATE_ALL; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, sizeof(xlrec)); - - lsn = XLogInsert(RM_SMGR_ID, - XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE); + if (!pendingSyncs) + createPendingSyncsHash(); + pending = (PendingRelSync *) hash_search(pendingSyncs, + (void *) &rel->rd_node, + HASH_ENTER, &found); + if (!found) + { + pending->sync_above = InvalidBlockNumber; + pending->truncated_to = InvalidBlockNumber; + } - /* - * Flush, because otherwise the truncation of the main relation might - * hit the disk before the WAL record, and the truncation of the FSM - * or visibility map. If we crashed during that window, we'd be left - * with a truncated heap, but the FSM or visibility map would still - * contain entries for the non-existent heap pages. - */ - if (fsm || vm) - XLogFlush(lsn); + if (pending->sync_above == InvalidBlockNumber || + pending->sync_above < nblocks) + { + /* + * Make an XLOG entry reporting the file truncation. + */ + XLogRecPtr lsn; + xl_smgr_truncate xlrec; + + xlrec.blkno = nblocks; + xlrec.rnode = rel->rd_node; + + XLogBeginInsert(); + XLogRegisterData((char *) &xlrec, sizeof(xlrec)); + + lsn = XLogInsert(RM_SMGR_ID, + XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE); + + elog(DEBUG2, "WAL-logged truncation of rel %u/%u/%u to %u blocks", + rel->rd_node.spcNode, rel->rd_node.dbNode, rel->rd_node.relNode, + nblocks); + + /* + * Flush, because otherwise the truncation of the main relation + * might hit the disk before the WAL record, and the truncation of + * the FSM or visibility map. If we crashed during that window, + * we'd be left with a truncated heap, but the FSM or visibility + * map would still contain entries for the non-existent heap + * pages. + */ + if (fsm || vm) + XLogFlush(lsn); + + pending->truncated_to = nblocks; + } } /* Do the real work */ @@ -419,6 +487,140 @@ smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr) return nrels; } +/* create the hash table to track pending at-commit fsyncs */ +static void +createPendingSyncsHash(void) +{ + /* First time through: initialize the hash table */ + HASHCTL ctl; + + MemSet(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(RelFileNode); + ctl.entrysize = sizeof(PendingRelSync); + ctl.hash = tag_hash; + pendingSyncs = hash_create("pending relation sync table", 5, + &ctl, HASH_ELEM | HASH_FUNCTION); +} + +/* + * Remember that the given relation needs to be sync'd at commit, because + * we are going to skip WAL-logging subsequent actions to it. 
+ */ +void +smgrRegisterPendingSync(RelFileNode rnode, BlockNumber nblocks) +{ + PendingRelSync *pending; + bool found; + + if (!pendingSyncs) + createPendingSyncsHash(); + + /* Look up or create an entry */ + pending = (PendingRelSync *) hash_search(pendingSyncs, + (void *) &rnode, + HASH_ENTER, &found); + if (!found) + { + pending->truncated_to = InvalidBlockNumber; + pending->sync_above = nblocks; + + elog(DEBUG2, "registering new pending sync for rel %u/%u/%u at block %u", + rnode.spcNode, rnode.dbNode, rnode.relNode, nblocks); + + } + else if (pending->sync_above == InvalidBlockNumber) + { + elog(DEBUG2, "registering pending sync for rel %u/%u/%u at block %u", + rnode.spcNode, rnode.dbNode, rnode.relNode, nblocks); + pending->sync_above = nblocks; + } + else + elog(DEBUG2, "pending sync for rel %u/%u/%u was already registered at block %u (new %u)", + rnode.spcNode, rnode.dbNode, rnode.relNode, pending->sync_above, + nblocks); +} + +/* + * Are we going to fsync() this relation at COMMIT, and hence don't need to + * WAL-log changes to the given block? + */ +bool +smgrIsSyncPending(RelFileNode rnode, BlockNumber blkno) +{ + PendingRelSync *pending; + bool found; + + if (!pendingSyncs) + return false; + + pending = (PendingRelSync *) hash_search(pendingSyncs, + (void *) &rnode, + HASH_FIND, &found); + if (!found) + return false; + + /* + * We have no fsync() pending for this relation, or we have (possibly) + * already emitted WAL records for this block. + */ + if (pending->sync_above == InvalidBlockNumber || + pending->sync_above > blkno) + { + elog(DEBUG2, "not skipping WAL-logging for rel %u/%u/%u block %u, because sync_above is %u", + rnode.spcNode, rnode.dbNode, rnode.relNode, blkno, pending->sync_above); + return false; + } + + /* + * We have emitted a truncation record for this block. + */ + if (pending->truncated_to != InvalidBlockNumber && + pending->truncated_to <= blkno) + { + elog(DEBUG2, "not skipping WAL-logging for rel %u/%u/%u block %u, because it was truncated earlier in the same xact", + rnode.spcNode, rnode.dbNode, rnode.relNode, blkno); + return false; + } + + elog(DEBUG2, "skipping WAL-logging for rel %u/%u/%u block %u", + rnode.spcNode, rnode.dbNode, rnode.relNode, blkno); + + return true; +} + +/* + * Sync to disk any relations that we skipped WAL-logging for earlier. 
+ */ +void +smgrDoPendingSyncs(bool isCommit) +{ + if (!pendingSyncs) + return; + + if (isCommit) + { + HASH_SEQ_STATUS status; + PendingRelSync *pending; + + hash_seq_init(&status, pendingSyncs); + + while ((pending = hash_seq_search(&status)) != NULL) + { + if (pending->sync_above != InvalidBlockNumber) + { + FlushRelationBuffersWithoutRelCache(pending->relnode, false); + smgrimmedsync(smgropen(pending->relnode, InvalidBackendId), MAIN_FORKNUM); + + elog(DEBUG2, "syncing rel %u/%u/%u", pending->relnode.spcNode, + pending->relnode.dbNode, pending->relnode.relNode); + } + } + } + + hash_destroy(pendingSyncs); + pendingSyncs = NULL; +} + /* * PostPrepare_smgr -- Clean up after a successful PREPARE * diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index f45b330..01b712d 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -26,6 +26,7 @@ #include "access/xact.h" #include "access/xlog.h" #include "catalog/pg_type.h" +#include "catalog/storage.h" #include "commands/copy.h" #include "commands/defrem.h" #include "commands/trigger.h" @@ -2269,8 +2270,7 @@ CopyFrom(CopyState cstate) * - data is being written to relfilenode created in this transaction * then we can skip writing WAL. It's safe because if the transaction * doesn't commit, we'll discard the table (or the new relfilenode file). - * If it does commit, we'll have done the heap_sync at the bottom of this - * routine first. + * If it does commit, commit will do heap_sync(). * * As mentioned in comments in utils/rel.h, the in-same-transaction test * is not always set correctly, since in rare cases rd_newRelfilenodeSubid @@ -2302,7 +2302,7 @@ CopyFrom(CopyState cstate) { hi_options |= HEAP_INSERT_SKIP_FSM; if (!XLogIsNeeded()) - hi_options |= HEAP_INSERT_SKIP_WAL; + heap_register_sync(cstate->rel); } /* @@ -2551,11 +2551,11 @@ CopyFrom(CopyState cstate) FreeExecutorState(estate); /* - * If we skipped writing WAL, then we need to sync the heap (but not - * indexes since those use WAL anyway) + * If we skipped writing WAL, then we will sync the heap at the end of + * the transaction. (We used to do it here, but it was later found out + * that to be safe, we must also avoid WAL-logging any subsequent + * actions on the pages we skipped WAL for). Indexes always use WAL. */ - if (hi_options & HEAP_INSERT_SKIP_WAL) - heap_sync(cstate->rel); return processed; } diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index 5b4f6af..b64d52a 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -567,8 +567,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * We can skip WAL-logging the insertions, unless PITR or streaming * replication is in use. We can skip the FSM in any case. */ - myState->hi_options = HEAP_INSERT_SKIP_FSM | - (XLogIsNeeded() ? 
+	if (!XLogIsNeeded())
+		heap_register_sync(intoRelationDesc);
+	myState->hi_options = HEAP_INSERT_SKIP_FSM;
 	myState->bistate = GetBulkInsertState();
 
 	/* Not using WAL requires smgr_targblock be initially invalid */
@@ -617,9 +618,7 @@ intorel_shutdown(DestReceiver *self)
 
 	FreeBulkInsertState(myState->bistate);
 
-	/* If we skipped using WAL, must heap_sync before commit */
-	if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
-		heap_sync(myState->rel);
+	/* If we skipped using WAL, we will sync the relation at commit */
 
 	/* close rel, but keep lock until commit */
 	heap_close(myState->rel, NoLock);
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 6cddcbd..dbef95b 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -456,7 +456,7 @@ transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
 	 */
 	myState->hi_options = HEAP_INSERT_SKIP_FSM | HEAP_INSERT_FROZEN;
 	if (!XLogIsNeeded())
-		myState->hi_options |= HEAP_INSERT_SKIP_WAL;
+		heap_register_sync(transientrel);
 	myState->bistate = GetBulkInsertState();
 
 	/* Not using WAL requires smgr_targblock be initially invalid */
@@ -499,9 +499,7 @@ transientrel_shutdown(DestReceiver *self)
 
 	FreeBulkInsertState(myState->bistate);
 
-	/* If we skipped using WAL, must heap_sync before commit */
-	if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
-		heap_sync(myState->transientrel);
+	/* If we skipped using WAL, we will sync the relation at commit */
 
 	/* close transientrel, but keep lock until commit */
 	heap_close(myState->transientrel, NoLock);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 86e9814..ca892ea 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -3984,8 +3984,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 		bistate = GetBulkInsertState();
 
 		hi_options = HEAP_INSERT_SKIP_FSM;
+
 		if (!XLogIsNeeded())
-			hi_options |= HEAP_INSERT_SKIP_WAL;
+			heap_register_sync(newrel);
 	}
 	else
 	{
@@ -4236,8 +4237,6 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 	FreeBulkInsertState(bistate);
 
-	/* If we skipped writing WAL, then we need to sync the heap. */
-	if (hi_options & HEAP_INSERT_SKIP_WAL)
-		heap_sync(newrel);
+	/* If we skipped using WAL, we will sync the relation at commit */
 
 	heap_close(newrel, NoLock);
 }
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 231e92d..7190000 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -879,7 +879,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 				 * page has been previously WAL-logged, and if not, do that
 				 * now.
 				 */
-			if (RelationNeedsWAL(onerel) &&
+			if (HeapNeedsWAL(onerel, buf) &&
 				PageGetLSN(page) == InvalidXLogRecPtr)
 				log_newpage_buffer(buf, true);
 
@@ -1106,7 +1106,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 		}
 
 		/* Now WAL-log freezing if necessary */
-		if (RelationNeedsWAL(onerel))
+		if (HeapNeedsWAL(onerel, buf))
 		{
 			XLogRecPtr	recptr;
 
@@ -1462,7 +1462,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 	MarkBufferDirty(buffer);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(onerel))
+	if (HeapNeedsWAL(onerel, buffer))
 	{
 		XLogRecPtr	recptr;
 
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 76ade37..edc580f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -451,6 +451,7 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr,
 			  BufferAccessStrategy strategy,
 			  bool *foundPtr);
 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
+static void FlushRelationBuffers_common(SMgrRelation smgr, bool islocal);
 static void AtProcExit_Buffers(int code, Datum arg);
 static void CheckForBufferLeaks(void);
 static int	rnode_comparator(const void *p1, const void *p2);
@@ -3130,20 +3131,41 @@ PrintPinnedBufs(void)
 void
 FlushRelationBuffers(Relation rel)
 {
-	int			i;
-	BufferDesc *bufHdr;
-
 	/* Open rel at the smgr level if not already done */
 	RelationOpenSmgr(rel);
 
-	if (RelationUsesLocalBuffers(rel))
+	FlushRelationBuffers_common(rel->rd_smgr, RelationUsesLocalBuffers(rel));
+}
+
+/*
+ * Like FlushRelationBuffers(), but the relation is specified by a
+ * RelFileNode
+ */
+void
+FlushRelationBuffersWithoutRelCache(RelFileNode rnode, bool islocal)
+{
+	FlushRelationBuffers_common(smgropen(rnode, InvalidBackendId), islocal);
+}
+
+/*
+ * Code shared between functions FlushRelationBuffers() and
+ * FlushRelationBuffersWithoutRelCache().
+ */
+static void
+FlushRelationBuffers_common(SMgrRelation smgr, bool islocal)
+{
+	RelFileNode rnode = smgr->smgr_rnode.node;
+	int			i;
+	BufferDesc *bufHdr;
+
+	if (islocal)
 	{
 		for (i = 0; i < NLocBuffer; i++)
 		{
 			uint32		buf_state;
 
 			bufHdr = GetLocalBufferDescriptor(i);
-			if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
+			if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
 				((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
 				 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 			{
@@ -3160,7 +3182,7 @@ FlushRelationBuffers(Relation rel)
 
 				PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
 
-				smgrwrite(rel->rd_smgr,
+				smgrwrite(smgr,
 						  bufHdr->tag.forkNum,
 						  bufHdr->tag.blockNum,
 						  localpage,
@@ -3190,18 +3212,18 @@ FlushRelationBuffers(Relation rel)
 		 * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
 		 * and saves some cycles.
 		 */
-		if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
+		if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode))
 			continue;
 
 		ReservePrivateRefCountEntry();
 
 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
+		if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
 			LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
-			FlushBuffer(bufHdr, rel->rd_smgr);
+			FlushBuffer(bufHdr, smgr);
 			LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
 			UnpinBuffer(bufHdr, true);
 		}
@@ -3397,6 +3419,9 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 	bool		dirtied = false;
 	bool		delayChkpt = false;
 	uint32		buf_state;
+	RelFileNode rnode;
+	ForkNumber	forknum;
+	BlockNumber blknum;
 
 	/*
 	 * If we need to protect hint bit updates from torn writes, WAL-log a
@@ -3407,8 +3432,10 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 	 * We don't check full_page_writes here because that logic is included
 	 * when we call XLogInsert() since the value changes dynamically.
 	 */
+	BufferGetTag(buffer, &rnode, &forknum, &blknum);
 	if (XLogHintBitIsNeeded() &&
-		(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
+		(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) &&
+		!smgrIsSyncPending(rnode, blknum))
 	{
 		/*
 		 * If we're in recovery we cannot dirty a page because of a hint.
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index b3a595c..1c169ef 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -25,10 +25,9 @@
 
 /* "options" flag bits for heap_insert */
-#define HEAP_INSERT_SKIP_WAL	0x0001
-#define HEAP_INSERT_SKIP_FSM	0x0002
-#define HEAP_INSERT_FROZEN		0x0004
-#define HEAP_INSERT_SPECULATIVE 0x0008
+#define HEAP_INSERT_SKIP_FSM	0x0001
+#define HEAP_INSERT_FROZEN		0x0002
+#define HEAP_INSERT_SPECULATIVE 0x0004
 
 typedef struct BulkInsertStateData *BulkInsertState;
 
@@ -177,6 +176,7 @@ extern void simple_heap_delete(Relation relation, ItemPointer tid);
 extern void simple_heap_update(Relation relation, ItemPointer otid,
 				   HeapTuple tup);
 
+extern void heap_register_sync(Relation relation);
 extern void heap_sync(Relation relation);
 
 /* in heap/pruneheap.c */
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 06a8242..5418d71 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -378,6 +378,8 @@ extern void heap2_desc(StringInfo buf, XLogReaderState *record);
 extern const char *heap2_identify(uint8 info);
 extern void heap_xlog_logical_rewrite(XLogReaderState *r);
 
+extern bool HeapNeedsWAL(Relation rel, Buffer buf);
+
 extern XLogRecPtr log_heap_cleanup_info(RelFileNode rnode,
 					   TransactionId latestRemovedXid);
 extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
diff --git a/src/include/catalog/storage.h b/src/include/catalog/storage.h
index ef960da..e84dee2 100644
--- a/src/include/catalog/storage.h
+++ b/src/include/catalog/storage.h
@@ -29,6 +29,9 @@ extern void RelationTruncate(Relation rel, BlockNumber nblocks);
 */
 extern void smgrDoPendingDeletes(bool isCommit);
 extern int	smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr);
+extern void smgrRegisterPendingSync(RelFileNode rnode, BlockNumber nblocks);
+extern bool smgrIsSyncPending(RelFileNode rnode, BlockNumber blkno);
+extern void smgrDoPendingSyncs(bool isCommit);
 extern void AtSubCommit_smgr(void);
 extern void AtSubAbort_smgr(void);
 extern void PostPrepare_smgr(void);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index 3d5dea7..f02ea93 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -202,6 +202,8 @@ extern BlockNumber RelationGetNumberOfBlocksInFork(Relation relation,
 						ForkNumber forkNum);
 extern void FlushOneBuffer(Buffer buffer);
 extern void FlushRelationBuffers(Relation rel);
+extern void FlushRelationBuffersWithoutRelCache(RelFileNode rnode,
+						bool islocal);
 extern void FlushDatabaseBuffers(Oid dbid);
 extern void DropRelFileNodeBuffers(RelFileNodeBackend rnode,
 					   ForkNumber forkNum, BlockNumber firstDelBlock);
diff --git a/src/test/recovery/t/006_truncate_opt.pl b/src/test/recovery/t/006_truncate_opt.pl
new file mode 100644
index 0000000..baf5604
--- /dev/null
+++ b/src/test/recovery/t/006_truncate_opt.pl
@@ -0,0 +1,94 @@
+# Set of tests to check TRUNCATE optimizations with CREATE TABLE
+use strict;
+use warnings;
+
+use PostgresNode;
+use TestLib;
+use Test::More tests => 3;
+
+my $node = get_new_node('master');
+$node->init;
+
+my $copy_file = $node->backup_dir . "/copy_data.txt";
+
+$node->append_conf('postgresql.conf', qq{
+fsync = on
+wal_level = minimal
+});
+
+$node->start;
+
+# Create file containing data to COPY
+TestLib::append_to_file($copy_file, qq{copied row 1
+copied row 2
+copied row 3
+});
+
+# CREATE, INSERT, COPY, crash.
+#
+# If COPY inserts into the existing block, and is not WAL-logged, replaying
+# the implicit FPW of the INSERT record will destroy the COPY data.
+$node->psql('postgres', qq{
+BEGIN;
+CREATE TABLE test1(t text PRIMARY KEY);
+INSERT INTO test1 VALUES ('inserted row');
+COPY test1 FROM '$copy_file';
+COMMIT;
+});
+# Enforce recovery and check the state of the table. There should be 4 rows.
+$node->stop('immediate');
+$node->start;
+my $ret = $node->safe_psql('postgres', 'SELECT count(*) FROM test1');
+is($ret, '4', 'SELECT returns 4 rows');
+# Clean up
+$node->safe_psql('postgres', 'DROP TABLE test1;');
+
+# CREATE, COPY, crash. Trigger in COPY that inserts more to same table.
+#
+# If the INSERTs from the trigger go to the same block we're copying to,
+# and the INSERTs are WAL-logged, WAL replay will fail when it tries to
+# replay the WAL record but the "before" image doesn't match, because not
+# all changes were WAL-logged.
$node->psql('postgres', qq{
+BEGIN;
+CREATE TABLE test1(t text PRIMARY KEY);
+CREATE FUNCTION test1_beforetrig() RETURNS trigger LANGUAGE plpgsql as \$\$
+  BEGIN
+    IF new.t NOT LIKE 'triggered%' THEN
+      INSERT INTO test1 VALUES ('triggered ' || NEW.t);
+    END IF;
+    RETURN NEW;
+  END;
+\$\$;
+CREATE TRIGGER test1_beforeinsert BEFORE INSERT ON test1
+FOR EACH ROW EXECUTE PROCEDURE test1_beforetrig();
+COPY test1 FROM '$copy_file';
+COMMIT;
+});
+# Enforce recovery and check the state of the table. There should be 6
+# rows here.
+$node->stop('immediate');
+$node->start;
+$ret = $node->safe_psql('postgres', 'SELECT count(*) FROM test1');
+is($ret, '6', 'SELECT returns 6 rows');
+# Clean up
+$node->safe_psql('postgres', 'DROP TABLE test1;');
+$node->safe_psql('postgres', 'DROP FUNCTION test1_beforetrig();');
+
+# CREATE, TRUNCATE, COPY, crash.
+#
+# If we skip WAL-logging of the COPY, replaying the TRUNCATE record destroys
+# the newly inserted data.
+$node->psql('postgres', qq{
+BEGIN;
+CREATE TABLE test1(t text PRIMARY KEY);
+TRUNCATE test1;
+COPY test1 FROM '$copy_file';
+COMMIT;
+});
+# Enforce recovery and check the state of the table. There should be 3
+# rows here.
+$node->stop('immediate');
+$node->start;
+$ret = $node->safe_psql('postgres', 'SELECT count(*) FROM test1');
+is($ret, '3', 'SELECT returns 3 rows');
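
---

A usage sketch for reviewers, illustrative only and not part of the patch: my_bulk_load() below is hypothetical, but heap_register_sync(), HeapNeedsWAL() (called internally by heap_insert()), HEAP_INSERT_SKIP_FSM, and XLogIsNeeded() are the interfaces touched above. Like CopyFrom(), it assumes the relation's relfilenode was created earlier in the same transaction, so skipping WAL is safe:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/xlog.h"

    /* Hypothetical bulk-loading path, following the pattern CopyFrom() uses. */
    static void
    my_bulk_load(Relation rel, HeapTuple *tuples, int ntuples)
    {
    	BulkInsertState bistate = GetBulkInsertState();
    	int			i;

    	/*
    	 * With wal_level=minimal there is no PITR/archival need for WAL, so
    	 * register the relation to be fsync()'d at COMMIT instead. From here
    	 * on, heap_insert() decides per page, via HeapNeedsWAL(), whether WAL
    	 * can be skipped for blocks above the size recorded at registration.
    	 */
    	if (!XLogIsNeeded())
    		heap_register_sync(rel);

    	for (i = 0; i < ntuples; i++)
    		heap_insert(rel, tuples[i], GetCurrentCommandId(true),
    					HEAP_INSERT_SKIP_FSM, bistate);

    	FreeBulkInsertState(bistate);

    	/*
    	 * No explicit heap_sync() here: smgrDoPendingSyncs() flushes the
    	 * relation at commit, and simply forgets the entries on abort.
    	 */
    }

The point of the one-shot registration, as opposed to the removed per-call HEAP_INSERT_SKIP_WAL flag, is that the skip-or-log decision must stay consistent for a given block for the rest of the transaction; only the centralized pending-sync bookkeeping in storage.c can guarantee that.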