From 5617306ae7fe1d4019eeb596497f765c907a4c2e Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Wed, 28 Dec 2022 12:49:35 -0800 Subject: [PATCH v42 3/4] pgstat: Count IO for relations Count IOOps done on IOObjects in IOContexts by various BackendTypes using the IO stats infrastructure introduced by a previous commit. The primary concern of these statistics is IO operations on data blocks during the course of normal database operations. IO operations done by, for example, the archiver or syslogger are not counted in these statistics. WAL IO, temporary file IO, and IO done directly though smgr* functions (such as when building an index) are not yet counted but would be useful future additions. Author: Melanie Plageman Reviewed-by: Andres Freund Reviewed-by: Justin Pryzby Reviewed-by: Kyotaro Horiguchi Reviewed-by: Maciek Sakrejda Reviewed-by: Lukas Fittl Discussion: https://www.postgresql.org/message-id/flat/20200124195226.lth52iydq2n2uilq%40alap3.anarazel.de --- src/backend/storage/buffer/bufmgr.c | 102 ++++++++++++++++++++++---- src/backend/storage/buffer/freelist.c | 58 +++++++++++---- src/backend/storage/buffer/localbuf.c | 29 ++++++-- src/backend/storage/smgr/md.c | 25 +++++++ src/include/storage/buf_internals.h | 8 +- src/include/storage/bufmgr.h | 7 +- 6 files changed, 189 insertions(+), 40 deletions(-) diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 8075828e8a..3709d2e810 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -481,8 +481,9 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, - bool *foundPtr); -static void FlushBuffer(BufferDesc *buf, SMgrRelation reln); + bool *foundPtr, IOContext *io_context); +static void FlushBuffer(BufferDesc *buf, SMgrRelation reln, + IOContext io_context, IOObject io_object); static void FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum, 
BlockNumber nForkBlock, @@ -823,6 +824,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BufferDesc *bufHdr; Block bufBlock; bool found; + IOContext io_context; + IOObject io_object; bool isExtend; bool isLocalBuf = SmgrIsTemp(smgr); @@ -855,7 +858,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, if (isLocalBuf) { - bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found); + bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found, &io_context); if (found) pgBufferUsage.local_blks_hit++; else if (isExtend) @@ -871,7 +874,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * not currently in memory. */ bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum, - strategy, &found); + strategy, &found, &io_context); if (found) pgBufferUsage.shared_blks_hit++; else if (isExtend) @@ -986,7 +989,16 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, */ Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */ - bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr); + if (isLocalBuf) + { + bufBlock = LocalBufHdrGetBlock(bufHdr); + io_object = IOOBJECT_TEMP_RELATION; + } + else + { + bufBlock = BufHdrGetBlock(bufHdr); + io_object = IOOBJECT_RELATION; + } if (isExtend) { @@ -995,6 +1007,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, /* don't set checksum for all-zero page */ smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false); + pgstat_count_io_op(IOOP_EXTEND, io_object, io_context); + /* * NB: we're *not* doing a ScheduleBufferTagForWriteback here; * although we're essentially performing a write. 
At least on linux @@ -1020,6 +1034,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, smgrread(smgr, forkNum, blockNum, (char *) bufBlock); + pgstat_count_io_op(IOOP_READ, io_object, io_context); + if (track_io_timing) { INSTR_TIME_SET_CURRENT(io_time); @@ -1113,14 +1129,19 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * *foundPtr is actually redundant with the buffer's BM_VALID flag, but * we keep it for simplicity in ReadBuffer. * + * io_context is passed as an output parameter to avoid calling + * IOContextForStrategy() when there is a shared buffers hit and no IO + * statistics need be captured. + * * No locks are held either at entry or exit. */ static BufferDesc * BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, - bool *foundPtr) + bool *foundPtr, IOContext *io_context) { + bool from_ring; BufferTag newTag; /* identity of requested block */ uint32 newHash; /* hash value for newTag */ LWLock *newPartitionLock; /* buffer partition lock for it */ @@ -1172,8 +1193,11 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, { /* * If we get here, previous attempts to read the buffer must - * have failed ... but we shall bravely try again. Set + * io_context since we will in fact need to count an IO + * Operation. */ + *io_context = IOContextForStrategy(strategy); *foundPtr = false; } } @@ -1187,6 +1211,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, */ LWLockRelease(newPartitionLock); + *io_context = IOContextForStrategy(strategy); + /* Loop here in case we have to try another victim buffer */ for (;;) { @@ -1200,7 +1226,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * Select a victim buffer. The buffer is returned with its header * spinlock still held! 
*/ - buf = StrategyGetBuffer(strategy, &buf_state); + buf = StrategyGetBuffer(strategy, &buf_state, &from_ring); Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0); @@ -1254,7 +1280,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, UnlockBufHdr(buf, buf_state); if (XLogNeedsFlush(lsn) && - StrategyRejectBuffer(strategy, buf)) + StrategyRejectBuffer(strategy, buf, from_ring)) { /* Drop lock/pin and loop around for another buffer */ LWLockRelease(BufferDescriptorGetContentLock(buf)); @@ -1269,7 +1295,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, smgr->smgr_rlocator.locator.dbOid, smgr->smgr_rlocator.locator.relNumber); - FlushBuffer(buf, NULL); + FlushBuffer(buf, NULL, *io_context, IOOBJECT_RELATION); LWLockRelease(BufferDescriptorGetContentLock(buf)); ScheduleBufferTagForWriteback(&BackendWritebackContext, @@ -1441,6 +1467,28 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, UnlockBufHdr(buf, buf_state); + if (oldFlags & BM_VALID) + { + /* + * When a BufferAccessStrategy is in use, blocks evicted from shared + * buffers are counted as IOOP_EVICT in the corresponding context + * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a + * strategy in two cases: 1) while initially claiming buffers for the + * strategy ring 2) to replace an existing strategy ring buffer + * because it is pinned or in use and cannot be reused. + * + * Blocks evicted from buffers already in the strategy ring are + * counted as IOOP_REUSE in the corresponding strategy context. + * + * At this point, we can accurately count evictions and reuses, + * because we have successfully claimed the valid buffer. Previously, + * we may have been forced to release the buffer due to concurrent + * pinners or erroring out. + */ + pgstat_count_io_op(from_ring ? 
IOOP_REUSE : IOOP_EVICT, + IOOBJECT_RELATION, *io_context); + } + if (oldPartitionLock != NULL) { BufTableDelete(&oldTag, oldHash); @@ -2570,7 +2618,7 @@ SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context) PinBuffer_Locked(bufHdr); LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED); - FlushBuffer(bufHdr, NULL); + FlushBuffer(bufHdr, NULL, IOCONTEXT_NORMAL, IOOBJECT_RELATION); LWLockRelease(BufferDescriptorGetContentLock(bufHdr)); @@ -2820,7 +2868,7 @@ BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, * as the second parameter. If not, pass NULL. */ static void -FlushBuffer(BufferDesc *buf, SMgrRelation reln) +FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOContext io_context, IOObject io_object) { XLogRecPtr recptr; ErrorContextCallback errcallback; @@ -2912,6 +2960,26 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln) bufToWrite, false); + /* + * When a strategy is in use, only flushes of dirty buffers already in the + * strategy ring are counted as strategy writes (IOCONTEXT + * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO + * statistics tracking. + * + * If a shared buffer initially added to the ring must be flushed before + * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE. + * + * If a shared buffer which was added to the ring later because the + * current strategy buffer is pinned or in use or because all strategy + * buffers were dirty and rejected (for BAS_BULKREAD operations only) + * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE + * (from_ring will be false). + * + * When a strategy is not in use, the write can only be a "regular" write + * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE). 
+ */ + pgstat_count_io_op(IOOP_WRITE, IOOBJECT_RELATION, io_context); + if (track_io_timing) { INSTR_TIME_SET_CURRENT(io_time); @@ -3554,6 +3622,8 @@ FlushRelationBuffers(Relation rel) buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED); pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state); + pgstat_count_io_op(IOOP_WRITE, IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL); + /* Pop the error context stack */ error_context_stack = errcallback.previous; } @@ -3586,7 +3656,7 @@ FlushRelationBuffers(Relation rel) { PinBuffer_Locked(bufHdr); LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED); - FlushBuffer(bufHdr, RelationGetSmgr(rel)); + FlushBuffer(bufHdr, RelationGetSmgr(rel), IOCONTEXT_NORMAL, IOOBJECT_RELATION); LWLockRelease(BufferDescriptorGetContentLock(bufHdr)); UnpinBuffer(bufHdr); } @@ -3684,7 +3754,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels) { PinBuffer_Locked(bufHdr); LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED); - FlushBuffer(bufHdr, srelent->srel); + FlushBuffer(bufHdr, srelent->srel, IOCONTEXT_NORMAL, IOOBJECT_RELATION); LWLockRelease(BufferDescriptorGetContentLock(bufHdr)); UnpinBuffer(bufHdr); } @@ -3894,7 +3964,7 @@ FlushDatabaseBuffers(Oid dbid) { PinBuffer_Locked(bufHdr); LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED); - FlushBuffer(bufHdr, NULL); + FlushBuffer(bufHdr, NULL, IOCONTEXT_NORMAL, IOOBJECT_RELATION); LWLockRelease(BufferDescriptorGetContentLock(bufHdr)); UnpinBuffer(bufHdr); } @@ -3921,7 +3991,7 @@ FlushOneBuffer(Buffer buffer) Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr))); - FlushBuffer(bufHdr, NULL); + FlushBuffer(bufHdr, NULL, IOCONTEXT_NORMAL, IOOBJECT_RELATION); } /* diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 7dec35801c..c690d5f15f 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -15,6 +15,7 @@ */ #include "postgres.h" +#include "pgstat.h" #include 
"port/atomics.h" #include "storage/buf_internals.h" #include "storage/bufmgr.h" @@ -81,12 +82,6 @@ typedef struct BufferAccessStrategyData */ int current; - /* - * True if the buffer just returned by StrategyGetBuffer had been in the - * ring already. - */ - bool current_was_in_ring; - /* * Array of buffer numbers. InvalidBuffer (that is, zero) indicates we * have not yet selected a buffer for this ring slot. For allocation @@ -198,13 +193,15 @@ have_free_buffer(void) * return the buffer with the buffer header spinlock still held. */ BufferDesc * -StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state) +StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_ring) { BufferDesc *buf; int bgwprocno; int trycounter; uint32 local_buf_state; /* to avoid repeated (de-)referencing */ + *from_ring = false; + /* * If given a strategy object, see whether it can select a buffer. We * assume strategy objects don't need buffer_strategy_lock. @@ -213,7 +210,10 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state) { buf = GetBufferFromRing(strategy, buf_state); if (buf != NULL) + { + *from_ring = true; return buf; + } } /* @@ -602,7 +602,7 @@ FreeAccessStrategy(BufferAccessStrategy strategy) /* * GetBufferFromRing -- returns a buffer from the ring, or NULL if the - * ring is empty. + * ring is empty / not usable. * * The bufhdr spin lock is held on the returned buffer. */ @@ -625,10 +625,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state) */ bufnum = strategy->buffers[strategy->current]; if (bufnum == InvalidBuffer) - { - strategy->current_was_in_ring = false; return NULL; - } /* * If the buffer is pinned we cannot use it under any circumstances. 
@@ -644,7 +641,6 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state) if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0 && BUF_STATE_GET_USAGECOUNT(local_buf_state) <= 1) { - strategy->current_was_in_ring = true; *buf_state = local_buf_state; return buf; } @@ -654,7 +650,6 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state) * Tell caller to allocate a new buffer with the normal allocation * strategy. He'll then replace this ring element via AddBufferToRing. */ - strategy->current_was_in_ring = false; return NULL; } @@ -670,6 +665,39 @@ AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf) strategy->buffers[strategy->current] = BufferDescriptorGetBuffer(buf); } +/* + * Utility function returning the IOContext of a given BufferAccessStrategy's + * strategy ring. + */ +IOContext +IOContextForStrategy(BufferAccessStrategy strategy) +{ + if (!strategy) + return IOCONTEXT_NORMAL; + + switch (strategy->btype) + { + case BAS_NORMAL: + + /* + * Currently, GetAccessStrategy() returns NULL for + * BufferAccessStrategyType BAS_NORMAL, so this case is + * unreachable. + */ + pg_unreachable(); + return IOCONTEXT_NORMAL; + case BAS_BULKREAD: + return IOCONTEXT_BULKREAD; + case BAS_BULKWRITE: + return IOCONTEXT_BULKWRITE; + case BAS_VACUUM: + return IOCONTEXT_VACUUM; + } + + elog(ERROR, "unrecognized BufferAccessStrategyType: %d", strategy->btype); + pg_unreachable(); +} + /* * StrategyRejectBuffer -- consider rejecting a dirty buffer * @@ -682,14 +710,14 @@ AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf) * if this buffer should be written and re-used. 
*/ bool -StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf) +StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring) { /* We only do this in bulkread mode */ if (strategy->btype != BAS_BULKREAD) return false; /* Don't muck with behavior of normal buffer-replacement strategy */ - if (!strategy->current_was_in_ring || + if (!from_ring || strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf)) return false; diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 8372acc383..f5e2138701 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -18,6 +18,7 @@ #include "access/parallel.h" #include "catalog/catalog.h" #include "executor/instrument.h" +#include "pgstat.h" #include "storage/buf_internals.h" #include "storage/bufmgr.h" #include "utils/guc_hooks.h" @@ -100,14 +101,22 @@ PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, * LocalBufferAlloc - * Find or create a local buffer for the given page of the given relation. * - * API is similar to bufmgr.c's BufferAlloc, except that we do not need - * to do any locking since this is all local. Also, IO_IN_PROGRESS - * does not get set. Lastly, we support only default access strategy - * (hence, usage_count is always advanced). + * API is similar to bufmgr.c's BufferAlloc(). Note that, unlike BufferAlloc(), + * no locking is required and IO_IN_PROGRESS does not get set. + * + * Only the default access strategy is supported with local buffers, so no + * BufferAccessStrategy is passed to LocalBufferAlloc(). The selected buffer's + * usage_count is, therefore, unconditionally advanced. Also, the passed-in + * io_context is always set to IOCONTEXT_NORMAL. This indicates to the caller + * not to use the BufferAccessStrategy to set the io_context itself. 
+ * + * This is important in cases like CREATE TEMPORARY TABLE AS ..., in which a + * BufferAccessStrategy object may have been created for the CTAS operation but + * it will not be used because it will operate on local buffers. */ BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, - bool *foundPtr) + bool *foundPtr, IOContext *io_context) { BufferTag newTag; /* identity of requested block */ LocalBufferLookupEnt *hresult; @@ -127,6 +136,14 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, hresult = (LocalBufferLookupEnt *) hash_search(LocalBufHash, (void *) &newTag, HASH_FIND, NULL); + /* + * IO Operations on local buffers are only done in IOCONTEXT_NORMAL. Set + * io_context here for convenience since there is no function call + * overhead to avoid in the case of a local buffer hit (like that of + * IOContextForStrategy()). + */ + *io_context = IOCONTEXT_NORMAL; + if (hresult) { b = hresult->id; @@ -230,6 +247,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, buf_state &= ~BM_DIRTY; pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state); + pgstat_count_io_op(IOOP_WRITE, IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL); pgBufferUsage.local_blks_written++; } @@ -256,6 +274,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, ClearBufferTag(&bufHdr->tag); buf_state &= ~(BM_VALID | BM_TAG_VALID); pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state); + pgstat_count_io_op(IOOP_EVICT, IOOBJECT_TEMP_RELATION, IOCONTEXT_NORMAL); } hresult = (LocalBufferLookupEnt *) diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index 60c9905eff..2115d7184a 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -983,6 +983,15 @@ mdimmedsync(SMgrRelation reln, ForkNumber forknum) { MdfdVec *v = &reln->md_seg_fds[forknum][segno - 1]; + /* + * fsyncs done through mdimmedsync() should be tracked in a separate + 
* IOContext than those done through mdsyncfiletag() to differentiate + * between unavoidable client backend fsyncs (e.g. those done during + * index build) and those which ideally would have been done by the + * checkpointer or bgwriter. Since other IO operations bypassing the + * buffer manager could also be tracked in such an IOContext, wait + * until these are also tracked to track immediate fsyncs. + */ if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0) ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), @@ -1021,6 +1030,19 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg) if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ )) { + /* + * We have no way of knowing if the current IOContext is + * IOCONTEXT_NORMAL or IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] at this + * point, so count the fsync as being in the IOCONTEXT_NORMAL + * IOContext. This is probably okay, because the number of backend + * fsyncs doesn't say anything about the efficacy of the + * BufferAccessStrategy. And counting both fsyncs done in + * IOCONTEXT_NORMAL and IOCONTEXT_[BULKREAD, BULKWRITE, VACUUM] under + * IOCONTEXT_NORMAL is likely clearer when investigating the number of + * backend fsyncs. 
+ */ + pgstat_count_io_op(IOOP_FSYNC, IOOBJECT_RELATION, IOCONTEXT_NORMAL); + ereport(DEBUG1, (errmsg_internal("could not forward fsync request because request queue is full"))); @@ -1410,6 +1432,9 @@ mdsyncfiletag(const FileTag *ftag, char *path) if (need_to_close) FileClose(file); + if (result >= 0) + pgstat_count_io_op(IOOP_FSYNC, IOOBJECT_RELATION, IOCONTEXT_NORMAL); + errno = save_errno; return result; } diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index ed8aa2519c..0b44814740 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -15,6 +15,7 @@ #ifndef BUFMGR_INTERNALS_H #define BUFMGR_INTERNALS_H +#include "pgstat.h" #include "port/atomics.h" #include "storage/buf.h" #include "storage/bufmgr.h" @@ -391,11 +392,12 @@ extern void IssuePendingWritebacks(WritebackContext *context); extern void ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag); /* freelist.c */ +extern IOContext IOContextForStrategy(BufferAccessStrategy bas); extern BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy, - uint32 *buf_state); + uint32 *buf_state, bool *from_ring); extern void StrategyFreeBuffer(BufferDesc *buf); extern bool StrategyRejectBuffer(BufferAccessStrategy strategy, - BufferDesc *buf); + BufferDesc *buf, bool from_ring); extern int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc); extern void StrategyNotifyBgWriter(int bgwprocno); @@ -417,7 +419,7 @@ extern PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum); extern BufferDesc *LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, - BlockNumber blockNum, bool *foundPtr); + BlockNumber blockNum, bool *foundPtr, IOContext *io_context); extern void MarkLocalBufferDirty(Buffer buffer); extern void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum, diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index 
33eadbc129..b8a18b8081 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -23,7 +23,12 @@ typedef void *Block; -/* Possible arguments for GetAccessStrategy() */ +/* + * Possible arguments for GetAccessStrategy(). + * + * If adding a new BufferAccessStrategyType, also add a new IOContext so + * IO statistics using this strategy are tracked. + */ typedef enum BufferAccessStrategyType { BAS_NORMAL, /* Normal random access */ -- 2.38.1