diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 87586a7b06..7dfadf3875 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -2837,6 +2837,11 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i bigint Estimated number of rows modified since this table was last analyzed + + vacuum_resume_block + bigint + Block number at which to resume vacuuming + last_vacuum timestamp with time zone diff --git a/doc/src/sgml/ref/vacuum.sgml b/doc/src/sgml/ref/vacuum.sgml index 846056a353..f5d473b178 100644 --- a/doc/src/sgml/ref/vacuum.sgml +++ b/doc/src/sgml/ref/vacuum.sgml @@ -35,6 +35,7 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ boolean ] TRUNCATE [ boolean ] PARALLEL integer + RESUME [ boolean ] and table_and_columns is: @@ -255,6 +256,23 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ . This behavior is helpful + when resuming a vacuum run after an interruption or cancellation. The default + is false unless the vacuum_resume option has been + set to true. This option is ignored if any of the FULL, + FREEZE, or DISABLE_PAGE_SKIPPING + options is used. 
+ + + + boolean diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 5325dd3f61..bc6dcd5e53 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -168,6 +168,15 @@ static relopt_bool boolRelOpts[] = }, true }, + { + { + "vacuum_resume", + "Enables vacuum to resume vacuuming from the last vacuumed block", + RELOPT_KIND_HEAP | RELOPT_KIND_TOAST, + ShareUpdateExclusiveLock + }, + false + }, /* list terminator */ {{NULL}} }; @@ -1534,7 +1543,9 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) {"vacuum_index_cleanup", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, vacuum_index_cleanup)}, {"vacuum_truncate", RELOPT_TYPE_BOOL, - offsetof(StdRdOptions, vacuum_truncate)} + offsetof(StdRdOptions, vacuum_truncate)}, + {"vacuum_resume", RELOPT_TYPE_BOOL, + offsetof(StdRdOptions, vacuum_resume)} }; return (bytea *) build_reloptions(reloptions, validate, kind, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 03c43efc32..f54f0054b6 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -110,6 +110,14 @@ #define VACUUM_FSM_EVERY_PAGES \ ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ)) +/* + * When a table has no indexes, save the progress every 8GB so that we can + * resume vacuum from the middle of the table. When the table has indexes, we + * save it after the second heap pass has finished. + */ +#define VACUUM_RESUME_BLK_INTERVAL \ + ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ)) + /* * Guesstimation of number of dead tuples per page. 
This is used to * provide an upper limit to memory allocated when vacuuming small @@ -361,6 +369,7 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats, LVParallelState *lps, int nindexes); static LVSharedIndStats *get_indstats(LVShared *lvshared, int n); static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared); +static BlockNumber get_resume_block(Relation onerel); /* @@ -398,6 +407,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params, Assert(params != NULL); Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT); Assert(params->truncate != VACOPT_TERNARY_DEFAULT); + Assert(params->resume != VACOPT_TERNARY_DEFAULT); /* not every AM requires these to be valid, but heap does */ Assert(TransactionIdIsNormal(onerel->rd_rel->relfrozenxid)); @@ -704,7 +714,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, TransactionId relminmxid = onerel->rd_rel->relminmxid; BlockNumber empty_pages, vacuumed_pages, - next_fsm_block_to_vacuum; + next_fsm_block_to_vacuum, + next_block_to_resume_vacuum; double num_tuples, /* total number of nonremovable tuples */ live_tuples, /* live tuples (reltuples estimate) */ tups_vacuumed, /* tuples cleaned up by vacuum */ @@ -715,6 +726,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; BlockNumber next_unskippable_block; + BlockNumber start_blkno = 0; bool skipping_blocks; xl_heap_freeze_tuple *frozen; StringInfoData buf; @@ -727,6 +739,19 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, pg_rusage_init(&ru0); + /* + * If resuming is not requested, we clear the last saved block so as not to + * keep the previous information. If requested and it is not an aggressive + * vacuum, we fetch the last saved block number to resume and set it as the + * starting block to vacuum. 
+ */ + if (params->resume == VACOPT_TERNARY_DISABLED) + pgstat_report_vacuum_resume_block(RelationGetRelid(onerel), + onerel->rd_rel->relisshared, + 0); + else if (!aggressive) + start_blkno = get_resume_block(onerel); + relname = RelationGetRelationName(onerel); if (aggressive) ereport(elevel, @@ -734,19 +759,30 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, get_namespace_name(RelationGetNamespace(onerel)), relname))); else - ereport(elevel, - (errmsg("vacuuming \"%s.%s\"", - get_namespace_name(RelationGetNamespace(onerel)), - relname))); + { + if (start_blkno != 0) + ereport(elevel, + (errmsg("vacuuming \"%s.%s\" starting from block %u", + get_namespace_name(RelationGetNamespace(onerel)), + relname, start_blkno))); + else + ereport(elevel, + (errmsg("vacuuming \"%s.%s\"", + get_namespace_name(RelationGetNamespace(onerel)), + relname))); + } empty_pages = vacuumed_pages = 0; next_fsm_block_to_vacuum = (BlockNumber) 0; + next_block_to_resume_vacuum = (BlockNumber) 0; num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0; indstats = (IndexBulkDeleteResult **) palloc0(nindexes * sizeof(IndexBulkDeleteResult *)); nblocks = RelationGetNumberOfBlocks(onerel); + Assert(start_blkno <= nblocks); /* both are the same if there are no blocks */ + vacrelstats->rel_pages = nblocks; vacrelstats->scanned_pages = 0; vacrelstats->tupcount_pages = 0; @@ -841,7 +877,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, * the last page. This is worth avoiding mainly because such a lock must * be replayed on any hot standby, where it can be disruptive. 
*/ - next_unskippable_block = 0; + next_unskippable_block = start_blkno; if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0) { while (next_unskippable_block < nblocks) @@ -870,7 +906,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, else skipping_blocks = false; - for (blkno = 0; blkno < nblocks; blkno++) + for (blkno = start_blkno; blkno < nblocks; blkno++) { Buffer buf; Page page; @@ -1008,6 +1044,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno); next_fsm_block_to_vacuum = blkno; + /* Save the current block number to resume vacuuming */ + pgstat_report_vacuum_resume_block(RelationGetRelid(onerel), + onerel->rd_rel->relisshared, + blkno); + /* Report that we are once again scanning the heap */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP); @@ -1480,6 +1521,15 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, */ dead_tuples->num_tuples = 0; + /* Save the current block number to resume vacuuming */ + if (blkno - next_block_to_resume_vacuum >= VACUUM_RESUME_BLK_INTERVAL) + { + pgstat_report_vacuum_resume_block(RelationGetRelid(onerel), + onerel->rd_rel->relisshared, + blkno); + next_block_to_resume_vacuum = blkno; + } + /* * Periodically do incremental FSM vacuuming to make newly-freed * space visible on upper FSM pages. 
Note: although we've cleaned @@ -1644,6 +1694,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, if (blkno > next_fsm_block_to_vacuum) FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno); + /* Clear the saved block number */ + pgstat_report_vacuum_resume_block(RelationGetRelid(onerel), + onerel->rd_rel->relisshared, + 0); + /* report all blocks vacuumed */ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno); @@ -3376,3 +3431,22 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) table_close(onerel, ShareUpdateExclusiveLock); pfree(stats); } + +/* + * Return the block number at which to resume vacuuming, fetched from the + * stats collector. + */ +static BlockNumber +get_resume_block(Relation onerel) +{ + PgStat_StatTabEntry *tabentry; + + tabentry = pgstat_fetch_stat_tabentry(RelationGetRelid(onerel)); + + /* If no valid saved block number was found, resume from the first block */ + if (tabentry == NULL || + tabentry->vacuum_resume_block >= RelationGetNumberOfBlocks(onerel)) + return (BlockNumber) 0; + + /* vacuum_resume_block is an unsigned BlockNumber; no lower-bound check */ + return tabentry->vacuum_resume_block; +} diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index f681aafcf9..b160787d1e 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -573,6 +573,7 @@ CREATE VIEW pg_stat_all_tables AS pg_stat_get_live_tuples(C.oid) AS n_live_tup, pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, pg_stat_get_mod_since_analyze(C.oid) AS n_mod_since_analyze, + pg_stat_get_vacuum_resume_block(C.oid) AS vacuum_resume_blk, pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, pg_stat_get_last_analyze_time(C.oid) as last_analyze, diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index d625d17bf4..6c8cf425b6 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -110,6 
+110,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) /* Set default value */ params.index_cleanup = VACOPT_TERNARY_DEFAULT; params.truncate = VACOPT_TERNARY_DEFAULT; + params.resume = VACOPT_TERNARY_DEFAULT; /* By default parallel vacuum is enabled */ params.nworkers = 0; @@ -141,6 +142,8 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) disable_page_skipping = defGetBoolean(opt); else if (strcmp(opt->defname, "index_cleanup") == 0) params.index_cleanup = get_vacopt_ternary_value(opt); + else if (strcmp(opt->defname, "resume") == 0) + params.resume = get_vacopt_ternary_value(opt); else if (strcmp(opt->defname, "truncate") == 0) params.truncate = get_vacopt_ternary_value(opt); else if (strcmp(opt->defname, "parallel") == 0) @@ -1840,6 +1843,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params) params->truncate = VACOPT_TERNARY_DISABLED; } + /* Set resume option based on reloptions if not yet, default is false */ + if (params->resume == VACOPT_TERNARY_DEFAULT) + { + if (onerel->rd_options == NULL || + !((StdRdOptions *) onerel->rd_options)->vacuum_resume) + params->resume = VACOPT_TERNARY_DISABLED; + else + params->resume = VACOPT_TERNARY_ENABLED; + } + /* * Remember the relation's TOAST relation for later, if the caller asked * us to process it. 
In VACUUM FULL, though, the toast table is diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 462b4d7e06..65db5126d2 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -321,6 +321,8 @@ static void pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, in static void pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len); static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len); static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len); +static void pgstat_recv_vacuum_resume_block(PgStat_MsgVacuumResumeBlock *msg, + int len); static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len); static void pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len); static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len); @@ -1421,6 +1423,27 @@ pgstat_report_vacuum(Oid tableoid, bool shared, pgstat_send(&msg, sizeof(msg)); } +/* --------- + * pgstat_report_vacuum_resume_block() - + * + * Tell the collector about the block number to resume. + * --------- + */ +void +pgstat_report_vacuum_resume_block(Oid tableoid, bool shared, BlockNumber blkno) +{ + PgStat_MsgVacuumResumeBlock msg; + + if (pgStatSock == PGINVALID_SOCKET || !pgstat_track_counts) + return; + + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_VACUUMRESUMEBLOCK); + msg.m_databaseid = shared ? 
InvalidOid : MyDatabaseId; + msg.m_tableoid = tableoid; + msg.m_blkno = blkno; + pgstat_send(&msg, sizeof(msg)); +} + /* -------- * pgstat_report_analyze() - * @@ -4591,6 +4614,11 @@ PgstatCollectorMain(int argc, char *argv[]) pgstat_recv_vacuum(&msg.msg_vacuum, len); break; + case PGSTAT_MTYPE_VACUUMRESUMEBLOCK: + pgstat_recv_vacuum_resume_block(&msg.msg_vacuum_resume_block, + len); + break; + case PGSTAT_MTYPE_ANALYZE: pgstat_recv_analyze(&msg.msg_analyze, len); break; @@ -6200,6 +6228,20 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len) } } +static void +pgstat_recv_vacuum_resume_block(PgStat_MsgVacuumResumeBlock *msg, int len) +{ + PgStat_StatDBEntry *dbentry; + PgStat_StatTabEntry *tabentry; + + /* + * Store the data in the table's hashtable entry. + */ + dbentry = pgstat_get_db_entry(msg->m_databaseid, true); + tabentry = pgstat_get_tab_entry(dbentry, msg->m_tableoid, true); + tabentry->vacuum_resume_block = msg->m_blkno; +} + /* ---------- * pgstat_recv_analyze() - * diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 7e6a3c1774..427e47c1c1 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -195,6 +195,20 @@ pg_stat_get_mod_since_analyze(PG_FUNCTION_ARGS) PG_RETURN_INT64(result); } +Datum +pg_stat_get_vacuum_resume_block(PG_FUNCTION_ARGS) +{ + Oid relid = PG_GETARG_OID(0); + int64 result; + PgStat_StatTabEntry *tabentry; + + if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL) + result = 0; + else + result = (int64) (tabentry->vacuum_resume_block); + + PG_RETURN_INT64(result); +} Datum pg_stat_get_blocks_fetched(PG_FUNCTION_ARGS) diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 07a86c7b7b..9cd785d4c4 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -5129,6 +5129,11 @@ proname => 'pg_stat_get_mod_since_analyze', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 
'oid', prosrc => 'pg_stat_get_mod_since_analyze' }, +{ oid => '8001', + descr => 'statistics: block number to resume vacuuming', + proname => 'pg_stat_get_vacuum_resume_block', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_vacuum_resume_block' }, { oid => '1934', descr => 'statistics: number of blocks fetched', proname => 'pg_stat_get_blocks_fetched', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index c27d255d8d..cf3c2919e7 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -222,6 +222,8 @@ typedef struct VacuumParams * default value depends on reloptions */ VacOptTernaryValue truncate; /* Truncate empty pages at the end, * default value depends on reloptions */ + VacOptTernaryValue resume; /* Resume vacuuming from the last vacuumed + * block */ /* * The number of parallel vacuum workers. 
0 by default which means choose diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 3a65a51696..8e991927d7 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -56,6 +56,7 @@ typedef enum StatMsgType PGSTAT_MTYPE_RESETSINGLECOUNTER, PGSTAT_MTYPE_AUTOVAC_START, PGSTAT_MTYPE_VACUUM, + PGSTAT_MTYPE_VACUUMRESUMEBLOCK, PGSTAT_MTYPE_ANALYZE, PGSTAT_MTYPE_ARCHIVER, PGSTAT_MTYPE_BGWRITER, @@ -371,6 +372,14 @@ typedef struct PgStat_MsgVacuum PgStat_Counter m_dead_tuples; } PgStat_MsgVacuum; +typedef struct PgStat_MsgVacuumResumeBlock +{ + PgStat_MsgHdr m_hdr; + Oid m_databaseid; + Oid m_tableoid; + BlockNumber m_blkno; +} PgStat_MsgVacuumResumeBlock; + /* ---------- * PgStat_MsgAnalyze Sent by the backend or autovacuum daemon @@ -561,6 +570,7 @@ typedef union PgStat_Msg PgStat_MsgResetsinglecounter msg_resetsinglecounter; PgStat_MsgAutovacStart msg_autovacuum_start; PgStat_MsgVacuum msg_vacuum; + PgStat_MsgVacuumResumeBlock msg_vacuum_resume_block; PgStat_MsgAnalyze msg_analyze; PgStat_MsgArchiver msg_archiver; PgStat_MsgBgWriter msg_bgwriter; @@ -650,6 +660,8 @@ typedef struct PgStat_StatTabEntry PgStat_Counter blocks_fetched; PgStat_Counter blocks_hit; + BlockNumber vacuum_resume_block; + TimestampTz vacuum_timestamp; /* user initiated vacuum */ PgStat_Counter vacuum_count; TimestampTz autovac_vacuum_timestamp; /* autovacuum initiated */ @@ -1264,6 +1276,8 @@ extern void pgstat_reset_single_counter(Oid objectid, PgStat_Single_Reset_Type t extern void pgstat_report_autovac(Oid dboid); extern void pgstat_report_vacuum(Oid tableoid, bool shared, PgStat_Counter livetuples, PgStat_Counter deadtuples); +extern void pgstat_report_vacuum_resume_block(Oid tableoid, bool shared, + BlockNumber blkno); extern void pgstat_report_analyze(Relation rel, PgStat_Counter livetuples, PgStat_Counter deadtuples, bool resetcounter); diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 44ed04dd3f..24e7b14a24 100644 --- a/src/include/utils/rel.h +++ 
b/src/include/utils/rel.h @@ -277,6 +277,8 @@ typedef struct StdRdOptions int parallel_workers; /* max number of parallel workers */ bool vacuum_index_cleanup; /* enables index vacuuming and cleanup */ bool vacuum_truncate; /* enables vacuum to truncate a relation */ + bool vacuum_resume; /* enables vacuum to resume from last + * vacuumed block. */ } StdRdOptions; #define HEAP_MIN_FILLFACTOR 10 diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 634f8256f7..55d6b439c3 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1778,6 +1778,7 @@ pg_stat_all_tables| SELECT c.oid AS relid, pg_stat_get_live_tuples(c.oid) AS n_live_tup, pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, pg_stat_get_mod_since_analyze(c.oid) AS n_mod_since_analyze, + pg_stat_get_vacuum_resume_block(c.oid) AS vacuum_resume_blk, pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, pg_stat_get_last_analyze_time(c.oid) AS last_analyze, @@ -2034,6 +2035,7 @@ pg_stat_sys_tables| SELECT pg_stat_all_tables.relid, pg_stat_all_tables.n_live_tup, pg_stat_all_tables.n_dead_tup, pg_stat_all_tables.n_mod_since_analyze, + pg_stat_all_tables.vacuum_resume_blk, pg_stat_all_tables.last_vacuum, pg_stat_all_tables.last_autovacuum, pg_stat_all_tables.last_analyze, @@ -2077,6 +2079,7 @@ pg_stat_user_tables| SELECT pg_stat_all_tables.relid, pg_stat_all_tables.n_live_tup, pg_stat_all_tables.n_dead_tup, pg_stat_all_tables.n_mod_since_analyze, + pg_stat_all_tables.vacuum_resume_blk, pg_stat_all_tables.last_vacuum, pg_stat_all_tables.last_autovacuum, pg_stat_all_tables.last_analyze, diff --git a/src/test/regress/expected/vacuum.out b/src/test/regress/expected/vacuum.out index 0cfe28e63f..d1ca94a224 100644 --- a/src/test/regress/expected/vacuum.out +++ b/src/test/regress/expected/vacuum.out @@ -182,6 +182,25 @@ SELECT pg_relation_size('vac_truncate_test') = 0; VACUUM (TRUNCATE FALSE, FULL 
TRUE) vac_truncate_test; DROP TABLE vac_truncate_test; +-- RESUME option +CREATE TABLE resume_test (i INT PRIMARY KEY, t TEXT); +INSERT INTO resume_test(i, t) VALUES (generate_series(1,30), + repeat('1234567890',300)); +VACUUM (RESUME TRUE) resume_test; +-- resume option is ignored +VACUUM (RESUME TRUE, FREEZE TRUE) resume_test; +VACUUM (RESUME TRUE, FULL TRUE) resume_test; +VACUUM (RESUME TRUE, DISABLE_PAGE_SKIPPING TRUE) resume_test; +-- Only parent enables resuming +ALTER TABLE resume_test SET (vacuum_resume = true, + toast.vacuum_resume = false); +VACUUM (RESUME TRUE) resume_test; +-- Only toast table enables resuming +ALTER TABLE resume_test SET (vacuum_resume = false, + toast.vacuum_resume = true); +-- Test some extra relations. +VACUUM (RESUME TRUE) vaccluster; +VACUUM (RESUME TRUE) vactst; -- partitioned table CREATE TABLE vacparted (a int, b char) PARTITION BY LIST (a); CREATE TABLE vacparted1 PARTITION OF vacparted FOR VALUES IN (1); @@ -254,6 +273,7 @@ DROP TABLE vaccluster; DROP TABLE vactst; DROP TABLE vacparted; DROP TABLE no_index_cleanup; +DROP TABLE resume_test; -- relation ownership, WARNING logs generated as all are skipped. 
CREATE TABLE vacowned (a int); CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a); diff --git a/src/test/regress/sql/vacuum.sql b/src/test/regress/sql/vacuum.sql index cf741f7b11..ff70290e99 100644 --- a/src/test/regress/sql/vacuum.sql +++ b/src/test/regress/sql/vacuum.sql @@ -152,6 +152,26 @@ SELECT pg_relation_size('vac_truncate_test') = 0; VACUUM (TRUNCATE FALSE, FULL TRUE) vac_truncate_test; DROP TABLE vac_truncate_test; +-- RESUME option +CREATE TABLE resume_test (i INT PRIMARY KEY, t TEXT); +INSERT INTO resume_test(i, t) VALUES (generate_series(1,30), + repeat('1234567890',300)); +VACUUM (RESUME TRUE) resume_test; +-- resume option is ignored +VACUUM (RESUME TRUE, FREEZE TRUE) resume_test; +VACUUM (RESUME TRUE, FULL TRUE) resume_test; +VACUUM (RESUME TRUE, DISABLE_PAGE_SKIPPING TRUE) resume_test; +-- Only parent enables resuming +ALTER TABLE resume_test SET (vacuum_resume = true, + toast.vacuum_resume = false); +VACUUM (RESUME TRUE) resume_test; +-- Only toast table enables resuming +ALTER TABLE resume_test SET (vacuum_resume = false, + toast.vacuum_resume = true); +-- Test some extra relations. +VACUUM (RESUME TRUE) vaccluster; +VACUUM (RESUME TRUE) vactst; + -- partitioned table CREATE TABLE vacparted (a int, b char) PARTITION BY LIST (a); CREATE TABLE vacparted1 PARTITION OF vacparted FOR VALUES IN (1); @@ -214,6 +234,7 @@ DROP TABLE vaccluster; DROP TABLE vactst; DROP TABLE vacparted; DROP TABLE no_index_cleanup; +DROP TABLE resume_test; -- relation ownership, WARNING logs generated as all are skipped. CREATE TABLE vacowned (a int);