From 34a47cd3e6dd2e9bd59b90d0a4e1b284d23e52e7 Mon Sep 17 00:00:00 2001 From: Vignesh C Date: Fri, 23 Dec 2022 17:31:54 +0530 Subject: [PATCH 5/5] Support DDL replication. To support DDL replication, we use event trigger and DDL deparsing facilities. During CREATE PUBLICATION we register a command end trigger that deparses the DDL as a JSON blob, and WAL logs it. The event trigger is automatically removed at the time of DROP PUBLICATION. The WALSender decodes the WAL and sends it downstream similar to other DML commands. The subscriber then converts JSON back to the DDL command string and executes it. In the subscriber, we also add the newly added rel to pg_subscription_rel so that the DML changes on the new table can be replicated without having to manually run "ALTER SUBSCRIPTION ... REFRESH PUBLICATION". - For non-rewrite ALTER object command and - CREATE object command: we deparse the command at ddl_command_end event trigger and WAL log the deparsed json string. The WALSender decodes the WAL and sends it to subscriber if the created/altered table is published. It supports most of ALTER TABLE command except some commands(DDL related to PARTITIONED TABLE ...) that introduced recently which haven't been supported by the current ddl_deparser, we will support that later. - For DROP object: The 'command start' event handler logs a ddl message with the relids of the tables that are dropped which the output plugin (pgoutput) stores in its internal data structure after verifying that it is for a table that is part of the publication. Later the 'command end' event handler sends the actual drop message. Pgoutput on receiving the command end, only sends out the drop command only if it is for one of the relids marked for deleting. The reason we have to do this is because, once the logical decoder receives the 'command end' message, the relid of the table is no longer valid as it has been deleted as part of invalidations received for the drop table command. 
It is no longer possible to verify if the table is part of the publication list or not. To make this possible, I have added two more elements to the ddl xlog and ddl message, (relid and cmdtype). We could have also handled all this on the subscriber side as well, but that would mean sending spurious ddl messages for tables that are not part of the publication. - For table_rewrite ALTER TABLE command: (ALTER COLUMN TYPE, ADD COLUMN DEFAULT, SET LOGGED, SET ACCESS METHOD) we deparse the command and WAL log the deparsed json string at table_rewrite event trigger. The WALSender decodes the WAL and sends it to subscriber if the altered table is published. Then, the WALSender will convert the upcoming rewrite INSERTs to UPDATEs and send them to subscriber so that the data between publisher and subscriber can always be consistent. Note that the tables that publish rewrite ddl must have a replica identity configured in order to be able to replicate the upcoming rewrite UPDATEs. We do this way because of two reason: (1) The data before the rewrite ddl could already be different among publisher and subscriber. To make sure the extra data in subscriber which doesn't exist in publisher also get rewritten, we need to let the subscriber execute the original rewrite ddl to rewrite all the data at first. (2) the data after executing rewrite ddl could be different among publisher and subscriber(due to different functions/operators used during rewrite), so we need to replicate the rewrite UPDATEs to keep the data consistent. TO IMPROVE: This approach could be improved by letting the subscriber try to update the extra data itself instead of doing fully rewrite ddl and use the upcoming rewrite UPDATEs to rewrite the rest data. To achieve this, we could modify the deparsed json string to temporarily remove the rewrite part and add some logic in subscriber to update the extra data. 
Besides, we may not need to send rewrite changes for all type of rewrite ddl, for example, it seems fine to skip sending rewrite changes for ALTER TABLE SET LOGGED as the data in the table doesn't actually be changed. We could use the deparser and event trigger to filter these ddls and skip sending rewrite changes for them. --- src/backend/catalog/pg_publication.c | 1 + src/backend/commands/createas.c | 10 + src/backend/commands/event_trigger.c | 283 ++++++++++- src/backend/commands/publicationcmds.c | 191 +++++++ src/backend/commands/tablecmds.c | 10 +- src/backend/parser/parse_utilcmd.c | 1 + src/backend/replication/logical/Makefile | 1 + src/backend/replication/logical/ddltrigger.c | 356 ++++++++++++++ src/backend/replication/logical/logical.c | 49 ++ src/backend/replication/logical/meson.build | 1 + src/backend/replication/logical/proto.c | 43 ++ .../replication/logical/reorderbuffer.c | 20 +- src/backend/replication/logical/worker.c | 240 +++++++++ src/backend/replication/pgoutput/pgoutput.c | 188 ++++++- src/backend/tcop/utility.c | 3 + src/backend/utils/cache/evtcache.c | 2 + src/backend/utils/cache/relcache.c | 1 + src/bin/pg_dump/pg_dump.c | 21 +- src/bin/pg_dump/pg_dump.h | 1 + src/bin/psql/describe.c | 33 +- src/include/catalog/pg_proc.dat | 12 + src/include/catalog/pg_publication.h | 4 + src/include/commands/event_trigger.h | 11 +- src/include/replication/logicalproto.h | 4 + src/include/replication/output_plugin.h | 15 +- src/include/replication/pgoutput.h | 1 + src/include/replication/reorderbuffer.h | 12 + src/include/utils/evtcache.h | 3 +- .../expected/create_table.out | 7 + .../sql/create_table.sql | 1 + src/test/regress/expected/psql.out | 6 +- src/test/regress/expected/publication.out | 420 ++++++++-------- .../subscription/t/032_ddl_replication.pl | 465 ++++++++++++++++++ src/tools/pgindent/typedefs.list | 5 + 34 files changed, 2175 insertions(+), 246 deletions(-) create mode 100644 src/backend/replication/logical/ddltrigger.c create mode 
100644 src/test/subscription/t/032_ddl_replication.pl diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index 59967098b3..721d023aa5 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -1005,6 +1005,7 @@ GetPublication(Oid pubid) pub->pubactions.pubupdate = pubform->pubupdate; pub->pubactions.pubdelete = pubform->pubdelete; pub->pubactions.pubtruncate = pubform->pubtruncate; + pub->pubactions.pubddl = pubform->pubddl; pub->pubviaroot = pubform->pubviaroot; ReleaseSysCache(tup); diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index 152c29b551..b7795da87d 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -34,6 +34,7 @@ #include "catalog/namespace.h" #include "catalog/toasting.h" #include "commands/createas.h" +#include "commands/event_trigger.h" #include "commands/matview.h" #include "commands/prepare.h" #include "commands/tablecmds.h" @@ -143,6 +144,15 @@ create_ctas_internal(List *attrList, IntoClause *into) StoreViewQuery(intoRelationAddr.objectId, query, false); CommandCounterIncrement(); } + else + { + /* + * Fire the trigger for table_init_write after creating the table so + * that we can access the catalog info about the newly created table + * in the trigger function. 
+ */ + EventTriggerTableInitWrite((Node *) create, intoRelationAddr); + } return intoRelationAddr; } diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index eff15c05db..afb4b7e843 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -37,8 +37,11 @@ #include "miscadmin.h" #include "parser/parse_func.h" #include "pgstat.h" +#include "replication/ddlmessage.h" +#include "replication/message.h" #include "tcop/deparse_utility.h" #include "tcop/utility.h" +#include "tcop/ddl_deparse.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/evtcache.h" @@ -92,7 +95,8 @@ CreateEventTrigger(CreateEventTrigStmt *stmt) if (strcmp(stmt->eventname, "ddl_command_start") != 0 && strcmp(stmt->eventname, "ddl_command_end") != 0 && strcmp(stmt->eventname, "sql_drop") != 0 && - strcmp(stmt->eventname, "table_rewrite") != 0) + strcmp(stmt->eventname, "table_rewrite") != 0 && + strcmp(stmt->eventname, "table_init_write") != 0) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("unrecognized event name \"%s\"", @@ -118,7 +122,8 @@ CreateEventTrigger(CreateEventTrigStmt *stmt) /* Validate tag list, if any. 
*/ if ((strcmp(stmt->eventname, "ddl_command_start") == 0 || strcmp(stmt->eventname, "ddl_command_end") == 0 || - strcmp(stmt->eventname, "sql_drop") == 0) + strcmp(stmt->eventname, "sql_drop") == 0 || + strcmp(stmt->eventname, "table_init_write") == 0) && tags != NULL) validate_ddl_tags("tag", tags); else if (strcmp(stmt->eventname, "table_rewrite") == 0 @@ -544,7 +549,8 @@ EventTriggerCommonSetup(Node *parsetree, dbgtag = CreateCommandTag(parsetree); if (event == EVT_DDLCommandStart || event == EVT_DDLCommandEnd || - event == EVT_SQLDrop) + event == EVT_SQLDrop || + event == EVT_TableInitWrite) { if (!command_tag_event_trigger_ok(dbgtag)) elog(ERROR, "unexpected command tag \"%s\"", GetCommandTagName(dbgtag)); @@ -827,6 +833,141 @@ EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason) CommandCounterIncrement(); } + +/* + * EventTriggerTableInitWriteStart + * Prepare to receive data on a CREATE TABLE AS/SELECT INTO command about + * to be executed. + */ +void +EventTriggerTableInitWriteStart(Node *parsetree) +{ + MemoryContext oldcxt; + CollectedCommand *command; + CreateTableAsStmt *stmt = (CreateTableAsStmt *)parsetree; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || + currentEventTriggerState->commandCollectionInhibited) + return; + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + command = palloc(sizeof(CollectedCommand)); + + command->type = (stmt->objtype == OBJECT_TABLE) ? SCT_CreateTableAs : SCT_Simple; + command->in_extension = creating_extension; + command->d.ctas.address = InvalidObjectAddress; + command->d.ctas.real_create = NULL; + command->parsetree = copyObject(parsetree); + + command->parent = currentEventTriggerState->currentCommand; + currentEventTriggerState->currentCommand = command; + + MemoryContextSwitchTo(oldcxt); +} + +/* + * EventTriggerTableInitWriteEnd + * Finish up saving a CREATE TABLE AS/SELECT INTO command. 
+ * + * FIXME this API isn't considering the possibility that an xact/subxact is + * aborted partway through. Probably it's best to add an + * AtEOSubXact_EventTriggers() to fix this. + */ +void +EventTriggerTableInitWriteEnd(ObjectAddress address) +{ + CollectedCommand *parent; + CreateTableAsStmt *stmt; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || + currentEventTriggerState->commandCollectionInhibited) + return; + + stmt = (CreateTableAsStmt *)currentEventTriggerState->currentCommand->parsetree; + + if (stmt->objtype == OBJECT_TABLE) + { + parent = currentEventTriggerState->currentCommand->parent; + + pfree(currentEventTriggerState->currentCommand); + + currentEventTriggerState->currentCommand = parent; + } + else + { + MemoryContext oldcxt; + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + currentEventTriggerState->currentCommand->d.simple.address = address; + currentEventTriggerState->commandList = + lappend(currentEventTriggerState->commandList, + currentEventTriggerState->currentCommand); + + MemoryContextSwitchTo(oldcxt); + } +} + +/* + * Fire table_init_write triggers. + */ +void +EventTriggerTableInitWrite(Node *real_create, ObjectAddress address) +{ + List *runlist; + EventTriggerData trigdata; + CollectedCommand *command; + + /* + * See EventTriggerDDLCommandStart for a discussion about why event + * triggers are disabled in single user mode. + */ + if (!IsUnderPostmaster) + return; + + /* + * Also do nothing if our state isn't set up, which it won't be if there + * weren't any relevant event triggers at the start of the current DDL + * command. This test might therefore seem optional, but it's + * *necessary*, because EventTriggerCommonSetup might find triggers that + * didn't exist at the time the command started. + */ + if (!currentEventTriggerState) + return; + + /* Do nothing if no command was collected. 
*/ + if (!currentEventTriggerState->currentCommand) + return; + + command = currentEventTriggerState->currentCommand; + + runlist = EventTriggerCommonSetup(command->parsetree, + EVT_TableInitWrite, + "table_init_write", + &trigdata); + if (runlist == NIL) + return; + + /* Set the real CreateTable statement and object address */ + command->d.ctas.real_create = real_create; + command->d.ctas.address = address; + + /* Run the triggers. */ + EventTriggerInvoke(runlist, &trigdata); + + /* Cleanup. */ + list_free(runlist); + + /* + * Make sure anything the event triggers did will be visible to the main + * command. + */ + CommandCounterIncrement(); +} + /* * Invoke each event trigger in a list of event triggers. */ @@ -1108,7 +1249,8 @@ trackDroppedObjectsNeeded(void) */ return (EventCacheLookup(EVT_SQLDrop) != NIL) || (EventCacheLookup(EVT_TableRewrite) != NIL) || - (EventCacheLookup(EVT_DDLCommandEnd) != NIL); + (EventCacheLookup(EVT_DDLCommandEnd) != NIL) || + (EventCacheLookup(EVT_TableInitWrite) != NIL); } /* @@ -1499,6 +1641,7 @@ EventTriggerAlterTableStart(Node *parsetree) command->d.alterTable.classId = RelationRelationId; command->d.alterTable.objectId = InvalidOid; + command->d.alterTable.rewrite = false; command->d.alterTable.subcmds = NIL; command->parsetree = copyObject(parsetree); @@ -1532,7 +1675,7 @@ EventTriggerAlterTableRelid(Oid objectId) * internally, so that's all that this code needs to handle at the moment. 
*/ void -EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) +EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address, bool rewrite) { MemoryContext oldcxt; CollectedATSubcmd *newsub; @@ -1552,12 +1695,140 @@ EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) newsub->address = address; newsub->parsetree = copyObject(subcmd); + currentEventTriggerState->currentCommand->d.alterTable.rewrite |= rewrite; currentEventTriggerState->currentCommand->d.alterTable.subcmds = lappend(currentEventTriggerState->currentCommand->d.alterTable.subcmds, newsub); MemoryContextSwitchTo(oldcxt); } +/* + * EventTriggerAlterTypeStart + * Save data about a single part of an ALTER TYPE. + * + * ALTER TABLE can have multiple subcommands which might include DROP COLUMN + * command and ALTER TYPE referring the drop column in USING expression. + * As the dropped column cannot be accessed after the execution of DROP COLUMN, + * a special trigger is required to handle this case before the drop column is + * executed. 
+ */ +void +EventTriggerAlterTypeStart(AlterTableCmd *subcmd, Relation rel) +{ + MemoryContext oldcxt; + CollectedATSubcmd *newsub; + ColumnDef *def; + Relation attrelation; + HeapTuple heapTup; + Form_pg_attribute attTup; + AttrNumber attnum; + ObjectAddress address; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || + currentEventTriggerState->commandCollectionInhibited) + return; + + Assert(IsA(subcmd, AlterTableCmd)); + Assert(subcmd->subtype == AT_AlterColumnType); + Assert(currentEventTriggerState->currentCommand != NULL); + Assert(OidIsValid(currentEventTriggerState->currentCommand->d.alterTable.objectId)); + + def = (ColumnDef *) subcmd->def; + Assert(IsA(def, ColumnDef)); + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + newsub = palloc(sizeof(CollectedATSubcmd)); + newsub->parsetree = (Node *)copyObject(subcmd); + + attrelation = table_open(AttributeRelationId, RowExclusiveLock); + + /* Look up the target column */ + heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), subcmd->name); + if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ + ereport(ERROR, + errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + subcmd->name, RelationGetRelationName(rel))); + attTup = (Form_pg_attribute) GETSTRUCT(heapTup); + attnum = attTup->attnum; + + ObjectAddressSubSet(address, RelationRelationId, + RelationGetRelid(rel), attnum); + heap_freetuple(heapTup); + table_close(attrelation, RowExclusiveLock); + newsub->address = address; + + if (def->raw_default) + { + char *defexpr; + + defexpr = nodeToString(def->cooked_default); + newsub->usingexpr = TextDatumGetCString(DirectFunctionCall2(pg_get_expr, + CStringGetTextDatum(defexpr), + RelationGetRelid(rel))); + } + else + newsub->usingexpr = NULL; + + currentEventTriggerState->currentCommand->d.alterTable.subcmds = + lappend(currentEventTriggerState->currentCommand->d.alterTable.subcmds, 
newsub); + + MemoryContextSwitchTo(oldcxt); +} + +/* + * EventTriggerAlterTypeEnd + * Finish up saving an ALTER TYPE command, and add it to command list. + */ +void +EventTriggerAlterTypeEnd(Node *subcmd, ObjectAddress address, bool rewrite) +{ + MemoryContext oldcxt; + CollectedATSubcmd *newsub; + ListCell *cell; + CollectedCommand *cmd; + AlterTableCmd *altsubcmd = (AlterTableCmd *)subcmd; + + /* ignore if event trigger context not set, or collection disabled */ + if (!currentEventTriggerState || + currentEventTriggerState->commandCollectionInhibited) + return; + + cmd = currentEventTriggerState->currentCommand; + + Assert(IsA(subcmd, AlterTableCmd)); + Assert(cmd != NULL); + Assert(OidIsValid(cmd->d.alterTable.objectId)); + + foreach(cell, cmd->d.alterTable.subcmds) + { + CollectedATSubcmd *sub = (CollectedATSubcmd *) lfirst(cell); + AlterTableCmd *collcmd = (AlterTableCmd *) sub->parsetree; + + if (collcmd->subtype == altsubcmd->subtype && + address.classId == sub->address.classId && + address.objectId == sub->address.objectId && + address.objectSubId == sub->address.objectSubId) + { + cmd->d.alterTable.rewrite |= rewrite; + return; + } + } + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + + newsub = palloc(sizeof(CollectedATSubcmd)); + newsub->address = address; + newsub->parsetree = copyObject(subcmd); + + cmd->d.alterTable.rewrite |= rewrite; + cmd->d.alterTable.subcmds = lappend(cmd->d.alterTable.subcmds, newsub); + + MemoryContextSwitchTo(oldcxt); +} + /* * EventTriggerAlterTableEnd * Finish up saving an ALTER TABLE command, and add it to command list. 
@@ -1843,6 +2114,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) addr = cmd->d.createopc.address; else if (cmd->type == SCT_AlterTSConfig) addr = cmd->d.atscfg.address; + else if (cmd->type == SCT_CreateTableAs) + addr = cmd->d.ctas.address; /* * If an object was dropped in the same command we may end diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 20fa72c5c8..35e0e0618c 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -38,10 +38,12 @@ #include "commands/publicationcmds.h" #include "funcapi.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "parser/parse_clause.h" #include "parser/parse_collate.h" #include "parser/parse_relation.h" +#include "parser/parser.h" #include "storage/lmgr.h" #include "utils/acl.h" #include "utils/array.h" @@ -97,6 +99,7 @@ parse_publication_options(ParseState *pstate, pubactions->pubupdate = true; pubactions->pubdelete = true; pubactions->pubtruncate = true; + pubactions->pubddl = false; *publish_via_partition_root = false; /* Parse options */ @@ -144,6 +147,8 @@ parse_publication_options(ParseState *pstate, pubactions->pubdelete = true; else if (strcmp(publish_opt, "truncate") == 0) pubactions->pubtruncate = true; + else if (strcmp(publish_opt, "ddl") == 0) + pubactions->pubddl = true; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -728,6 +733,53 @@ CheckPubRelationColumnList(char *pubname, List *tables, } } +/* + * Create event trigger which is used for DDL replication. 
+ */ +static void +CreateDDLReplicaEventTrigger(char *eventname, CommandTag *commands, + int ncommands, ObjectAddress pubaddress, + Oid puboid) +{ + int i; + List *tags = NIL; + Oid trigger_id; + ObjectAddress referenced; + CreateEventTrigStmt *ddl_trigger; + char trigger_name[NAMEDATALEN]; + char trigger_func_name[NAMEDATALEN]; + static const char *trigger_name_prefix = "pg_deparse_trig_%s_%u"; + static const char *trigger_func_prefix = "publication_deparse_%s"; + + ddl_trigger = makeNode(CreateEventTrigStmt); + + snprintf(trigger_name, sizeof(trigger_name), trigger_name_prefix, + eventname, puboid); + snprintf(trigger_func_name, sizeof(trigger_func_name), trigger_func_prefix, + eventname); + + ddl_trigger->trigname = pstrdup(trigger_name); + ddl_trigger->eventname = eventname; + ddl_trigger->funcname = SystemFuncName(trigger_func_name); + + for (i = 0; i < ncommands; i++) + { + String *tag = makeString(pstrdup(GetCommandTagName(commands[i]))); + + tags = lappend(tags, tag); + } + + ddl_trigger->whenclause = list_make1(makeDefElem("tag", (Node *) tags, -1)); + + trigger_id = CreateEventTrigger(ddl_trigger); + + /* + * Register the event triggers as internally dependent on the publication. + */ + ObjectAddressSet(referenced, EventTriggerRelationId, trigger_id); + recordDependencyOn(&referenced, &pubaddress, DEPENDENCY_INTERNAL); +} + /* * Create new publication. */ @@ -798,6 +850,8 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt) BoolGetDatum(pubactions.pubdelete); values[Anum_pg_publication_pubtruncate - 1] = BoolGetDatum(pubactions.pubtruncate); + values[Anum_pg_publication_pubddl - 1] = + BoolGetDatum(pubactions.pubddl); values[Anum_pg_publication_pubviaroot - 1] = BoolGetDatum(publish_via_partition_root); @@ -858,6 +912,140 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt) } } + /* + * Create an event trigger to allow logging of DDL statements. 
+ * + */ + if (pubactions.pubddl) + { + CommandTag start_commands[] = {CMDTAG_DROP_TABLE}; + CommandTag rewrite_commands[] = {CMDTAG_ALTER_TABLE}; + + static CommandTag end_commands[] = { + CMDTAG_ALTER_CAST, + CMDTAG_ALTER_COLLATION, + CMDTAG_ALTER_CONVERSION, + CMDTAG_ALTER_DEFAULT_PRIVILEGES, + CMDTAG_ALTER_DOMAIN, + CMDTAG_ALTER_EXTENSION, + CMDTAG_ALTER_FOREIGN_DATA_WRAPPER, + CMDTAG_ALTER_FOREIGN_TABLE, + CMDTAG_ALTER_FUNCTION, + CMDTAG_ALTER_INDEX, + CMDTAG_ALTER_INDEX, + CMDTAG_ALTER_LANGUAGE, + CMDTAG_ALTER_MATERIALIZED_VIEW, + CMDTAG_ALTER_OPERATOR, + CMDTAG_ALTER_OPERATOR_CLASS, + CMDTAG_ALTER_OPERATOR_FAMILY, + CMDTAG_ALTER_POLICY, + CMDTAG_ALTER_PROCEDURE, + CMDTAG_ALTER_ROUTINE, + CMDTAG_ALTER_RULE, + CMDTAG_ALTER_SCHEMA, + CMDTAG_ALTER_SEQUENCE, + CMDTAG_ALTER_SERVER, + CMDTAG_ALTER_STATISTICS, + CMDTAG_ALTER_TABLE, + CMDTAG_ALTER_TEXT_SEARCH_CONFIGURATION, + CMDTAG_ALTER_TEXT_SEARCH_DICTIONARY, + CMDTAG_ALTER_TRIGGER, + CMDTAG_ALTER_TYPE, + CMDTAG_ALTER_USER_MAPPING, + CMDTAG_ALTER_VIEW, + CMDTAG_COMMENT, + CMDTAG_CREATE_ACCESS_METHOD, + CMDTAG_CREATE_CAST, + CMDTAG_CREATE_COLLATION, + CMDTAG_CREATE_CONVERSION, + CMDTAG_CREATE_DOMAIN, + CMDTAG_CREATE_EXTENSION, + CMDTAG_CREATE_FOREIGN_DATA_WRAPPER, + CMDTAG_CREATE_FOREIGN_TABLE, + CMDTAG_CREATE_FUNCTION, + CMDTAG_CREATE_INDEX, + CMDTAG_CREATE_LANGUAGE, + CMDTAG_CREATE_MATERIALIZED_VIEW, + CMDTAG_CREATE_OPERATOR, + CMDTAG_CREATE_OPERATOR_CLASS, + CMDTAG_CREATE_OPERATOR_FAMILY, + CMDTAG_CREATE_POLICY, + CMDTAG_CREATE_PROCEDURE, + CMDTAG_CREATE_RULE, + CMDTAG_CREATE_SCHEMA, + CMDTAG_CREATE_SEQUENCE, + CMDTAG_CREATE_SERVER, + CMDTAG_CREATE_STATISTICS, + CMDTAG_CREATE_TABLE, + CMDTAG_CREATE_TEXT_SEARCH_CONFIGURATION, + CMDTAG_CREATE_TEXT_SEARCH_DICTIONARY, + CMDTAG_CREATE_TEXT_SEARCH_PARSER, + CMDTAG_CREATE_TEXT_SEARCH_TEMPLATE, + CMDTAG_CREATE_TRANSFORM, + CMDTAG_CREATE_TRIGGER, + CMDTAG_CREATE_TYPE, + CMDTAG_CREATE_USER_MAPPING, + CMDTAG_CREATE_VIEW, + CMDTAG_DROP_ACCESS_METHOD, + CMDTAG_DROP_CAST, + 
CMDTAG_DROP_COLLATION, + CMDTAG_DROP_CONVERSION, + CMDTAG_DROP_DOMAIN, + CMDTAG_DROP_EXTENSION, + CMDTAG_DROP_FOREIGN_DATA_WRAPPER, + CMDTAG_DROP_FOREIGN_TABLE, + CMDTAG_DROP_FUNCTION, + CMDTAG_DROP_INDEX, + CMDTAG_DROP_LANGUAGE, + CMDTAG_DROP_MATERIALIZED_VIEW, + CMDTAG_DROP_OPERATOR, + CMDTAG_DROP_OPERATOR_CLASS, + CMDTAG_DROP_OPERATOR_FAMILY, + CMDTAG_DROP_POLICY, + CMDTAG_DROP_PROCEDURE, + CMDTAG_DROP_ROUTINE, + CMDTAG_DROP_RULE, + CMDTAG_DROP_SCHEMA, + CMDTAG_DROP_SEQUENCE, + CMDTAG_DROP_SERVER, + CMDTAG_DROP_STATISTICS, + CMDTAG_DROP_TABLE, + CMDTAG_DROP_TEXT_SEARCH_CONFIGURATION, + CMDTAG_DROP_TEXT_SEARCH_DICTIONARY, + CMDTAG_DROP_TEXT_SEARCH_PARSER, + CMDTAG_DROP_TEXT_SEARCH_TEMPLATE, + CMDTAG_DROP_TRANSFORM, + CMDTAG_DROP_TYPE, + CMDTAG_DROP_VIEW, + CMDTAG_GRANT, + CMDTAG_IMPORT_FOREIGN_SCHEMA, + CMDTAG_REFRESH_MATERIALIZED_VIEW, + CMDTAG_REVOKE, + CMDTAG_SECURITY_LABEL, + }; + + CommandTag init_commands[] = { + CMDTAG_CREATE_TABLE_AS, + CMDTAG_SELECT_INTO + }; + + /* Create the ddl_command_end event trigger */ + CreateDDLReplicaEventTrigger("ddl_command_end", end_commands, + lengthof(end_commands), myself, puboid); + + /* Create the ddl_command_start event trigger */ + CreateDDLReplicaEventTrigger("ddl_command_start", start_commands, + lengthof(start_commands), myself, puboid); + + /* Create the table_rewrite event trigger */ + CreateDDLReplicaEventTrigger("table_rewrite", rewrite_commands, + lengthof(rewrite_commands), myself, puboid); + + /* Create the table_init_write event trigger */ + CreateDDLReplicaEventTrigger("table_init_write", init_commands, + lengthof(init_commands), myself, puboid); + } + table_close(rel, RowExclusiveLock); InvokeObjectPostCreateHook(PublicationRelationId, puboid, 0); @@ -996,6 +1184,9 @@ AlterPublicationOptions(ParseState *pstate, AlterPublicationStmt *stmt, values[Anum_pg_publication_pubtruncate - 1] = BoolGetDatum(pubactions.pubtruncate); replaces[Anum_pg_publication_pubtruncate - 1] = true; + + 
values[Anum_pg_publication_pubddl - 1] = BoolGetDatum(pubactions.pubddl); + replaces[Anum_pg_publication_pubddl - 1] = true; } if (publish_via_partition_root_given) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 4bea7b3c90..b481823b78 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -4651,6 +4651,9 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd = ATParseTransformCmd(wqueue, tab, rel, cmd, recurse, lockmode, AT_PASS_UNSET, context); Assert(cmd != NULL); + + EventTriggerAlterTypeStart(cmd, rel); + /* Performs own recursion */ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode, context); @@ -4922,6 +4925,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, { ObjectAddress address = InvalidObjectAddress; Relation rel = tab->rel; + bool commandCollected = false; switch (cmd->subtype) { @@ -5045,6 +5049,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, case AT_AlterColumnType: /* ALTER COLUMN TYPE */ /* parse transformation was done earlier */ address = ATExecAlterColumnType(tab, rel, cmd, lockmode); + EventTriggerAlterTypeEnd((Node *) cmd, address, tab->rewrite); + commandCollected = true; break; case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */ address = @@ -5217,8 +5223,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, /* * Report the subcommand to interested event triggers. 
*/ - if (cmd) - EventTriggerCollectAlterTableSubcmd((Node *) cmd, address); + if (cmd && !commandCollected) + EventTriggerCollectAlterTableSubcmd((Node *) cmd, address, tab->rewrite); /* * Bump the command counter to ensure the next subcommand in the sequence diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index f743cd548c..16967468b8 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -1397,6 +1397,7 @@ expandTableLikeClause(RangeVar *heapRel, TableLikeClause *table_like_clause) atcmd->cmds = atsubcmds; atcmd->objtype = OBJECT_TABLE; atcmd->missing_ok = false; + atcmd->table_like = true; result = lcons(atcmd, result); } diff --git a/src/backend/replication/logical/Makefile b/src/backend/replication/logical/Makefile index d4f29f8ffc..23abd6fa79 100644 --- a/src/backend/replication/logical/Makefile +++ b/src/backend/replication/logical/Makefile @@ -16,6 +16,7 @@ override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) OBJS = \ ddlmessage.o\ + ddltrigger.o\ decode.o \ launcher.o \ logical.o \ diff --git a/src/backend/replication/logical/ddltrigger.c b/src/backend/replication/logical/ddltrigger.c new file mode 100644 index 0000000000..c825ff6d09 --- /dev/null +++ b/src/backend/replication/logical/ddltrigger.c @@ -0,0 +1,356 @@ +/*------------------------------------------------------------------------- + * + * ddltrigger.c + * Logical DDL messages. + * + * Copyright (c) 2022, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/replication/logical/ddltrigger.c + * + * NOTES + * + * Deparse the ddl command and log it. 
+ * + * --------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/table.h" +#include "catalog/pg_class.h" +#include "commands/event_trigger.h" +#include "funcapi.h" +#include "lib/ilist.h" +#include "replication/ddlmessage.h" +#include "tcop/ddl_deparse.h" +#include "utils/fmgrprotos.h" +#include "utils/lsyscache.h" + +extern EventTriggerQueryState *currentEventTriggerState; + +/* + * Deparse the ddl command and log it prior to + * execution. Currently only used for DROP TABLE command + * so that catalog can be accessed before being deleted. + * This is to check if the table is part of the publication + * or not. + */ +Datum +publication_deparse_ddl_command_start(PG_FUNCTION_ARGS) +{ + EventTriggerData *trigdata; + char *command = psprintf("Drop table command start"); + DropStmt *stmt; + ListCell *cell1; + + if (!CALLED_AS_EVENT_TRIGGER(fcinfo)) + elog(ERROR, "not fired by event trigger manager"); + + trigdata = (EventTriggerData *) fcinfo->context; + stmt = (DropStmt *) trigdata->parsetree; + + /* Extract the relid from the parse tree */ + foreach(cell1, stmt->objects) + { + char relpersist; + Node *object = lfirst(cell1); + ObjectAddress address; + Relation relation = NULL; + + address = get_object_address(stmt->removeType, + object, + &relation, + AccessExclusiveLock, + true); + + /* Object does not exist, nothing to do */ + if (!relation) + continue; + + relpersist = get_rel_persistence(address.objectId); + + /* + * Do not generate wal log for commands whose target table is a + * temporary table. + * + * We will generate wal logs for unlogged tables so that unlogged + * tables can also be created and altered on the subscriber side. This + * makes it possible to directly replay the SET LOGGED command and the + * incoming rewrite message without creating a new table. 
+ */ + if (relpersist != RELPERSISTENCE_TEMP) + LogLogicalDDLMessage("deparse", address.objectId, DCT_TableDropStart, + command, strlen(command) + 1); + + if (relation) + table_close(relation, NoLock); + } + return PointerGetDatum(NULL); +} + +/* + * publication_deparse_table_rewrite + * + * Deparse the ddl table rewrite command and log it. + */ +Datum +publication_deparse_table_rewrite(PG_FUNCTION_ARGS) +{ + char relpersist; + CollectedCommand *cmd; + char *json_string; + + if (!CALLED_AS_EVENT_TRIGGER(fcinfo)) + elog(ERROR, "not fired by event trigger manager"); + + cmd = currentEventTriggerState->currentCommand; + + Assert(cmd && cmd->d.alterTable.rewrite); + + relpersist = get_rel_persistence(cmd->d.alterTable.objectId); + + /* + * Do not generate wal log for commands whose target table is a temporary + * table. + * + * We will generate wal logs for unlogged tables so that unlogged tables + * can also be created and altered on the subscriber side. This makes it + * possible to directly replay the SET LOGGED command and the incoming + * rewrite message without creating a new table. + */ + if (relpersist != RELPERSISTENCE_TEMP) + { + /* Deparse the DDL command and WAL log it to allow decoding of the same. */ + json_string = deparse_utility_command(cmd, false); + + if (json_string != NULL) + LogLogicalDDLMessage("deparse", cmd->d.alterTable.objectId, DCT_TableAlter, + json_string, strlen(json_string) + 1); + } + + return PointerGetDatum(NULL); +} + +/* + * Deparse the ddl command and log it. This function + * is called after the execution of the command but before the + * transaction commits. 
+ */ +Datum +publication_deparse_ddl_command_end(PG_FUNCTION_ARGS) +{ + ListCell *lc; + slist_iter iter; + DeparsedCommandType type; + Oid relid; + char relkind; + + if (!CALLED_AS_EVENT_TRIGGER(fcinfo)) + elog(ERROR, "not fired by event trigger manager"); + + foreach(lc, currentEventTriggerState->commandList) + { + char relpersist = RELPERSISTENCE_PERMANENT; + CollectedCommand *cmd = lfirst(lc); + char *json_string; + + /* Rewrite DDL has been handled in table_rewrite trigger */ + if (cmd->d.alterTable.rewrite) + { + RenameStmt *renameStmt = (RenameStmt *) cmd->parsetree; + + if (renameStmt && renameStmt->relationType != OBJECT_TYPE && + renameStmt->relationType != OBJECT_TABLE) + continue; + } + + if (cmd->type == SCT_Simple && + !OidIsValid(cmd->d.simple.address.objectId)) + continue; + + if (cmd->type == SCT_AlterTable) + { + relid = cmd->d.alterTable.objectId; + type = DCT_TableAlter; + } + else + { + /* Only SCT_Simple for now */ + relid = cmd->d.simple.address.objectId; + type = DCT_SimpleCmd; + } + + relkind = get_rel_relkind(relid); + if (relkind) + relpersist = get_rel_persistence(relid); + + /* + * Do not generate wal log for commands whose target table is a + * temporary table. + * + * We will generate wal logs for unlogged tables so that unlogged + * tables can also be created and altered on the subscriber side. This + * makes it possible to directly replay the SET LOGGED command and the + * incoming rewrite message without creating a new table. + */ + if (relpersist != RELPERSISTENCE_TEMP) + { + /* + * Deparse the DDL command and WAL log it to allow decoding of the + * same. 
+ */ + json_string = deparse_utility_command(cmd, false); + + if (json_string != NULL) + LogLogicalDDLMessage("deparse", relid, type, json_string, + strlen(json_string) + 1); + } + } + + slist_foreach(iter, &(currentEventTriggerState->SQLDropList)) + { + volatile SQLDropObject *obj; + DropStmt *stmt; + EventTriggerData *trigdata; + char *command; + DeparsedCommandType cmdtype; + const char *tmptype; + ObjectClass objclass; + ObjectAddress objaddr; + + trigdata = (EventTriggerData *) fcinfo->context; + stmt = (DropStmt *) trigdata->parsetree; + + obj = slist_container(SQLDropObject, next, iter.cur); + objaddr = obj->address; + objclass = getObjectClass(&objaddr); + + if (strcmp(obj->objecttype, "table") == 0) + cmdtype = DCT_TableDropEnd; + else if (objclass == OCLASS_SCHEMA || + objclass == OCLASS_OPERATOR || + objclass == OCLASS_OPCLASS || + objclass == OCLASS_OPFAMILY || + objclass == OCLASS_CAST || + objclass == OCLASS_TYPE || + objclass == OCLASS_TRIGGER || + objclass == OCLASS_CONVERSION || + objclass == OCLASS_POLICY || + objclass == OCLASS_REWRITE || + objclass == OCLASS_EXTENSION || + objclass == OCLASS_FDW || + objclass == OCLASS_TSCONFIG || + objclass == OCLASS_TSDICT || + objclass == OCLASS_TSTEMPLATE || + objclass == OCLASS_TSPARSER || + objclass == OCLASS_TRANSFORM || + objclass == OCLASS_FOREIGN_SERVER || + objclass == OCLASS_COLLATION || + objclass == OCLASS_USER_MAPPING || + objclass == OCLASS_LANGUAGE || + objclass == OCLASS_STATISTIC_EXT || + objclass == OCLASS_AM || + strcmp(obj->objecttype, "foreign table") == 0 || + strcmp(obj->objecttype, "index") == 0 || + strcmp(obj->objecttype, "sequence") == 0 || + strcmp(obj->objecttype, "view") == 0 || + strcmp(obj->objecttype, "function") == 0 || + strcmp(obj->objecttype, "materialized view") == 0 || + strcmp(obj->objecttype, "procedure") == 0 || + strcmp(obj->objecttype, "routine") == 0) + cmdtype = DCT_ObjectDrop; + else + continue; + + /* Change foreign-data wrapper to foreign data wrapper */ + if 
(strncmp(obj->objecttype, "foreign-data wrapper", 20) == 0) + { + tmptype = pstrdup("foreign data wrapper"); + command = deparse_drop_command(obj->objidentity, tmptype, + stmt->behavior); + } + + /* Change statistics object to statistics */ + else if (strncmp(obj->objecttype, "statistics object", + strlen("statistics object")) == 0) + { + tmptype = pstrdup("statistics"); + command = deparse_drop_command(obj->objidentity, tmptype, + stmt->behavior); + } + + /* + * Object identity needs to be modified to make the drop work + * + * FROM: on server TO : for server + * + * + */ + else if (strncmp(obj->objecttype, "user mapping", 12) == 0) + { + char *on_server; + + tmptype = palloc(strlen(obj->objidentity) + 2); + on_server = strstr(obj->objidentity, "on server"); + + sprintf((char *) tmptype, "for "); + strncat((char *) tmptype, obj->objidentity, on_server - obj->objidentity); + strcat((char *) tmptype, on_server + 3); + command = deparse_drop_command(tmptype, obj->objecttype, + stmt->behavior); + } + else + command = deparse_drop_command(obj->objidentity, obj->objecttype, + stmt->behavior); + + if (command) + LogLogicalDDLMessage("deparse", obj->address.objectId, cmdtype, + command, strlen(command) + 1); + } + + return PointerGetDatum(NULL); +} + + +/* + * publication_deparse_table_init_write + * + * Deparse the ddl table create command and log it. + */ +Datum +publication_deparse_table_init_write(PG_FUNCTION_ARGS) +{ + char relpersist; + CollectedCommand *cmd; + char *json_string; + + if (!CALLED_AS_EVENT_TRIGGER(fcinfo)) + elog(ERROR, "not fired by event trigger manager"); + + cmd = currentEventTriggerState->currentCommand; + Assert(cmd); + + relpersist = get_rel_persistence(cmd->d.simple.address.objectId); + + /* + * Do not generate wal log for commands whose target table is a temporary + * table. + * + * We will generate wal logs for unlogged tables so that unlogged tables + * can also be created and altered on the subscriber side. 
This makes it + * possible to directly replay the SET LOGGED command and the incoming + * rewrite message without creating a new table. + */ + if (relpersist == RELPERSISTENCE_TEMP) + return PointerGetDatum(NULL); + + /* Deparse the DDL command and WAL log it to allow decoding of the same. */ + json_string = deparse_utility_command(cmd, false); + + if (json_string != NULL) + LogLogicalDDLMessage("deparse", cmd->d.simple.address.objectId, DCT_SimpleCmd, + json_string, strlen(json_string) + 1); + + return PointerGetDatum(NULL); +} diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index bd0c2fd6d8..23a8895ac7 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -95,6 +95,11 @@ static void stream_change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn static void stream_message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, XLogRecPtr message_lsn, bool transactional, const char *prefix, Size message_size, const char *message); +static void stream_ddl_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, + Oid relid, DeparsedCommandType cmdtype, + Size message_size, const char *message); static void stream_truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, int nrelations, Relation relations[], ReorderBufferChange *change); @@ -240,6 +245,7 @@ StartupDecodingContext(List *output_plugin_options, (ctx->callbacks.stream_commit_cb != NULL) || (ctx->callbacks.stream_change_cb != NULL) || (ctx->callbacks.stream_message_cb != NULL) || + (ctx->callbacks.stream_ddl_cb != NULL) || (ctx->callbacks.stream_truncate_cb != NULL); /* @@ -257,6 +263,7 @@ StartupDecodingContext(List *output_plugin_options, ctx->reorder->stream_commit = stream_commit_cb_wrapper; ctx->reorder->stream_change = stream_change_cb_wrapper; ctx->reorder->stream_message = stream_message_cb_wrapper; + ctx->reorder->stream_ddl = 
stream_ddl_cb_wrapper; ctx->reorder->stream_truncate = stream_truncate_cb_wrapper; @@ -1579,6 +1586,48 @@ stream_message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, error_context_stack = errcallback.previous; } +static void +stream_ddl_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, Oid relid, DeparsedCommandType cmdtype, + Size message_size, + const char *message) +{ + LogicalDecodingContext *ctx = cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + /* We're only supposed to call this when streaming is supported. */ + Assert(ctx->streaming); + + /* this callback is optional */ + if (ctx->callbacks.stream_ddl_cb == NULL) + return; + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "stream_ddl"; + state.report_location = message_lsn; + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *) &state; + errcallback.previous = error_context_stack; + error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + ctx->write_xid = txn != NULL ? 
txn->xid : InvalidTransactionId; + ctx->write_location = message_lsn; + + /* do the actual work: call callback */ + ctx->callbacks.stream_ddl_cb(ctx, txn, message_lsn, prefix, relid, + cmdtype, message_size, message); + + /* Pop the error context stack */ + error_context_stack = errcallback.previous; +} + static void stream_truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, int nrelations, Relation relations[], diff --git a/src/backend/replication/logical/meson.build b/src/backend/replication/logical/meson.build index 07756e90ae..dab9f3afeb 100644 --- a/src/backend/replication/logical/meson.build +++ b/src/backend/replication/logical/meson.build @@ -2,6 +2,7 @@ backend_sources += files( 'ddlmessage.c', + 'ddltrigger.c', 'decode.c', 'launcher.c', 'logical.c', diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index f5f2bc24d8..38175dc030 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -663,6 +663,47 @@ logicalrep_write_message(StringInfo out, TransactionId xid, XLogRecPtr lsn, pq_sendbytes(out, message, sz); } +/* + * Read DDL MESSAGE from stream + */ +char * +logicalrep_read_ddl(StringInfo in, XLogRecPtr *lsn, + const char **prefix, + Size *sz) +{ + uint8 flags; + char *msg; + + flags = pq_getmsgint(in, 1); + if (flags != 0) + elog(ERROR, "unrecognized flags %u in ddl message", flags); + + *lsn = pq_getmsgint64(in); + *prefix = pq_getmsgstring(in); + *sz = pq_getmsgint(in, 4); + msg = (char *) pq_getmsgbytes(in, *sz); + + return msg; +} + +/* + * Write DDL MESSAGE to stream + */ +void +logicalrep_write_ddl(StringInfo out, XLogRecPtr lsn, + const char *prefix, Size sz, const char *message) +{ + uint8 flags = 0; + + pq_sendbyte(out, LOGICAL_REP_MSG_DDL); + + pq_sendint8(out, flags); + pq_sendint64(out, lsn); + pq_sendstring(out, prefix); + pq_sendint32(out, sz); + pq_sendbytes(out, message, sz); +} + /* * Write relation description to the output stream. 
*/ @@ -1219,6 +1260,8 @@ logicalrep_message_type(LogicalRepMsgType action) return "TYPE"; case LOGICAL_REP_MSG_MESSAGE: return "MESSAGE"; + case LOGICAL_REP_MSG_DDL: + return "DDL"; case LOGICAL_REP_MSG_BEGIN_PREPARE: return "BEGIN PREPARE"; case LOGICAL_REP_MSG_PREPARE: diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 3222027be1..bb2feeb13c 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2048,12 +2048,20 @@ static inline void ReorderBufferApplyDDLMessage(ReorderBuffer *rb, ReorderBufferTXN *txn, ReorderBufferChange *change, bool streaming) { - rb->ddl(rb, txn, change->lsn, - change->data.ddl.prefix, - change->data.ddl.relid, - change->data.ddl.cmdtype, - change->data.ddl.message_size, - change->data.ddl.message); + if (streaming) + rb->stream_ddl(rb, txn, change->lsn, + change->data.ddl.prefix, + change->data.ddl.relid, + change->data.ddl.cmdtype, + change->data.ddl.message_size, + change->data.ddl.message); + else + rb->ddl(rb, txn, change->lsn, + change->data.ddl.prefix, + change->data.ddl.relid, + change->data.ddl.cmdtype, + change->data.ddl.message_size, + change->data.ddl.message); } /* diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 96772e4d73..ca9258113a 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -156,6 +156,7 @@ #include "miscadmin.h" #include "nodes/makefuncs.h" #include "optimizer/optimizer.h" +#include "parser/analyze.h" #include "parser/parse_relation.h" #include "pgstat.h" #include "postmaster/bgworker.h" @@ -180,7 +181,10 @@ #include "storage/lmgr.h" #include "storage/proc.h" #include "storage/procarray.h" +#include "tcop/ddl_deparse.h" +#include "tcop/pquery.h" #include "tcop/tcopprot.h" +#include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/catcache.h" @@ 
-2507,6 +2511,238 @@ apply_handle_truncate(StringInfo s) end_replication_step(); } +/* + * Special handling for CREATE TABLE AS and SELECT INTO + * to not populate data from the source table on the subscriber. + * Allow the data to be replicated through INSERTs on the publisher. + */ +static void +preprocess_create_table(RawStmt *command) +{ + CommandTag commandTag; + + commandTag = CreateCommandTag((Node *) command); + + switch (commandTag) + { + case CMDTAG_CREATE_TABLE_AS: + case CMDTAG_SELECT_INTO: + { + CreateTableAsStmt *castmt = (CreateTableAsStmt *) command->stmt; + + if (castmt->objtype == OBJECT_TABLE) + { + /* + * Force skipping data population to avoid data + * inconsistency. Data should be replicated from the + * publisher instead. + */ + castmt->into->skipData = true; + } + } + break; + case CMDTAG_SELECT: + { + SelectStmt *sstmt = (SelectStmt *) command->stmt; + + if (sstmt->intoClause != NULL) + { + /* + * Force skipping data population to avoid data + * inconsistency. Data should be replicated from the + * publisher instead. + */ + sstmt->intoClause->skipData = true; + } + } + break; + default: + break; + } +} + +/* + * Handle CREATE TABLE command + * + * Call AddSubscriptionRelState for CREATE LABEL command to set the relstate to + * SUBREL_STATE_READY so DML changes on this new table can be replicated without + * having to manually run "alter subscription ... 
refresh publication" + */ +static void +handle_create_table(RawStmt *command) +{ + CommandTag commandTag; + RangeVar *rv = NULL; + Oid relid; + Oid relnamespace = InvalidOid; + CreateStmt *cstmt; + char *schemaname = NULL; + char *relname = NULL; + + commandTag = CreateCommandTag((Node *) command); + cstmt = (CreateStmt *) command->stmt; + rv = cstmt->relation; + + if (commandTag == CMDTAG_CREATE_TABLE) + { + cstmt = (CreateStmt *) command->stmt; + rv = cstmt->relation; + } + else + { + return; + } + + if (!rv) + return; + + schemaname = rv->schemaname; + relname = rv->relname; + + if (schemaname != NULL) + relnamespace = get_namespace_oid(schemaname, false); + + if (relnamespace != InvalidOid) + relid = get_relname_relid(relname, relnamespace); + else + relid = RelnameGetRelid(relname); + + if (OidIsValid(relid)) + { + AddSubscriptionRelState(MySubscription->oid, relid, + SUBREL_STATE_READY, + InvalidXLogRecPtr); + ereport(DEBUG1, + (errmsg_internal("table \"%s\" added to subscription \"%s\"", + relname, MySubscription->name))); + } +} + +/* + * Handle DDL replication messages. 
+ */ +static void +apply_handle_ddl(StringInfo s) +{ + XLogRecPtr lsn; + const char *prefix = NULL; + char *message = NULL; + char *ddl_command; + Size sz; + List *parsetree_list; + ListCell *parsetree_item; + DestReceiver *receiver; + MemoryContext oldcontext; + const char *save_debug_query_string = debug_query_string; + + message = logicalrep_read_ddl(s, &lsn, &prefix, &sz); + + /* Make sure we are in a transaction command */ + begin_replication_step(); + + ddl_command = deparse_ddl_json_to_string(message); + debug_query_string = ddl_command; + + /* DestNone for logical replication */ + receiver = CreateDestReceiver(DestNone); + parsetree_list = pg_parse_query(ddl_command); + + foreach(parsetree_item, parsetree_list) + { + List *plantree_list; + List *querytree_list; + RawStmt *command = (RawStmt *) lfirst(parsetree_item); + CommandTag commandTag; + MemoryContext per_parsetree_context = NULL; + Portal portal; + bool snapshot_set = false; + + commandTag = CreateCommandTag((Node *) command); + + /* If we got a cancel signal in parsing or prior command, quit */ + CHECK_FOR_INTERRUPTS(); + + /* Remove data population from the command */ + preprocess_create_table(command); + + /* + * Set up a snapshot if parse analysis/planning will need one. + */ + if (analyze_requires_snapshot(command)) + { + PushActiveSnapshot(GetTransactionSnapshot()); + snapshot_set = true; + } + + /* + * We do the work for each parsetree in a short-lived context, to + * limit the memory used when there are many commands in the string. 
+ */ + per_parsetree_context = + AllocSetContextCreate(CurrentMemoryContext, + "execute_sql_string per-statement context", + ALLOCSET_DEFAULT_SIZES); + oldcontext = MemoryContextSwitchTo(per_parsetree_context); + + querytree_list = pg_analyze_and_rewrite_fixedparams(command, + ddl_command, + NULL, 0, NULL); + + plantree_list = pg_plan_queries(querytree_list, ddl_command, 0, NULL); + + /* Done with the snapshot used for parsing/planning */ + if (snapshot_set) + PopActiveSnapshot(); + + portal = CreatePortal("logical replication", true, true); + + /* + * We don't have to copy anything into the portal, because everything + * we are passing here is in ApplyMessageContext or the + * per_parsetree_context, and so will outlive the portal anyway. + */ + PortalDefineQuery(portal, + NULL, + ddl_command, + commandTag, + plantree_list, + NULL); + + /* + * Start the portal. No parameters here. + */ + PortalStart(portal, NULL, 0, InvalidSnapshot); + + /* + * Switch back to transaction context for execution. + */ + MemoryContextSwitchTo(oldcontext); + + (void) PortalRun(portal, + FETCH_ALL, + true, + true, + receiver, + receiver, + NULL); + + PortalDrop(portal, false); + + CommandCounterIncrement(); + + /* + * Table created by DDL replication (database level) is automatically + * added to the subscription here. + */ + handle_create_table(command); + + /* Now we may drop the per-parsetree context, if one was created. */ + MemoryContextDelete(per_parsetree_context); + } + + debug_query_string = save_debug_query_string; + end_replication_step(); +} /* * Logical replication protocol message dispatcher. 
@@ -2572,6 +2808,10 @@ apply_dispatch(StringInfo s) */ break; + case LOGICAL_REP_MSG_DDL: + apply_handle_ddl(s); + break; + case LOGICAL_REP_MSG_STREAM_START: apply_handle_stream_start(s); break; diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index ca46fba3af..bed636fc61 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -53,6 +53,11 @@ static void pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr message_lsn, bool transactional, const char *prefix, Size sz, const char *message); +static void pgoutput_ddl(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, XLogRecPtr message_lsn, + const char *prefix, Oid relid, + DeparsedCommandType cmdtype, + Size sz, const char *message); static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); static void pgoutput_begin_prepare_txn(LogicalDecodingContext *ctx, @@ -256,6 +261,7 @@ _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->change_cb = pgoutput_change; cb->truncate_cb = pgoutput_truncate; cb->message_cb = pgoutput_message; + cb->ddl_cb = pgoutput_ddl; cb->commit_cb = pgoutput_commit_txn; cb->begin_prepare_cb = pgoutput_begin_prepare_txn; @@ -272,6 +278,7 @@ _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->stream_commit_cb = pgoutput_stream_commit; cb->stream_change_cb = pgoutput_change; cb->stream_message_cb = pgoutput_message; + cb->stream_ddl_cb = pgoutput_ddl; cb->stream_truncate_cb = pgoutput_truncate; /* transaction streaming - two-phase commit */ cb->stream_prepare_cb = pgoutput_stream_prepare_txn; @@ -426,6 +433,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, /* This plugin uses binary protocol. */ opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; + opt->receive_rewrites = true; /* * This is replication start and not slot initialization. 
@@ -499,6 +507,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, /* Init publication state. */ data->publications = NIL; + data->deleted_relids = NIL; publications_valid = false; CacheRegisterSyscacheCallback(PUBLICATIONOID, publication_invalidation_cb, @@ -1377,9 +1386,22 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, ReorderBufferChangeType action = change->action; TupleTableSlot *old_slot = NULL; TupleTableSlot *new_slot = NULL; + bool table_rewrite = false; update_replication_progress(ctx, false); + /* + * For heap rewrites, we might need to replicate them if the rewritten + * table publishes rewrite ddl message. So get the actual relation here + * and check the pubaction later. + */ + if (relation->rd_rel->relrewrite) + { + table_rewrite = true; + relation = RelationIdGetRelation(relation->rd_rel->relrewrite); + targetrel = relation; + } + if (!is_publishable_relation(relation)) return; @@ -1413,6 +1435,13 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Assert(false); } + /* + * We don't publish table rewrite change unless we publish the rewrite ddl + * message. 
+ */ + if (table_rewrite && !relentry->pubactions.pubddl) + return; + /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); @@ -1442,8 +1471,8 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, } /* Check row filter */ - if (!pgoutput_row_filter(targetrel, NULL, &new_slot, relentry, - &action)) + if (!table_rewrite && + !pgoutput_row_filter(targetrel, NULL, &new_slot, relentry, &action)) break; /* @@ -1463,8 +1492,19 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, maybe_send_schema(ctx, change, relation, relentry); OutputPluginPrepareWrite(ctx, true); - logicalrep_write_insert(ctx->out, xid, targetrel, new_slot, - data->binary, relentry->columns); + + /* + * Convert the rewrite inserts to updates so that the subscriber + * can replay it. This is needed to make sure the data between + * publisher and subscriber is consistent. + */ + if (table_rewrite) + logicalrep_write_update(ctx->out, xid, targetrel, + NULL, new_slot, data->binary, + relentry->columns); + else + logicalrep_write_insert(ctx->out, xid, targetrel, new_slot, + data->binary, relentry->columns); OutputPluginWrite(ctx, true); break; case REORDER_BUFFER_CHANGE_UPDATE: @@ -1596,6 +1636,9 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, ancestor = NULL; } + if (table_rewrite) + RelationClose(relation); + /* Cleanup */ MemoryContextSwitchTo(old); MemoryContextReset(data->context); @@ -1714,6 +1757,138 @@ pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, OutputPluginWrite(ctx, true); } +/* + * Send the decoded DDL over wire. 
+ */ +static void +pgoutput_ddl(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, Oid relid, DeparsedCommandType cmdtype, + Size sz, const char *message) +{ + PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; + PGOutputTxnData *txndata = (PGOutputTxnData *) txn->output_plugin_private; + Relation relation = NULL; + RelationSyncEntry *relentry; + + switch (cmdtype) + { + case DCT_TableDropStart: + + /* + * On DROP start, add the relid to a deleted_relid list if the + * relid is part of a publication that supports ddl publication. + * We need this because on DROP end, the relid will no longer be + * valid. Later on Drop end, verify that the drop is for a relid + * that is on the deleted_rid list, and only then send the ddl + * message. + */ + relation = RelationIdGetRelation(relid); + + Assert(relation); + relentry = get_rel_sync_entry(data, relation); + + if (relentry->pubactions.pubddl) + data->deleted_relids = lappend_oid(data->deleted_relids, relid); + + RelationClose(relation); + return; + + case DCT_TableDropEnd: + if (!list_member_oid(data->deleted_relids, relid)) + return; + else + data->deleted_relids = list_delete_oid(data->deleted_relids, relid); + break; + + case DCT_TableAlter: + + /* + * For table rewrite ddl, we first send the original ddl message + * to subscriber, then convert the upcoming rewrite INSERT to + * UPDATE and send them to subscriber so that the data between + * publisher and subscriber can always be consistent. + * + * We do this way because of two reason: + * + * (1) The data before the rewrite ddl could already be different + * among publisher and subscriber. To make sure the extra data in + * subscriber which doesn't exist in publisher also get rewritten, + * we need to let the subscriber execute the original rewrite ddl + * to rewrite all the data at first. 
+ * + * (2) the data after executing rewrite ddl could be different + * among publisher and subscriber(due to different + * functions/operators used during rewrite), so we need to + * replicate the rewrite UPDATEs to keep the data consistent. + * + * TO IMPROVE: We could improve this by letting the subscriber + * only rewrite the extra data instead of doing fully rewrite and + * use the upcoming rewrite UPDATEs to rewrite the rest data. + * Besides, we may not need to send rewrite changes for all type + * of rewrite ddl, for example, it seems fine to skip sending + * rewrite changes for ALTER TABLE SET LOGGED as the data in the + * table doesn't actually be changed. + */ + relation = RelationIdGetRelation(relid); + Assert(relation); + + relentry = get_rel_sync_entry(data, relation); + + /* + * Skip sending this ddl if we don't publish ddl message or the + * ddl need to be published via its root relation. + */ + if (!relentry->pubactions.pubddl || + relentry->publish_as_relid != relid) + { + RelationClose(relation); + return; + } + + break; + + case DCT_SimpleCmd: + relation = RelationIdGetRelation(relid); + + if (relation == NULL) + break; + + relentry = get_rel_sync_entry(data, relation); + + if (!relentry->pubactions.pubddl) + { + RelationClose(relation); + return; + } + + break; + + case DCT_ObjectDrop: + /* do nothing */ + break; + + default: + elog(ERROR, "unsupported type %d", cmdtype); + break; + } + + /* Send BEGIN if we haven't yet */ + if (txndata && !txndata->sent_begin_txn) + pgoutput_send_begin(ctx, txn); + + OutputPluginPrepareWrite(ctx, true); + logicalrep_write_ddl(ctx->out, + message_lsn, + prefix, + sz, + message); + OutputPluginWrite(ctx, true); + + if (relation) + RelationClose(relation); +} + /* * Return true if the data is associated with an origin and the user has * requested the changes that don't have an origin, false otherwise. 
@@ -1995,7 +2170,8 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) entry->schema_sent = false; entry->streamed_txns = NIL; entry->pubactions.pubinsert = entry->pubactions.pubupdate = - entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false; + entry->pubactions.pubdelete = entry->pubactions.pubtruncate = + entry->pubactions.pubddl = false; entry->new_slot = NULL; entry->old_slot = NULL; memset(entry->exprstate, 0, sizeof(entry->exprstate)); @@ -2053,6 +2229,7 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) entry->pubactions.pubupdate = false; entry->pubactions.pubdelete = false; entry->pubactions.pubtruncate = false; + entry->pubactions.pubddl = false; /* * Tuple slots cleanups. (Will be rebuilt later if needed). @@ -2166,6 +2343,7 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) entry->pubactions.pubupdate |= pub->pubactions.pubupdate; entry->pubactions.pubdelete |= pub->pubactions.pubdelete; entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate; + entry->pubactions.pubddl |= pub->pubactions.pubddl; /* * We want to publish the changes as the top-most ancestor diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index f1215c8e18..29a31449cf 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1666,8 +1666,11 @@ ProcessUtilitySlow(ParseState *pstate, break; case T_CreateTableAsStmt: + EventTriggerTableInitWriteStart(parsetree); address = ExecCreateTableAs(pstate, (CreateTableAsStmt *) parsetree, params, queryEnv, qc); + EventTriggerTableInitWriteEnd(address); + commandCollected = true; break; case T_RefreshMatViewStmt: diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c index f7f7165f7f..7fb8cb291d 100644 --- a/src/backend/utils/cache/evtcache.c +++ b/src/backend/utils/cache/evtcache.c @@ -167,6 +167,8 @@ BuildEventTriggerCache(void) event = EVT_SQLDrop; else if (strcmp(evtevent, "table_rewrite") == 0) event = EVT_TableRewrite; + else if 
(strcmp(evtevent, "table_init_write") == 0) + event = EVT_TableInitWrite; else continue; diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 450e5124a5..2f9dd19c85 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -5690,6 +5690,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc) pubdesc->pubactions.pubupdate |= pubform->pubupdate; pubdesc->pubactions.pubdelete |= pubform->pubdelete; pubdesc->pubactions.pubtruncate |= pubform->pubtruncate; + pubdesc->pubactions.pubddl |= pubform->pubddl; /* * Check if all columns referenced in the filter expression are part diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 44d957c038..edb4a1ba82 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -3949,6 +3949,7 @@ getPublications(Archive *fout, int *numPublications) int i_pubupdate; int i_pubdelete; int i_pubtruncate; + int i_pubddl; int i_pubviaroot; int i, ntups; @@ -3964,23 +3965,23 @@ getPublications(Archive *fout, int *numPublications) resetPQExpBuffer(query); /* Get the publications. 
*/ - if (fout->remoteVersion >= 130000) + if (fout->remoteVersion >= 160000) appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, " "p.pubowner, " - "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubddl, p.pubviaroot " "FROM pg_publication p"); + else if (fout->remoteVersion >= 130000) + appendPQExpBufferStr(query, + "SELECT p.tableoid, p.oid, p.pubname, " + "p.pubowner, " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubddl, p.pubviaroot " + "FROM pg_publication p"); else if (fout->remoteVersion >= 110000) appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, " "p.pubowner, " - "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubddl, false AS pubviaroot " "FROM pg_publication p"); else appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, " "p.pubowner, " - "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot " + "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubddl, false AS pubviaroot " "FROM pg_publication p"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -3996,6 +3997,7 @@ getPublications(Archive *fout, int *numPublications) i_pubupdate = PQfnumber(res, "pubupdate"); i_pubdelete = PQfnumber(res, "pubdelete"); i_pubtruncate = PQfnumber(res, "pubtruncate"); + i_pubddl = PQfnumber(res, "pubddl"); i_pubviaroot = PQfnumber(res, "pubviaroot"); pubinfo = pg_malloc(ntups * sizeof(PublicationInfo)); @@ -4019,6 +4021,8 @@ getPublications(Archive *fout, int *numPublications) (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0); pubinfo[i].pubtruncate = (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0); + pubinfo[i].pubddl = + (strcmp(PQgetvalue(res, i, i_pubddl), "t") == 0); pubinfo[i].pubviaroot = (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0); @@ -4098,6 +4102,15 @@ dumpPublication(Archive *fout, const PublicationInfo *pubinfo) first = false; } + if (pubinfo->pubddl) + { + if (!first) + 
appendPQExpBufferStr(query, ", "); + + appendPQExpBufferStr(query, "ddl"); + first = false; + } + appendPQExpBufferChar(query, '\''); if (pubinfo->pubviaroot) diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 436ac5bb98..cd50ec8cf9 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -619,6 +619,7 @@ typedef struct _PublicationInfo bool pubupdate; bool pubdelete; bool pubtruncate; + bool pubddl; bool pubviaroot; } PublicationInfo; diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index df166365e8..0f4b1eb8dc 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -6164,7 +6164,7 @@ listPublications(const char *pattern) PQExpBufferData buf; PGresult *res; printQueryOpt myopt = pset.popt; - static const bool translate_columns[] = {false, false, false, false, false, false, false, false}; + static const bool translate_columns[] = {false, false, false, false, false, false, false, false, false}; if (pset.sversion < 100000) { @@ -6181,13 +6181,18 @@ listPublications(const char *pattern) printfPQExpBuffer(&buf, "SELECT pubname AS \"%s\",\n" " pg_catalog.pg_get_userbyid(pubowner) AS \"%s\",\n" - " puballtables AS \"%s\",\n" - " pubinsert AS \"%s\",\n" - " pubupdate AS \"%s\",\n" - " pubdelete AS \"%s\"", + " puballtables AS \"%s\"", gettext_noop("Name"), gettext_noop("Owner"), - gettext_noop("All tables"), + gettext_noop("All tables")); + if (pset.sversion >= 160000) + appendPQExpBuffer(&buf, + ",\n pubddl AS \"%s\"", + gettext_noop("DDL")); + appendPQExpBuffer(&buf, + ",\n pubinsert AS \"%s\",\n" + " pubupdate AS \"%s\",\n" + " pubdelete AS \"%s\"", gettext_noop("Inserts"), gettext_noop("Updates"), gettext_noop("Deletes")); @@ -6199,7 +6204,6 @@ listPublications(const char *pattern) appendPQExpBuffer(&buf, ",\n pubviaroot AS \"%s\"", gettext_noop("Via root")); - appendPQExpBufferStr(&buf, "\nFROM pg_catalog.pg_publication\n"); @@ -6289,6 +6293,7 @@ describePublications(const char *pattern) PGresult 
*res; bool has_pubtruncate; bool has_pubviaroot; + bool has_pubddl; PQExpBufferData title; printTableContent cont; @@ -6305,13 +6310,19 @@ describePublications(const char *pattern) has_pubtruncate = (pset.sversion >= 110000); has_pubviaroot = (pset.sversion >= 130000); + has_pubddl = (pset.sversion >= 160000); initPQExpBuffer(&buf); printfPQExpBuffer(&buf, "SELECT oid, pubname,\n" " pg_catalog.pg_get_userbyid(pubowner) AS owner,\n" - " puballtables, pubinsert, pubupdate, pubdelete"); + " puballtables"); + if (has_pubddl) + appendPQExpBufferStr(&buf, + ", pubddl"); + appendPQExpBufferStr(&buf, + ", pubinsert, pubupdate, pubdelete"); if (has_pubtruncate) appendPQExpBufferStr(&buf, ", pubtruncate"); @@ -6369,6 +6380,8 @@ describePublications(const char *pattern) ncols++; if (has_pubviaroot) ncols++; + if (has_pubddl) + ncols++; initPQExpBuffer(&title); printfPQExpBuffer(&title, _("Publication %s"), pubname); @@ -6376,6 +6389,8 @@ describePublications(const char *pattern) printTableAddHeader(&cont, gettext_noop("Owner"), true, align); printTableAddHeader(&cont, gettext_noop("All tables"), true, align); + if (has_pubddl) + printTableAddHeader(&cont, gettext_noop("DDL"), true, align); printTableAddHeader(&cont, gettext_noop("Inserts"), true, align); printTableAddHeader(&cont, gettext_noop("Updates"), true, align); printTableAddHeader(&cont, gettext_noop("Deletes"), true, align); @@ -6393,6 +6408,8 @@ describePublications(const char *pattern) printTableAddCell(&cont, PQgetvalue(res, i, 7), false, false); if (has_pubviaroot) printTableAddCell(&cont, PQgetvalue(res, i, 8), false, false); + if (has_pubddl) + printTableAddCell(&cont, PQgetvalue(res, i, 9), false, false); if (!puballtables) { diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 05e0498c5e..adbd63f8a0 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -11901,4 +11901,16 @@ { oid => '4643', descr => 'expand JSON format DDL to a plain DDL 
command', proname => 'ddl_deparse_expand_command', prorettype => 'text', proargtypes => 'text', prosrc => 'ddl_deparse_expand_command' }, +{ oid => '4644', descr => 'trigger for ddl command deparse end', + proname => 'publication_deparse_ddl_command_end', prorettype => 'event_trigger', + proargtypes => '', prosrc => 'publication_deparse_ddl_command_end' }, +{ oid => '4645', descr => 'trigger for ddl command deparse start', + proname => 'publication_deparse_ddl_command_start', prorettype => 'event_trigger', + proargtypes => '', prosrc => 'publication_deparse_ddl_command_start' }, +{ oid => '4646', descr => 'trigger for ddl command deparse table rewrite', + proname => 'publication_deparse_table_rewrite', prorettype => 'event_trigger', + proargtypes => '', prosrc => 'publication_deparse_table_rewrite' }, +{ oid => '4647', descr => 'trigger for ddl command deparse table init', + proname => 'publication_deparse_table_init_write', prorettype => 'event_trigger', + proargtypes => '', prosrc => 'publication_deparse_table_init_write' }, ] diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h index ecf5a28e00..cc950436a3 100644 --- a/src/include/catalog/pg_publication.h +++ b/src/include/catalog/pg_publication.h @@ -54,6 +54,9 @@ CATALOG(pg_publication,6104,PublicationRelationId) /* true if partition changes are published using root schema */ bool pubviaroot; + + /* true if ddls are published */ + bool pubddl; } FormData_pg_publication; /* ---------------- @@ -72,6 +75,7 @@ typedef struct PublicationActions bool pubupdate; bool pubdelete; bool pubtruncate; + bool pubddl; } PublicationActions; typedef struct PublicationDesc diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h index 47094752e9..942816f6d6 100644 --- a/src/include/commands/event_trigger.h +++ b/src/include/commands/event_trigger.h @@ -94,6 +94,10 @@ extern void EventTriggerDDLCommandEnd(Node *parsetree); extern void EventTriggerSQLDrop(Node 
*parsetree); extern void EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason); +extern void EventTriggerTableInitWriteStart(Node *parsetree); +extern void EventTriggerTableInitWrite(Node *parsetree, ObjectAddress address); +extern void EventTriggerTableInitWriteEnd(ObjectAddress address); + extern bool EventTriggerBeginCompleteQuery(void); extern void EventTriggerEndCompleteQuery(void); extern bool trackDroppedObjectsNeeded(void); @@ -110,7 +114,12 @@ extern void EventTriggerCollectSimpleCommand(ObjectAddress address, extern void EventTriggerAlterTableStart(Node *parsetree); extern void EventTriggerAlterTableRelid(Oid objectId); extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd, - ObjectAddress address); + ObjectAddress address, + bool rewrite); + +extern void EventTriggerAlterTypeStart(AlterTableCmd *subcmd, Relation rel); +extern void EventTriggerAlterTypeEnd(Node *subcmd, ObjectAddress address, + bool rewrite); extern void EventTriggerAlterTableEnd(void); extern void EventTriggerCollectGrant(InternalGrant *istmt); diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index 1eb7dda529..8572ca6c1e 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -61,6 +61,7 @@ typedef enum LogicalRepMsgType LOGICAL_REP_MSG_RELATION = 'R', LOGICAL_REP_MSG_TYPE = 'Y', LOGICAL_REP_MSG_MESSAGE = 'M', + LOGICAL_REP_MSG_DDL = 'L', LOGICAL_REP_MSG_BEGIN_PREPARE = 'b', LOGICAL_REP_MSG_PREPARE = 'P', LOGICAL_REP_MSG_COMMIT_PREPARED = 'K', @@ -230,6 +231,9 @@ extern List *logicalrep_read_truncate(StringInfo in, bool *cascade, bool *restart_seqs); extern void logicalrep_write_message(StringInfo out, TransactionId xid, XLogRecPtr lsn, bool transactional, const char *prefix, Size sz, const char *message); +extern void logicalrep_write_ddl(StringInfo out, XLogRecPtr lsn, + const char *prefix, Size sz, const char *message); +extern char *logicalrep_read_ddl(StringInfo in, 
XLogRecPtr *lsn, const char **prefix, Size *sz); extern void logicalrep_write_rel(StringInfo out, TransactionId xid, Relation rel, Bitmapset *columns); extern LogicalRepRelation *logicalrep_read_rel(StringInfo in); diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h index 0c4250096c..ae1addce7a 100644 --- a/src/include/replication/output_plugin.h +++ b/src/include/replication/output_plugin.h @@ -9,7 +9,6 @@ #ifndef OUTPUT_PLUGIN_H #define OUTPUT_PLUGIN_H -#include "replication/ddlmessage.h" #include "replication/reorderbuffer.h" struct LogicalDecodingContext; @@ -214,6 +213,19 @@ typedef void (*LogicalDecodeStreamMessageCB) (struct LogicalDecodingContext *ctx Size message_size, const char *message); +/* + * Callback for streaming logical decoding DDL messages from in-progress + * transactions. + */ +typedef void (*LogicalDecodeStreamDDLMessageCB) (struct LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, + Oid relid, + DeparsedCommandType cmdtype, + Size message_size, + const char *message); + /* * Callback for streaming truncates from in-progress transactions. 
*/ @@ -253,6 +265,7 @@ typedef struct OutputPluginCallbacks LogicalDecodeStreamCommitCB stream_commit_cb; LogicalDecodeStreamChangeCB stream_change_cb; LogicalDecodeStreamMessageCB stream_message_cb; + LogicalDecodeStreamDDLMessageCB stream_ddl_cb; LogicalDecodeStreamTruncateCB stream_truncate_cb; } OutputPluginCallbacks; diff --git a/src/include/replication/pgoutput.h b/src/include/replication/pgoutput.h index 02027550e2..83e0b1e2e5 100644 --- a/src/include/replication/pgoutput.h +++ b/src/include/replication/pgoutput.h @@ -25,6 +25,7 @@ typedef struct PGOutputData uint32 protocol_version; List *publication_names; List *publications; + List *deleted_relids; bool binary; bool streaming; bool messages; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 3d2939de29..41a1911c81 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -539,6 +539,17 @@ typedef void (*ReorderBufferStreamMessageCB) ( const char *prefix, Size sz, const char *message); +/* stream DDL message callback signature */ +typedef void (*ReorderBufferStreamDDLMessageCB) ( + ReorderBuffer *rb, + ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + const char *prefix, + Oid relid, + DeparsedCommandType cmdtype, + Size sz, + const char *message); + /* stream truncate callback signature */ typedef void (*ReorderBufferStreamTruncateCB) ( ReorderBuffer *rb, @@ -609,6 +620,7 @@ struct ReorderBuffer ReorderBufferStreamCommitCB stream_commit; ReorderBufferStreamChangeCB stream_change; ReorderBufferStreamMessageCB stream_message; + ReorderBufferStreamDDLMessageCB stream_ddl; ReorderBufferStreamTruncateCB stream_truncate; /* diff --git a/src/include/utils/evtcache.h b/src/include/utils/evtcache.h index ddb67a68fa..1e648317ae 100644 --- a/src/include/utils/evtcache.h +++ b/src/include/utils/evtcache.h @@ -22,7 +22,8 @@ typedef enum EVT_DDLCommandStart, EVT_DDLCommandEnd, EVT_SQLDrop, - EVT_TableRewrite + 
EVT_TableRewrite, + EVT_TableInitWrite } EventTriggerEvent; typedef struct diff --git a/src/test/modules/test_ddl_deparse_regress/expected/create_table.out b/src/test/modules/test_ddl_deparse_regress/expected/create_table.out index 9eeaa1ccc1..8e6ffa9893 100644 --- a/src/test/modules/test_ddl_deparse_regress/expected/create_table.out +++ b/src/test/modules/test_ddl_deparse_regress/expected/create_table.out @@ -20,6 +20,13 @@ NOTICE: re-formed command: ALTER TABLE public.ctlt1 ALTER COLUMN a SET STORAGE ALTER TABLE ctlt1 ALTER COLUMN b SET STORAGE EXTERNAL; NOTICE: deparsed json: {"fmt": "ALTER %{objtype}s %{identity}D %{subcmds:, }s", "objtype": "TABLE", "subcmds": [{"fmt": "ALTER COLUMN %{column}I SET STORAGE %{storage}s", "type": "set storage", "column": "b", "storage": "external"}], "identity": {"objname": "ctlt1", "schemaname": "public"}} NOTICE: re-formed command: ALTER TABLE public.ctlt1 ALTER COLUMN b SET STORAGE external +CREATE TABLE ctlt1_like (LIKE ctlt1 INCLUDING ALL); +NOTICE: deparsed json: {"fmt": "CREATE %{persistence}s TABLE %{if_not_exists}s %{identity}D (%{table_elements:, }s) %{inherits}s %{tablespace}s %{on_commit}s %{partition_by}s %{access_method}s %{with_clause}s", "identity": {"objname": "ctlt1_like", "schemaname": "public"}, "inherits": {"fmt": "INHERITS (%{parents:, }D)", "parents": null, "present": false}, "on_commit": {"fmt": "ON COMMIT %{on_commit_value}s", "present": false, "on_commit_value": null}, "tablespace": {"fmt": "TABLESPACE %{tablespace}I", "present": false, "tablespace": null}, "persistence": "", "with_clause": {"fmt": "WITH", "present": false}, "partition_by": {"fmt": "PARTITION BY %{definition}s", "present": false, "definition": null}, "access_method": {"fmt": "USING %{access_method}I", "present": false, "access_method": null}, "if_not_exists": "", "table_elements": [{"fmt": "%{name}I %{coltype}T STORAGE %{colstorage}s %{compression}s %{collation}s %{not_null}s %{default}s %{generated_column}s", "name": "a", "type": 
"column", "coltype": {"typmod": "", "typarray": false, "typename": "text", "schemaname": "pg_catalog"}, "default": {"fmt": "DEFAULT", "present": false}, "not_null": "", "collation": {"fmt": "COLLATE %{name}D", "name": {"objname": "default", "schemaname": "pg_catalog"}}, "colstorage": "main", "compression": {"fmt": "COMPRESSION %{compression_method}I", "present": false, "compression_method": null}, "generated_column": {"fmt": "GENERATED ALWAYS AS", "present": false}}, {"fmt": "%{name}I %{coltype}T STORAGE %{colstorage}s %{compression}s %{collation}s %{not_null}s %{default}s %{generated_column}s", "name": "b", "type": "column", "coltype": {"typmod": "", "typarray": false, "typename": "text", "schemaname": "pg_catalog"}, "default": {"fmt": "DEFAULT", "present": false}, "not_null": "", "collation": {"fmt": "COLLATE %{name}D", "name": {"objname": "default", "schemaname": "pg_catalog"}}, "colstorage": "external", "compression": {"fmt": "COMPRESSION %{compression_method}I", "present": false, "compression_method": null}, "generated_column": {"fmt": "GENERATED ALWAYS AS", "present": false}}, {"fmt": "CONSTRAINT %{name}I %{definition}s", "name": "ctlt1_a_check", "type": "constraint", "contype": "check", "definition": "CHECK ((pg_catalog.length(a) OPERATOR(pg_catalog.>) 2))"}, {"fmt": "CONSTRAINT %{name}I %{definition}s", "name": "ctlt1_like_pkey", "type": "constraint", "contype": "primary key", "definition": "PRIMARY KEY (a)"}]} +NOTICE: re-formed command: CREATE TABLE public.ctlt1_like (a pg_catalog.text STORAGE main COLLATE pg_catalog."default" , b pg_catalog.text STORAGE external COLLATE pg_catalog."default" , CONSTRAINT ctlt1_a_check CHECK ((pg_catalog.length(a) OPERATOR(pg_catalog.>) 2)), CONSTRAINT ctlt1_like_pkey PRIMARY KEY (a)) +NOTICE: deparsed json: +NOTICE: re-formed command: +NOTICE: deparsed json: +NOTICE: re-formed command: -- Test foreign key constraint is handled in a following ALTER TABLE ADD CONSTRAINT FOREIGN KEY REFERENCES subcommand CREATE TABLE product 
(id int PRIMARY KEY, name text); NOTICE: deparsed json: {"fmt": "CREATE %{persistence}s TABLE %{if_not_exists}s %{identity}D (%{table_elements:, }s) %{inherits}s %{tablespace}s %{on_commit}s %{partition_by}s %{access_method}s %{with_clause}s", "identity": {"objname": "product", "schemaname": "public"}, "inherits": {"fmt": "INHERITS (%{parents:, }D)", "parents": null, "present": false}, "on_commit": {"fmt": "ON COMMIT %{on_commit_value}s", "present": false, "on_commit_value": null}, "tablespace": {"fmt": "TABLESPACE %{tablespace}I", "present": false, "tablespace": null}, "persistence": "", "with_clause": {"fmt": "WITH", "present": false}, "partition_by": {"fmt": "PARTITION BY %{definition}s", "present": false, "definition": null}, "access_method": {"fmt": "USING %{access_method}I", "present": false, "access_method": null}, "if_not_exists": "", "table_elements": [{"fmt": "%{name}I %{coltype}T STORAGE %{colstorage}s %{compression}s %{collation}s %{not_null}s %{default}s %{generated_column}s", "name": "id", "type": "column", "coltype": {"typmod": "", "typarray": false, "typename": "int4", "schemaname": "pg_catalog"}, "default": {"fmt": "DEFAULT", "present": false}, "not_null": "", "collation": {"fmt": "COLLATE", "present": false}, "colstorage": "plain", "compression": {"fmt": "COMPRESSION %{compression_method}I", "present": false, "compression_method": null}, "generated_column": {"fmt": "GENERATED ALWAYS AS", "present": false}}, {"fmt": "%{name}I %{coltype}T STORAGE %{colstorage}s %{compression}s %{collation}s %{not_null}s %{default}s %{generated_column}s", "name": "name", "type": "column", "coltype": {"typmod": "", "typarray": false, "typename": "text", "schemaname": "pg_catalog"}, "default": {"fmt": "DEFAULT", "present": false}, "not_null": "", "collation": {"fmt": "COLLATE %{name}D", "name": {"objname": "default", "schemaname": "pg_catalog"}}, "colstorage": "extended", "compression": {"fmt": "COMPRESSION %{compression_method}I", "present": false, 
"compression_method": null}, "generated_column": {"fmt": "GENERATED ALWAYS AS", "present": false}}, {"fmt": "CONSTRAINT %{name}I %{definition}s", "name": "product_pkey", "type": "constraint", "contype": "primary key", "definition": "PRIMARY KEY (id)"}]} diff --git a/src/test/modules/test_ddl_deparse_regress/sql/create_table.sql b/src/test/modules/test_ddl_deparse_regress/sql/create_table.sql index 997d2f54fb..1c5f0c206a 100644 --- a/src/test/modules/test_ddl_deparse_regress/sql/create_table.sql +++ b/src/test/modules/test_ddl_deparse_regress/sql/create_table.sql @@ -9,6 +9,7 @@ CREATE UNIQUE INDEX covering_index on simple_table (id) INCLUDE (name); CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; ALTER TABLE ctlt1 ALTER COLUMN b SET STORAGE EXTERNAL; +CREATE TABLE ctlt1_like (LIKE ctlt1 INCLUDING ALL); -- Test foreign key constraint is handled in a following ALTER TABLE ADD CONSTRAINT FOREIGN KEY REFERENCES subcommand CREATE TABLE product (id int PRIMARY KEY, name text); diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out index 8fc62cebd2..ebbdfa9020 100644 --- a/src/test/regress/expected/psql.out +++ b/src/test/regress/expected/psql.out @@ -6194,9 +6194,9 @@ List of schemas (0 rows) \dRp "no.such.publication" - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root -------+-------+------------+---------+---------+---------+-----------+---------- + List of publications + Name | Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +------+-------+------------+-----+---------+---------+---------+-----------+---------- (0 rows) \dRs "no.such.subscription" diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index 427f87ea07..e064e936e7 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -30,20 
+30,20 @@ ERROR: conflicting or redundant options LINE 1: ...ub_xxx WITH (publish_via_partition_root = 'true', publish_vi... ^ \dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------+--------------------------+------------+---------+---------+---------+-----------+---------- - testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f - testpub_default | regress_publication_user | f | f | t | f | f | f + List of publications + Name | Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------+--------------------------+------------+-----+---------+---------+---------+-----------+---------- + testpib_ins_trunct | regress_publication_user | f | f | t | f | f | f | f + testpub_default | regress_publication_user | f | f | f | t | f | f | f (2 rows) ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete'); \dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------+--------------------------+------------+---------+---------+---------+-----------+---------- - testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f - testpub_default | regress_publication_user | f | t | t | t | f | f + List of publications + Name | Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------+--------------------------+------------+-----+---------+---------+---------+-----------+---------- + testpib_ins_trunct | regress_publication_user | f | f | t | f | f | f | f + testpub_default | regress_publication_user | f | f | t | t | t | f | f (2 rows) --- adding tables @@ -87,10 +87,10 @@ RESET client_min_messages; -- should be able to add schema to 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test; \dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | 
Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortable + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_tbl1" Tables from schemas: @@ -99,20 +99,20 @@ Tables from schemas: -- should be able to drop schema from 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test; \dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortable + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_tbl1" -- should be able to set schema to 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test; \dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortable + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test" @@ -123,10 +123,10 @@ CREATE PUBLICATION testpub_forschema FOR TABLES 
IN SCHEMA pub_test; CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk; RESET client_min_messages; \dRp+ testpub_for_tbl_schema - Publication testpub_for_tbl_schema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_for_tbl_schema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test.testpub_nopk" Tables from schemas: @@ -135,10 +135,10 @@ Tables from schemas: -- should be able to add a table of the same schema to the schema publication ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk; \dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test.testpub_nopk" Tables from schemas: @@ -147,10 +147,10 @@ Tables from schemas: -- should be able to drop the table ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; \dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_forschema 
+ Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test" @@ -161,10 +161,10 @@ ERROR: relation "testpub_nopk" is not part of the publication -- should be able to set table to schema publication ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk; \dRp+ testpub_forschema - Publication testpub_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test.testpub_nopk" @@ -186,10 +186,10 @@ Publications: "testpub_foralltables" \dRp+ testpub_foralltables - Publication testpub_foralltables - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | t | t | t | f | f | f + Publication testpub_foralltables + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | t | f | t | t | f | f | f (1 row) DROP TABLE testpub_tbl2; @@ -201,19 +201,19 @@ CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3; RESET client_min_messages; \dRp+ testpub3 - Publication testpub3 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub3 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_tbl3" "public.testpub_tbl3a" \dRp+ testpub4 - Publication testpub4 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub4 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_tbl3" @@ -234,10 +234,10 @@ UPDATE testpub_parted1 SET a = 1; -- only parent is listed as being in publication, not the partition ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted; \dRp+ testpub_forparted - Publication testpub_forparted - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_forparted + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_parted" @@ -252,10 +252,10 @@ ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1; UPDATE testpub_parted1 SET a = 1; ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true); \dRp+ testpub_forparted - Publication 
testpub_forparted - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | t + Publication testpub_forparted + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | t Tables: "public.testpub_parted" @@ -284,10 +284,10 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); RESET client_min_messages; \dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f + Publication testpub5 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: "public.testpub_rf_tbl1" "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) @@ -300,10 +300,10 @@ Tables: ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); \dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f + Publication testpub5 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: 
"public.testpub_rf_tbl1" "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) @@ -319,10 +319,10 @@ Publications: ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; \dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f + Publication testpub5 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: "public.testpub_rf_tbl1" "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) @@ -330,10 +330,10 @@ Tables: -- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); \dRp+ testpub5 - Publication testpub5 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f + Publication testpub5 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) @@ -366,10 +366,10 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); RESET client_min_messages; \dRp+ testpub_syntax1 - Publication testpub_syntax1 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - 
regress_publication_user | f | t | f | f | f | f + Publication testpub_syntax1 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: "public.testpub_rf_tbl1" "public.testpub_rf_tbl3" WHERE (e < 999) @@ -379,10 +379,10 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); RESET client_min_messages; \dRp+ testpub_syntax2 - Publication testpub_syntax2 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | f | f + Publication testpub_syntax2 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | f | f Tables: "public.testpub_rf_tbl1" "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999) @@ -497,10 +497,10 @@ CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2; ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); RESET client_min_messages; \dRp+ testpub6 - Publication testpub6 - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub6 + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: 
"testpub_rf_schema2.testpub_rf_tbl6" WHERE (i < 99) Tables from schemas: @@ -714,10 +714,10 @@ CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate'); RESET client_min_messages; ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok \dRp+ testpub_table_ins - Publication testpub_table_ins - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | f | f | t | f + Publication testpub_table_ins + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | f | f | t | f Tables: "public.testpub_tbl5" (a) @@ -891,10 +891,10 @@ CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c)); ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey; ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1); \dRp+ testpub_both_filters - Publication testpub_both_filters - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_both_filters + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "public.testpub_tbl_both_filters" (a, c) WHERE (c <> 1) @@ -1099,10 +1099,10 @@ ERROR: relation "testpub_tbl1" is already member of publication "testpub_fortbl CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1; ERROR: publication "testpub_fortbl" already exists \dRp+ testpub_fortbl - Publication 
testpub_fortbl - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortbl + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test.testpub_nopk" "public.testpub_tbl1" @@ -1140,10 +1140,10 @@ Publications: "testpub_fortbl" \dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | f | f + Publication testpub_default + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | f | f Tables: "pub_test.testpub_nopk" "public.testpub_tbl1" @@ -1221,10 +1221,10 @@ REVOKE CREATE ON DATABASE regression FROM regress_publication_user2; DROP TABLE testpub_parted; DROP TABLE testpub_tbl1; \dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | f | f + Publication testpub_default + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | f | f (1 row) -- fail - must be owner of publication @@ -1234,20 +1234,20 @@ ERROR: must be owner of publication 
testpub_default RESET ROLE; ALTER PUBLICATION testpub_default RENAME TO testpub_foo; \dRp testpub_foo - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root --------------+--------------------------+------------+---------+---------+---------+-----------+---------- - testpub_foo | regress_publication_user | f | t | t | t | f | f + List of publications + Name | Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +-------------+--------------------------+------------+-----+---------+---------+---------+-----------+---------- + testpub_foo | regress_publication_user | f | f | t | t | t | f | f (1 row) -- rename back to keep the rest simple ALTER PUBLICATION testpub_foo RENAME TO testpub_default; ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2; \dRp testpub_default - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ------------------+---------------------------+------------+---------+---------+---------+-----------+---------- - testpub_default | regress_publication_user2 | f | t | t | t | f | f + List of publications + Name | Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +-----------------+---------------------------+------------+-----+---------+---------+---------+-----------+---------- + testpub_default | regress_publication_user2 | f | f | t | t | t | f | f (1 row) -- adding schemas and tables @@ -1263,19 +1263,19 @@ CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int); SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | 
All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3; \dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub2_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1289,44 +1289,44 @@ CREATE PUBLICATION testpub6_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA", CUR CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"; RESET client_min_messages; \dRp+ testpub3_forschema - Publication testpub3_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub3_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "public" \dRp+ testpub4_forschema - Publication testpub4_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | 
t | f + Publication testpub4_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "CURRENT_SCHEMA" \dRp+ testpub5_forschema - Publication testpub5_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub5_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "CURRENT_SCHEMA" "public" \dRp+ testpub6_forschema - Publication testpub6_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub6_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "CURRENT_SCHEMA" "public" \dRp+ testpub_fortable - Publication testpub_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortable + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + 
regress_publication_user | f | f | t | t | t | t | f Tables: "CURRENT_SCHEMA.CURRENT_SCHEMA" @@ -1360,10 +1360,10 @@ ERROR: schema "testpub_view" does not exist -- dropping the schema should reflect the change in publication DROP SCHEMA pub_test3; \dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub2_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1371,20 +1371,20 @@ Tables from schemas: -- renaming the schema should reflect the change in publication ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed; \dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub2_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1_renamed" "pub_test2" ALTER SCHEMA pub_test1_renamed RENAME to pub_test1; \dRp+ testpub2_forschema - Publication testpub2_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub2_forschema + Owner | All tables | DDL | Inserts | 
Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1392,10 +1392,10 @@ Tables from schemas: -- alter publication add schema ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1404,10 +1404,10 @@ Tables from schemas: ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema; ERROR: schema "non_existent_schema" does not exist \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1416,10 +1416,10 @@ Tables from schemas: ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1; ERROR: schema "pub_test1" is already member of publication "testpub1_forschema" \dRp+ testpub1_forschema - Publication testpub1_forschema - 
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1427,10 +1427,10 @@ Tables from schemas: -- alter publication drop schema ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" @@ -1438,10 +1438,10 @@ Tables from schemas: ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; ERROR: tables from schema "pub_test2" are not part of the publication \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from 
schemas: "pub_test1" @@ -1449,29 +1449,29 @@ Tables from schemas: ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema; ERROR: schema "non_existent_schema" does not exist \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" -- drop all schemas ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f (1 row) -- alter publication set multiple schema ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root 
+--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1480,10 +1480,10 @@ Tables from schemas: ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema; ERROR: schema "non_existent_schema" does not exist \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" "pub_test2" @@ -1492,10 +1492,10 @@ Tables from schemas: -- removing the duplicate schemas ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1; \dRp+ testpub1_forschema - Publication testpub1_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub1_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" @@ -1574,18 +1574,18 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub3_forschema; RESET client_min_messages; \dRp+ testpub3_forschema - Publication testpub3_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub3_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f (1 row) ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1; \dRp+ testpub3_forschema - Publication testpub3_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub3_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables from schemas: "pub_test1" @@ -1595,20 +1595,20 @@ CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TA CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1; RESET client_min_messages; \dRp+ testpub_forschema_fortable - Publication testpub_forschema_fortable - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_forschema_fortable + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test2.tbl1" Tables from schemas: "pub_test1" \dRp+ testpub_fortable_forschema - Publication 
testpub_fortable_forschema - Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root ---------------------------+------------+---------+---------+---------+-----------+---------- - regress_publication_user | f | t | t | t | t | f + Publication testpub_fortable_forschema + Owner | All tables | DDL | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+-----+---------+---------+---------+-----------+---------- + regress_publication_user | f | f | t | t | t | t | f Tables: "pub_test2.tbl1" Tables from schemas: diff --git a/src/test/subscription/t/032_ddl_replication.pl b/src/test/subscription/t/032_ddl_replication.pl new file mode 100644 index 0000000000..66dcb012a6 --- /dev/null +++ b/src/test/subscription/t/032_ddl_replication.pl @@ -0,0 +1,465 @@ +# Copyright (c) 2022, PostgreSQL Global Development Group +# Regression tests for logical replication of DDLs +# +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +my $node_publisher = PostgreSQL::Test::Cluster->new('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +my $ddl = "CREATE TABLE test_rep(id int primary key, name varchar, value integer);"; +$node_publisher->safe_psql('postgres', $ddl); +$node_publisher->safe_psql('postgres', "INSERT INTO test_rep VALUES (1, 'data1', 1);"); +$node_subscriber->safe_psql('postgres', $ddl); + +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION mypub FOR ALL TABLES with (publish = 'insert, update, delete, ddl');"); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr' PUBLICATION mypub;"); +$node_publisher->wait_for_catchup('mysub'); + +# Make sure we have fully synchronized the table. +# This prevents ALTER TABLE command below from being executed during table synchronization. +$node_subscriber->poll_query_until('postgres', + "SELECT COUNT(1) = 0 FROM pg_subscription_rel sr WHERE sr.srsubstate NOT IN ('s', 'r') AND sr.srrelid = 'test_rep'::regclass" +); + +# Test ALTER TABLE ADD +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ADD c4 int;"); +$node_publisher->safe_psql('postgres', "INSERT INTO test_rep VALUES (2, 'data2', 2, 2);"); +$node_publisher->wait_for_catchup('mysub'); +my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_rep WHERE c4 = 2;"); +is($result, qq(1), 'ALTER test_rep ADD replicated'); + +# Test ALTER TABLE DROP +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep DROP c4;"); +$node_publisher->safe_psql('postgres', "DELETE FROM test_rep where id = 2;"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from test_rep;"); +is($result, qq(1), 'ALTER test_rep DROP replicated'); + +# Test ALTER TABLE ALTER TYPE +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ALTER value TYPE varchar"); +$node_publisher->safe_psql('postgres', "INSERT INTO test_rep VALUES (3, 'data3', '3');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_rep WHERE value = '3';"); +is($result, qq(1), 'ALTER test_rep ALTER COLUMN TYPE replicated'); + +# Test ALTER TABLE ALTER SET DEFAULT +# Check if we have the default value after the direct insert to subscriber node. 
+$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ALTER COLUMN value SET DEFAULT 'foo'"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->safe_psql('postgres', "INSERT INTO test_rep VALUES (4, 'data4');"); +$result = $node_subscriber->safe_psql('postgres', "SELECT value FROM test_rep WHERE id = 4;"); +is($result, 'foo', 'ALTER test_rep ALTER SET DEFAULT replicated'); + +# Test ALTER TABLE ALTER DROP DEFAULT +# Check if we don't have the default value previously defined. +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ALTER COLUMN value DROP DEFAULT;"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->safe_psql('postgres', "INSERT INTO test_rep VALUES (5, 'data5');"); +$result = $node_subscriber->safe_psql('postgres', "SELECT value IS NULL FROM test_rep WHERE id = 5;"); +is($result, q(t), 'ALTER test_rep ALTER DROP DEFAULT replicated'); + +# Test ALTER TABLE ALTER SET NOT NULL +# Remove the existing record that contains null value first. +my ($stdout, $stderr); +$node_subscriber->safe_psql('postgres', "DELETE FROM test_rep WHERE id = 5;"); +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ALTER value SET NOT NULL;"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO test_rep VALUES (6, 'data6');", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: null value in column \"value\" of relation \"test_rep\" violates not-null constraint/ + or die "failed to replicate ALTER TABLE ALTER SET NOT NULL"; + +# Test ALTER TABLE ALTER DROP NOT NULL +$node_publisher->safe_psql('postgres', "ALTER TABLE test_rep ALTER value DROP NOT NULL;"); +$node_publisher->wait_for_catchup('mysub'); +# Insert same data that has NULL value. This failed before but now should succeed. 
+$node_subscriber->safe_psql('postgres', "INSERT INTO test_rep VALUES (6, 'data6');"); +$result = $node_subscriber->safe_psql('postgres', "SELECT value IS NULL FROM test_rep WHERE id = 6;"); +is($result, q(t), "ALTER test_rep ALTER DROP NOT NULL replicated"); + +# Test ALTER TABLE SET UNLOGGED +$node_publisher->safe_psql('postgres', 'ALTER TABLE test_rep SET UNLOGGED;'); +$node_publisher->wait_for_catchup('mysub'); +$node_publisher->safe_psql('postgres', "INSERT INTO test_rep VALUES (7, 'data7', '7');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_rep WHERE id = 7;"); +is($result, qq(0), 'ALTER test_rep SET UNLOGGED replicated'); + +# Test ALTER TABLE SET LOGGED +$node_publisher->safe_psql('postgres', 'ALTER TABLE test_rep SET LOGGED;'); +$node_publisher->wait_for_catchup('mysub'); +$node_publisher->safe_psql('postgres', "INSERT INTO test_rep VALUES (8, 'data8', '8');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_rep WHERE id = 8;"); +is($result, qq(1), 'ALTER test_rep SET LOGGED replicated'); + +# Test CREATE TABLE and DML changes +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (a int, b varchar);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp';"); +is($result, qq(1), 'CREATE tmp is replicated'); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp values (1, 'a')"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp values (2, 'b')"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(2), 'DML Changes to tmp are replicated'); + +# Test CREATE TABLE INHERITS +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp2 (c3 int) INHERITS (tmp);"); +$node_publisher->safe_psql('postgres', "INSERT 
INTO tmp2 VALUES (1, 'a', 1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp2';"); +is($result, qq(1), 'CREATE TABLE INHERITS is replicated'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp2;"); +is($result, qq(1), 'inserting some data to inherited table replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp2"); + +# Test CREATE TABLE LIKE +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp3 (c3 int, LIKE tmp);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp3 VALUES (1, 1, 'a');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp3';"); +is($result, qq(1), 'CREATE TABLE LIKE replicated'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp3;"); +is($result, qq(1), 'insert some data to a table defined with LIKE replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp3"); + +# Test DROP TABLE +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp';"); +is($result, qq(0), 'TABLE tmp is dropped'); + +# Test CREATE UNLOGGED TABLE +$node_publisher->safe_psql('postgres', "CREATE UNLOGGED TABLE tmp (id int);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp';"); +is($result, qq(1), 'CREATE UNLOGGED TABLE is replicated correctly'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(0), 'inserting data to unlogged table is not replicated correctly'); +$node_publisher->safe_psql('postgres', "DROP TABLE 
tmp"); + +# Test CREATE TABLE IF NOT EXISTS +$node_publisher->safe_psql('postgres', "CREATE TABLE IF NOT EXISTS tmp (id int);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from pg_tables where tablename = 'tmp';"); +is($result, qq(1), 'CREATE TABLE IF NOT EXISTS replicated'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'inserting data to a table replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# Test CREATE TABLE IF NOT EXISTS (check if we skip creating a table +# when we have the table on the subscriber in advance, and if we succeed +# in replicating changes.) +$node_subscriber->safe_psql('postgres', "CREATE TABLE tmp (id int);"); +$node_publisher->safe_psql('postgres', "CREATE TABLE IF NOT EXISTS tmp (id int);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'CREATE TABLE IF NOT EXISTS replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# Test CREATE TABLE IF NOT EXISTS (check if we skip creating a table +# when we have the table on the publisher, but not on the subscriber.) 
+$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int);"); +$node_publisher->safe_psql('postgres', "CREATE TABLE IF NOT EXISTS tmp (id int);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'CREATE TABLE IF NOT EXISTS replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# Test CREATE TABLE with collate +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (name text COLLATE \"C\");"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES ('foo');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT collation_name FROM information_schema.columns WHERE table_name = 'tmp';"); +is($result, qq(C), 'CREATE TABLE with collate replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# Test CREATE TABLE with named constraint +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int CONSTRAINT \"must be bigger than 10\" CHECK (id > 10));"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (1);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: new row for relation "tmp" violates check constraint "must be bigger than 10"/ + or die "failed to replicate named constraint at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# Test CREATE TABLE with various types of constraints. 
+# NOT NULL constraint +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int, name text NOT NULL);"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (1);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: null value in column "name" of relation "tmp" violates not-null constraint/ + or die "failed to replicate non null constraint at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# NULL constraint +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int, name text NULL);"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$result = $node_subscriber->safe_psql('postgres', "SELECT name IS NULL FROM tmp;"); +is($result, qq(t), "CREATE TABLE with NULL constraint replicated"); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# CHECK constraint +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int, product_ame text, price int CHECK (price > 0));"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (1, 'foo', -100);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: new row for relation "tmp" violates check constraint "tmp_price_check"/ + or die "failed to replicate CHECK constraint"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# DEFAULT +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int, name text DEFAULT 'foo');"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$result = $node_subscriber->safe_psql('postgres', "SELECT name from tmp;"); +is($result, qq(foo), "CREATE TABLE with default value replicated"); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); + +# UNIQUE constraint +$node_publisher->safe_psql('postgres', "CREATE TABLE 
tmp (id int UNIQUE);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (1);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: duplicate key value violates unique constraint "tmp_id_key"/ + or die "failed to replicate constraint at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# PRIMARY KEY +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int PRIMARY KEY, name text);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1, 'foo');"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (1, 'bar');", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: duplicate key value violates unique constraint "tmp_pkey"/ + or die "failed to replicate primary key at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# EXCLUDE +$node_publisher->safe_psql('postgres', "CREATE TABLE circles (c circle, EXCLUDE USING gist (c WITH &&));"); +$node_publisher->safe_psql('postgres', "INSERT INTO circles VALUES ('<(1, 1), 1>'::circle);"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO circles VALUES ('<(1, 1), 1>'::circle);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: conflicting key value violates exclusion constraint "circles_c_excl"/ + or die "failed to replicate EXCLUDE at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE circles"); + +# REFERENCES +$node_publisher->safe_psql('postgres', "CREATE TABLE product (id int PRIMARY KEY, name text);"); +$node_publisher->safe_psql('postgres', "INSERT INTO product VALUES (1, 'foo');"); +$node_publisher->safe_psql('postgres', "INSERT INTO product VALUES (2, 'bar');"); 
+$node_publisher->safe_psql('postgres', "CREATE TABLE orders (order_id int PRIMARY KEY, product_id int REFERENCES product (id))"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO orders VALUES (1, 10)", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: insert or update on table "orders" violates foreign key constraint "orders_product_id_fkey"/ + or die "failed to replicate REFERENCES at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE orders"); +$node_publisher->safe_psql('postgres', "DROP TABLE product"); + +# DEFERRABLE +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int PRIMARY KEY DEFERRABLE, name text);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1, 'foo');"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (2, 'bar');"); +$node_publisher->wait_for_catchup('mysub'); +# Quick check of deferrable clause +$node_subscriber->safe_psql('postgres', "UPDATE tmp SET id = id + 1;"); +# Also, execute a test that should fail for INITIALLY IMMEDIATE(the default) +$node_subscriber->psql('postgres', qq( +BEGIN; +UPDATE tmp SET id = id + 1; +INSERT INTO tmp VALUES (3, 'foobar'); +DELETE FROM tmp WHERE id = 3; +COMMIT; +), on_error_stop => 0, stderr => \$stderr, stdout => \$stdout); +$stderr =~ /ERROR: duplicate key value violates unique constraint "tmp_pkey"/ + or die "failed to replicate DEFERRABLE at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# NOT DEFERRABLE +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int PRIMARY KEY NOT DEFERRABLE, name text);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1, 'foo');"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (2, 'bar');"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "UPDATE tmp SET id = id + 1;", + on_error_stop => 0, + stderr => \$stderr, + stdout 
=> \$stdout); +$stderr =~ /ERROR: duplicate key value violates unique constraint "tmp_pkey"/ + or die "failed to replicate NOT DEFERRABLE at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# DEFERRABLE and INITIALLY DEFERRED +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int PRIMARY KEY DEFERRABLE INITIALLY DEFERRED, name text);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1, 'foo');"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (2, 'bar');"); +$node_publisher->wait_for_catchup('mysub'); +# Quick check of deferrable clause +$node_subscriber->safe_psql('postgres', "UPDATE tmp SET id = id + 1;"); +# Also, execute a test that should succeed for INITIALLY DEFERRED +$node_subscriber->safe_psql('postgres', qq( +BEGIN; +UPDATE tmp SET id = id + 1; +INSERT INTO tmp VALUES (3, 'foobar'); +DELETE FROM tmp WHERE id = 3; +COMMIT; +)); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# Test CREATE TABLE with table constraint +# We will set two checks and conduct two inserts that should fail respectively. 
+$node_publisher->safe_psql('postgres', + "CREATE TABLE tmp (price int, discounted_price int, CHECK (discounted_price > 0 AND price > discounted_price));"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (100, 0);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: new row for relation "tmp" violates check constraint "tmp_check"/ + or die "failed to replicate table constraint (first condition) at creating table"; +$node_subscriber->psql('postgres', "INSERT INTO tmp VALUES (50, 100);", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stderr =~ /ERROR: new row for relation "tmp" violates check constraint "tmp_check"/ + or die "failed to replicate table constraint (second condition) at creating table"; +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# Test CREATE TABLE WITH storage_parameter +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int) WITH (fillfactor = 80);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$node_subscriber->psql('postgres', "SELECT reloptions FROM pg_class WHERE relname = 'tmp';", + on_error_stop => 0, + stderr => \$stderr, + stdout => \$stdout); +$stdout =~ /{fillfactor=80}/ + or die "failed to replicate storage option"; +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'CREATE TABLE with storage_parameter replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# Test CREATE TABLE TABLESPACE (creating a tablespace is not replicated) +# Prepare the directories for the publisher and subscriber first. 
+my ($basedir, $tablespace_dir); + +$basedir = $node_publisher->basedir; +$tablespace_dir = "$basedir/tblspc_pub"; +mkdir($tablespace_dir); +$node_publisher->safe_psql('postgres', "CREATE TABLESPACE mytblspc LOCATION '$tablespace_dir';"); +$basedir = $node_subscriber->basedir; +$tablespace_dir = "$basedir/tblspc_sub"; +mkdir ($tablespace_dir); +$node_subscriber->safe_psql('postgres', "CREATE TABLESPACE mytblspc LOCATION '$tablespace_dir';"); + +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (id int) TABLESPACE mytblspc;"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1);"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'CREATE TABLE TABLESPACE replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp;"); + +# Test CREATE TYPE +$node_publisher->safe_psql('postgres', "CREATE TYPE mytype AS (id int, name text, age int);"); +$node_publisher->wait_for_catchup('mysub'); +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp OF mytype;"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES (1, 'bar');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) from tmp;"); +is($result, qq(1), 'CREATE TABLE OF replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); +$node_publisher->safe_psql('postgres', "DROP TYPE mytype"); + +# Test CREATE ENUM TYPE +$node_publisher->safe_psql('postgres', "CREATE TYPE myenumtype AS ENUM ('new', 'open', 'closed');"); +$node_publisher->wait_for_catchup('mysub'); +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (status myenumtype);"); +$node_publisher->safe_psql('postgres', "INSERT INTO tmp VALUES ('new');"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT * from tmp;"); +is($result, qq(new), 'CREATE TABLE OF replicated'); 
+$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); +$node_publisher->safe_psql('postgres', "DROP TYPE myenumtype"); + +# Test CREATE RANGE TYPE +$node_publisher->safe_psql('postgres', "CREATE TYPE float8_range AS RANGE (subtype = float8, subtype_diff = float8mi);"); +$node_publisher->wait_for_catchup('mysub'); +$node_publisher->safe_psql('postgres', "CREATE TABLE tmp (val float8_range);"); +$node_publisher->safe_psql('postgres', "insert into tmp values(float8_range(-12.34, '1.111113e3'));"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT * from tmp;"); +is($result, qq([-12.34,1111.113\)), 'CREATE TABLE OF replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); +$node_publisher->safe_psql('postgres', "DROP TYPE float8_range"); + +# Test CREATE BASE DATA TYPE and use it in table definition +$node_publisher->safe_psql('postgres', " +CREATE TYPE int42; +CREATE FUNCTION int42_in(cstring) + RETURNS int42 + AS 'int4in' + LANGUAGE internal STRICT IMMUTABLE; +CREATE FUNCTION int42_out(int42) + RETURNS cstring + AS 'int4out' + LANGUAGE internal STRICT IMMUTABLE; +CREATE TYPE int42 ( + internallength = 4, + input = int42_in, + output = int42_out, + alignment = int4, + default = 42, + passedbyvalue +); +CREATE TABLE tmp (f1 int42); +INSERT INTO tmp DEFAULT VALUES; +"); +$node_publisher->wait_for_catchup('mysub'); +$result = $node_subscriber->safe_psql('postgres', "SELECT * from tmp;"); +is($result, qq(42), 'CREATE TABLE OF replicated'); +$node_publisher->safe_psql('postgres', "DROP TABLE tmp"); +$node_publisher->safe_psql('postgres', "DROP TYPE int42 cascade"); + +pass "DDL replication tests passed:"; + +$node_subscriber->stop; +$node_publisher->stop; + +done_testing(); diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 5cd8af9e66..1df66653f7 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -559,6 +559,7 @@ DefElemAction 
DefaultACLInfo DefineStmt DeleteStmt +DeparsedCommandType DependencyGenerator DependencyGeneratorData DependencyType @@ -1439,6 +1440,8 @@ LogicalDecodeBeginPrepareCB LogicalDecodeChangeCB LogicalDecodeCommitCB LogicalDecodeCommitPreparedCB +LogicalDecodeDDLMessageCB +LogicalDecodeStreamDDLMessageCB LogicalDecodeFilterByOriginCB LogicalDecodeFilterPrepareCB LogicalDecodeMessageCB @@ -2287,6 +2290,8 @@ ReorderBufferChange ReorderBufferChangeType ReorderBufferCommitCB ReorderBufferCommitPreparedCB +ReorderBufferDDLMessageCB +ReorderBufferStreamDDLMessageCB ReorderBufferDiskChange ReorderBufferIterTXNEntry ReorderBufferIterTXNState -- 2.38.1