From a9e257693790a8b3823b598edd071de7478b6bd0 Mon Sep 17 00:00:00 2001 From: Amit Langote Date: Wed, 6 Sep 2023 17:54:15 +0900 Subject: [PATCH v47 8/9] Delay locking of child tables in cached plans until ExecutorStart() Currently, GetCachedPlan() takes a lock on all relations contained in a cached plan before returning it as a valid plan to its callers for execution. One disadvantage is that if the plan contains partitions that are prunable with conditions involving EXTERN parameters and other stable expressions (known as "initial pruning"), many of them would be locked unnecessarily, because only those that survive initial pruning need to have been locked. Locking all partitions this way causes significant delay when there are many partitions. Note that initial pruning occurs during the executor's initialization of the plan, that is, ExecInitNode(). Previous commits have made all the necessary adjustments to make the executor lock child tables, to detect invalidation of the CachedPlan resulting from that, and to retry the execution with a new CachedPlan. So, this commit simply removes the code in plancache.c that does the "for execution" locking, aka AcquireExecutorLocks(). 
Discussion: https://postgr.es/m/CA+HiwqFGkMSge6TgC9KQzde0ohpAycLQuV7ooitEEpbKB0O_mg@mail.gmail.com --- src/backend/executor/spi.c | 2 +- src/backend/tcop/pquery.c | 6 +- src/backend/utils/cache/plancache.c | 154 +++++++---------- src/test/modules/delay_execution/Makefile | 3 +- .../modules/delay_execution/delay_execution.c | 67 +++++++- .../expected/cached-plan-replan.out | 158 ++++++++++++++++++ .../specs/cached-plan-replan.spec | 61 +++++++ 7 files changed, 343 insertions(+), 108 deletions(-) create mode 100644 src/test/modules/delay_execution/expected/cached-plan-replan.out create mode 100644 src/test/modules/delay_execution/specs/cached-plan-replan.spec diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 814ff1390f..9c4ed74240 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -2680,7 +2680,7 @@ replan: snap = InvalidSnapshot; qdesc = CreateQueryDesc(stmt, - NULL, + cplan, plansource->query_string, snap, crosscheck_snapshot, dest, diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index fcf9925ed4..8d0772ae29 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -412,7 +412,7 @@ PortalStart(Portal portal, ParamListInfo params, * set the destination to DestNone. */ queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts), - NULL, + portal->cplan, portal->sourceText, GetActiveSnapshot(), InvalidSnapshot, @@ -443,6 +443,7 @@ PortalStart(Portal portal, ParamListInfo params, */ if (!ExecutorStart(queryDesc, myeflags)) { + Assert(queryDesc->cplan); ExecutorEnd(queryDesc); FreeQueryDesc(queryDesc); PopActiveSnapshot(); @@ -542,7 +543,7 @@ PortalStart(Portal portal, ParamListInfo params, * PortalRunMulti() before calling ExecutorRun(). */ queryDesc = CreateQueryDesc(plan, - NULL, + portal->cplan, portal->sourceText, !is_utility ? 
GetActiveSnapshot() : @@ -566,6 +567,7 @@ PortalStart(Portal portal, ParamListInfo params, if (!ExecutorStart(queryDesc, myeflags)) { PopActiveSnapshot(); + Assert(queryDesc->cplan); ExecutorEnd(queryDesc); FreeQueryDesc(queryDesc); plan_valid = false; diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 7d4168f82f..35d903cb98 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -104,13 +104,13 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); +static bool GenericPlanIsValid(CachedPlan *cplan); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); static bool choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams); static double cached_plan_cost(CachedPlan *plan, bool include_planner); static Query *QueryListGetPrimaryStmt(List *stmts); -static void AcquireExecutorLocks(List *stmt_list, bool acquire); static void AcquirePlannerLocks(List *stmt_list, bool acquire); static void ScanQueryForLocks(Query *parsetree, bool acquire); static bool ScanQueryWalker(Node *node, bool *acquire); @@ -792,8 +792,13 @@ RevalidateCachedQuery(CachedPlanSource *plansource, * Caller must have already called RevalidateCachedQuery to verify that the * querytree is up to date. * - * On a "true" return, we have acquired the locks needed to run the plan. - * (We must do this for the "true" result to be race-condition-free.) + * If the plan includes child relations introduced by the planner, they + * wouldn't be locked yet. This is because AcquirePlannerLocks() only locks + * relations present in the original query's range table (before planner + * entry). Hence, the plan might become stale if child relations are modified + * concurrently. 
During the plan initialization, the executor must ensure the + * plan (CachedPlan) remains valid after locking each child table. If found + * invalid, the caller should be prompted to recreate the plan. */ static bool CheckCachedPlan(CachedPlanSource *plansource) @@ -807,60 +812,56 @@ CheckCachedPlan(CachedPlanSource *plansource) if (!plan) return false; - Assert(plan->magic == CACHEDPLAN_MAGIC); - /* Generic plans are never one-shot */ - Assert(!plan->is_oneshot); + if (GenericPlanIsValid(plan)) + return true; /* - * If plan isn't valid for current role, we can't use it. + * Plan has been invalidated, so unlink it from the parent and release it. */ - if (plan->is_valid && plan->dependsOnRole && - plan->planRoleId != GetUserId()) - plan->is_valid = false; + ReleaseGenericPlan(plansource); - /* - * If it appears valid, acquire locks and recheck; this is much the same - * logic as in RevalidateCachedQuery, but for a plan. - */ - if (plan->is_valid) + return false; +} + +/* + * GenericPlanIsValid + * Is a generic plan still valid? + * + * It may have gone stale due to concurrent schema modifications of relations + * mentioned in the plan or a couple of other things mentioned below. + */ +static bool +GenericPlanIsValid(CachedPlan *cplan) +{ + Assert(cplan != NULL); + Assert(cplan->magic == CACHEDPLAN_MAGIC); + /* Generic plans are never one-shot */ + Assert(!cplan->is_oneshot); + + if (cplan->is_valid) { /* * Plan must have positive refcount because it is referenced by * plansource; so no need to fear it disappears under us here. */ - Assert(plan->refcount > 0); - - AcquireExecutorLocks(plan->stmt_list, true); + Assert(cplan->refcount > 0); /* - * If plan was transient, check to see if TransactionXmin has - * advanced, and if so invalidate it. + * If plan isn't valid for current role, we can't use it. 
*/ - if (plan->is_valid && - TransactionIdIsValid(plan->saved_xmin) && - !TransactionIdEquals(plan->saved_xmin, TransactionXmin)) - plan->is_valid = false; + if (cplan->dependsOnRole && cplan->planRoleId != GetUserId()) + cplan->is_valid = false; /* - * By now, if any invalidation has happened, the inval callback - * functions will have marked the plan invalid. + * If plan was transient, check to see if TransactionXmin has + * advanced, and if so invalidate it. */ - if (plan->is_valid) - { - /* Successfully revalidated and locked the query. */ - return true; - } - - /* Oops, the race case happened. Release useless locks. */ - AcquireExecutorLocks(plan->stmt_list, false); + if (TransactionIdIsValid(cplan->saved_xmin) && + !TransactionIdEquals(cplan->saved_xmin, TransactionXmin)) + cplan->is_valid = false; } - /* - * Plan has been invalidated, so unlink it from the parent and release it. - */ - ReleaseGenericPlan(plansource); - - return false; + return cplan->is_valid; } /* @@ -1130,8 +1131,16 @@ cached_plan_cost(CachedPlan *plan, bool include_planner) * plan or a custom plan for the given parameters: the caller does not know * which it will get. * - * On return, the plan is valid and we have sufficient locks to begin - * execution. + * Typically, the plan returned by this function is valid. However, a caveat + * arises with inheritance/partition child tables. These aren't locked by + * this function, as we only lock tables directly mentioned in the original + * query here. The task of locking these child tables falls to the executor + * during plan tree setup. If acquiring these locks invalidates the plan, the + * executor should inform the caller to regenerate the plan by invoking this + * function again. The reason for this deferred child table locking mechanism + * is efficiency: not all might need to be locked. Some could be pruned during + * executor initialization, especially if their corresponding plan nodes + * facilitate partition pruning. 
* * On return, the refcount of the plan has been incremented; a later * ReleaseCachedPlan() call is expected. If "owner" is not NULL then @@ -1166,7 +1175,10 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, { if (CheckCachedPlan(plansource)) { - /* We want a generic plan, and we already have a valid one */ + /* + * We want a generic plan, and we already have a valid one, though + * see the header comment. + */ plan = plansource->gplan; Assert(plan->magic == CACHEDPLAN_MAGIC); } @@ -1364,8 +1376,8 @@ CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, } /* - * Reject if AcquireExecutorLocks would have anything to do. This is - * probably unnecessary given the previous check, but let's be safe. + * Reject if the executor would need to take additional locks, that is, in + * addition to those taken by AcquirePlannerLocks() on a given query. */ foreach(lc, plan->stmt_list) { @@ -1741,58 +1753,6 @@ QueryListGetPrimaryStmt(List *stmts) return NULL; } -/* - * AcquireExecutorLocks: acquire locks needed for execution of a cached plan; - * or release them if acquire is false. - */ -static void -AcquireExecutorLocks(List *stmt_list, bool acquire) -{ - ListCell *lc1; - - foreach(lc1, stmt_list) - { - PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1); - ListCell *lc2; - - if (plannedstmt->commandType == CMD_UTILITY) - { - /* - * Ignore utility statements, except those (such as EXPLAIN) that - * contain a parsed-but-not-planned query. Note: it's okay to use - * ScanQueryForLocks, even though the query hasn't been through - * rule rewriting, because rewriting doesn't change the query - * representation. 
- */ - Query *query = UtilityContainsQuery(plannedstmt->utilityStmt); - - if (query) - ScanQueryForLocks(query, acquire); - continue; - } - - foreach(lc2, plannedstmt->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2); - - if (!(rte->rtekind == RTE_RELATION || - (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)))) - continue; - - /* - * Acquire the appropriate type of lock on each relation OID. Note - * that we don't actually try to open the rel, and hence will not - * fail if it's been dropped entirely --- we'll just transiently - * acquire a non-conflicting lock. - */ - if (acquire) - LockRelationOid(rte->relid, rte->rellockmode); - else - UnlockRelationOid(rte->relid, rte->rellockmode); - } - } -} - /* * AcquirePlannerLocks: acquire locks needed for planning of a querytree list; * or release them if acquire is false. diff --git a/src/test/modules/delay_execution/Makefile b/src/test/modules/delay_execution/Makefile index 70f24e846d..2fca84d027 100644 --- a/src/test/modules/delay_execution/Makefile +++ b/src/test/modules/delay_execution/Makefile @@ -8,7 +8,8 @@ OBJS = \ delay_execution.o ISOLATION = partition-addition \ - partition-removal-1 + partition-removal-1 \ + cached-plan-replan ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c index 7cd76eb34b..ce189156ad 100644 --- a/src/test/modules/delay_execution/delay_execution.c +++ b/src/test/modules/delay_execution/delay_execution.c @@ -1,14 +1,18 @@ /*------------------------------------------------------------------------- * * delay_execution.c - * Test module to allow delay between parsing and execution of a query. + * Test module to introduce delay at various points during execution of a + * query to test that execution proceeds safely in light of concurrent + * changes. * * The delay is implemented by taking and immediately releasing a specified * advisory lock. 
If another process has previously taken that lock, the * current process will be blocked until the lock is released; otherwise, * there's no effect. This allows an isolationtester script to reliably - * test behaviors where some specified action happens in another backend - * between parsing and execution of any desired query. + * test behaviors where some specified action happens in another backend in + * a couple of cases: 1) between parsing and execution of any desired query + * when using the planner_hook, 2) between RevalidateCachedQuery() and + * ExecutorStart() when using the ExecutorStart_hook. * * Copyright (c) 2020-2023, PostgreSQL Global Development Group * @@ -22,6 +26,7 @@ #include +#include "executor/executor.h" #include "optimizer/planner.h" #include "utils/builtins.h" #include "utils/guc.h" @@ -32,9 +37,11 @@ PG_MODULE_MAGIC; /* GUC: advisory lock ID to use. Zero disables the feature. */ static int post_planning_lock_id = 0; +static int executor_start_lock_id = 0; -/* Save previous planner hook user to be a good citizen */ +/* Save previous hook users to be a good citizen */ static planner_hook_type prev_planner_hook = NULL; +static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL; /* planner_hook function to provide the desired delay */ @@ -70,11 +77,45 @@ delay_execution_planner(Query *parse, const char *query_string, return result; } +/* ExecutorStart_hook function to provide the desired delay */ +static bool +delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + bool plan_valid; + + /* If enabled, delay by taking and releasing the specified lock */ + if (executor_start_lock_id != 0) + { + DirectFunctionCall1(pg_advisory_lock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + DirectFunctionCall1(pg_advisory_unlock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + + /* + * Ensure that we notice any pending invalidations, since the advisory + * lock functions don't do this. 
+ */ + AcceptInvalidationMessages(); + } + + /* Now start the executor, possibly via a previous hook user */ + if (prev_ExecutorStart_hook) + plan_valid = prev_ExecutorStart_hook(queryDesc, eflags); + else + plan_valid = standard_ExecutorStart(queryDesc, eflags); + + if (executor_start_lock_id != 0) + elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s", + plan_valid ? "valid" : "not valid"); + + return plan_valid; +} + /* Module load function */ void _PG_init(void) { - /* Set up the GUC to control which lock is used */ + /* Set up GUCs to control which lock is used */ DefineCustomIntVariable("delay_execution.post_planning_lock_id", "Sets the advisory lock ID to be locked/unlocked after planning.", "Zero disables the delay.", @@ -86,10 +127,22 @@ _PG_init(void) NULL, NULL, NULL); - + DefineCustomIntVariable("delay_execution.executor_start_lock_id", + "Sets the advisory lock ID to be locked/unlocked before starting execution.", + "Zero disables the delay.", + &executor_start_lock_id, + 0, + 0, INT_MAX, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); MarkGUCPrefixReserved("delay_execution"); - /* Install our hook */ + /* Install our hooks. 
*/ prev_planner_hook = planner_hook; planner_hook = delay_execution_planner; + prev_ExecutorStart_hook = ExecutorStart_hook; + ExecutorStart_hook = delay_execution_ExecutorStart; } diff --git a/src/test/modules/delay_execution/expected/cached-plan-replan.out b/src/test/modules/delay_execution/expected/cached-plan-replan.out new file mode 100644 index 0000000000..122d81f2ee --- /dev/null +++ b/src/test/modules/delay_execution/expected/cached-plan-replan.out @@ -0,0 +1,158 @@ +Parsed test spec with 2 sessions + +starting permutation: s1prep s2lock s1exec s2dropi s2unlock +step s1prep: SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1 FOR UPDATE; + EXPLAIN (COSTS OFF) EXECUTE q (1); +QUERY PLAN +---------------------------------------------- +LockRows + -> Append + Subplans Removed: 1 + -> Bitmap Heap Scan on foo11 foo_1 + Recheck Cond: (a = $1) + -> Bitmap Index Scan on foo11_a + Index Cond: (a = $1) +(7 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +----------------------------------- +LockRows + -> Append + Subplans Removed: 1 + -> Seq Scan on foo11 foo_1 + Filter: (a = $1) +(5 rows) + + +starting permutation: s1prep2 s2lock s1exec2 s2dropi s2unlock +step s1prep2: SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +---------------------------------- +Bitmap Heap Scan on foo11 foo + Recheck Cond: (a = 1) + -> Bitmap Index Scan on foo11_a + Index Cond: (a = 1) +(4 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec2: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec2: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------- +Seq Scan on foo11 foo + Filter: (a = 1) +(2 rows) + + +starting permutation: s1prep3 s2lock s1exec3 s2dropi s2unlock +step s1prep3: SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +------------------------------------------------------------ +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Index Only Scan using foo11_a on foo11 t1 + -> Materialize + -> Index Scan using foo11_a on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(18 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec3: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec3: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Sort + Sort Key: t1.a + -> Seq Scan on foo11 t1 + -> Sort + Sort Key: t2.a + -> Seq Scan on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(21 rows) + diff --git a/src/test/modules/delay_execution/specs/cached-plan-replan.spec b/src/test/modules/delay_execution/specs/cached-plan-replan.spec new file mode 100644 index 0000000000..2d0607b176 --- /dev/null +++ b/src/test/modules/delay_execution/specs/cached-plan-replan.spec @@ -0,0 +1,61 @@ +# Test to check that invalidation of cached generic plans during ExecutorStart +# correctly triggers replanning and re-execution. 
+ +setup +{ + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1) PARTITION BY LIST (a); + CREATE TABLE foo11 PARTITION OF foo1 FOR VALUES IN (1); + CREATE INDEX foo11_a ON foo11 (a); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + CREATE VIEW foov AS SELECT * FROM foo; +} + +teardown +{ + DROP VIEW foov; + DROP TABLE foo; +} + +session "s1" +# Append with run-time pruning +step "s1prep" { SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1 FOR UPDATE; + EXPLAIN (COSTS OFF) EXECUTE q (1); } + +# no Append case (only one partition selected by the planner) +step "s1prep2" { SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; } + +# Append with partition-wise join aggregate and join plans as child subplans +step "s1prep3" { SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +# Executes a generic plan +step "s1exec" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); } +step "s1exec2" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; } +step "s1exec3" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +session "s2" +step "s2lock" { SELECT pg_advisory_lock(12345); } +step "s2unlock" { SELECT pg_advisory_unlock(12345); } +step "s2dropi" { DROP INDEX foo11_a; } + +# While "s1exec", etc. wait to acquire the advisory lock, "s2drop" is able to +# drop the index being used in the cached plan. 
When "s1exec" is then +# unblocked and initializes the cached plan for execution, it detects the +# concurrent index drop and causes the cached plan to be discarded and +# recreated without the index. +permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock" +permutation "s1prep2" "s2lock" "s1exec2" "s2dropi" "s2unlock" +permutation "s1prep3" "s2lock" "s1exec3" "s2dropi" "s2unlock" -- 2.35.3