drop table if exists t1;
create table t1 (a varchar(16), b int) engine=innodb;
create trigger t1_bi before insert on t1 for each row
begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end|
select trigger_schema, trigger_name, event_object_schema,
event_object_table, action_statement from information_schema.triggers
where event_object_schema = 'test' and event_object_table = 't1';
trigger_schema trigger_name event_object_schema event_object_table action_statement
test t1_bi test t1 begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end
insert into t1 values ('The Lion', 10);
select * from t1;
a b
THE LION 13
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
test.t1 optimize status OK
select trigger_schema, trigger_name, event_object_schema,
event_object_table, action_statement from information_schema.triggers
where event_object_schema = 'test' and event_object_table = 't1';
trigger_schema trigger_name event_object_schema event_object_table action_statement
test t1_bi test t1 begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end
insert into t1 values ('The Unicorn', 20);
select * from t1;
a b
THE LION 13
THE UNICORN 23
alter table t1 add column c int default 0;
select trigger_schema, trigger_name, event_object_schema,
event_object_table, action_statement from information_schema.triggers
where event_object_schema = 'test' and event_object_table = 't1';
trigger_schema trigger_name event_object_schema event_object_table action_statement
test t1_bi test t1 begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end
insert into t1 values ('Alice', 30, 1);
select * from t1;
a b c
THE LION 13 0
THE UNICORN 23 0
ALICE 33 1
alter table t1 rename to t1;
select trigger_schema, trigger_name, event_object_schema,
event_object_table, action_statement from information_schema.triggers
where event_object_schema = 'test' and event_object_table = 't1';
trigger_schema trigger_name event_object_schema event_object_table action_statement
test t1_bi test t1 begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end
insert into t1 values ('The Crown', 40, 1);
select * from t1;
a b c
THE LION 13 0
THE UNICORN 23 0
ALICE 33 1
THE CROWN 43 1
alter table t1 rename to t1, add column d int default 0;
select trigger_schema, trigger_name, event_object_schema,
event_object_table, action_statement from information_schema.triggers
where event_object_schema = 'test' and event_object_table = 't1';
trigger_schema trigger_name event_object_schema event_object_table action_statement
test t1_bi test t1 begin
set new.a := upper(new.a);
set new.b := new.b + 3;
end
insert into t1 values ('The Pie', 50, 1, 1);
select * from t1;
a b c d
THE LION 13 0 0
THE UNICORN 23 0 0
ALICE 33 1 0
THE CROWN 43 1 0
THE PIE 53 1 1
drop table t1;
Bug#26141 mixing table types in trigger causes full
table lock on innodb table

Ensure we do not open and lock tables for the triggers we do not
fire.

drop table if exists t1, t2, t3;
drop trigger if exists trg_bug26141_au;
drop trigger if exists trg_bug26141_ai;
create table t1 (c int primary key) engine=innodb;
create table t2 (c int) engine=myisam;
create table t3 (c int) engine=myisam;
insert into t1 (c) values (1);
create trigger trg_bug26141_ai after insert on t1
for each row
begin
insert into t2 (c) values (1);
# We need the 'sync' lock to synchronously wait in connection 2 till
# the moment when the trigger acquired all the locks.
select release_lock("lock_bug26141_sync") into @a;
# 1000 is time in seconds of lock wait timeout -- this is a way
# to cause a manageable sleep up to 1000 seconds
select get_lock("lock_bug26141_wait", 1000) into @a;
end|
create trigger trg_bug26141_au after update on t1
for each row
begin
insert into t3 (c) values (1);
end|
connect connection_aux,localhost,root,,test,,;
connect connection_update,localhost,root,,test,,;
connection connection_aux;
select get_lock("lock_bug26141_wait", 0);
get_lock("lock_bug26141_wait", 0)
1
connection default;
select get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0);
get_lock("lock_bug26141_sync", /* must not be priorly locked */ 0)
1
insert into t1 (c) values (2);
connection connection_update;
select get_lock("lock_bug26141_sync", 1000);
get_lock("lock_bug26141_sync", 1000)
1
update t1 set c=3 where c=1;
select release_lock("lock_bug26141_sync");
release_lock("lock_bug26141_sync")
1
connection connection_aux;
select release_lock("lock_bug26141_wait");
release_lock("lock_bug26141_wait")
1
connection default;
select * from t1;
c
2
3
select * from t2;
c
1
select * from t3;
c
1
drop table t1, t2, t3;
disconnect connection_update;
disconnect connection_aux;
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=innodb;
CREATE TABLE t2(b INT, FOREIGN KEY(b) REFERENCES t1(a)) ENGINE=innodb;
INSERT INTO t1 VALUES (1);
CREATE TRIGGER t1_bd BEFORE DELETE ON t1 FOR EACH ROW SET @a = 1;
CREATE TRIGGER t1_ad AFTER DELETE ON t1 FOR EACH ROW SET @b = 1;
SET @a = 0;
SET @b = 0;
TRUNCATE t1;
ERROR 42000: Cannot truncate a table referenced in a foreign key constraint (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`b`) REFERENCES `test`.`t1` (`a`))
SELECT @a, @b;
@a @b
0 0
DELETE FROM t1;
SELECT @a, @b;
@a @b
1 1
INSERT INTO t1 VALUES (1);
DELETE FROM t1;
SELECT @a, @b;
@a @b
1 1
DROP TABLE t2, t1;
End of 5.0 tests
BUG#31612
Trigger fired multiple times leads to gaps in auto_increment sequence
create table t1 (a int, val char(1)) engine=InnoDB;
create table t2 (b int auto_increment primary key,
val char(1)) engine=InnoDB;
create trigger t1_after_insert after
insert on t1 for each row insert into t2 set val=NEW.val;
insert into t1 values ( 123, 'a'), ( 123, 'b'), ( 123, 'c'),
(123, 'd'), (123, 'e'), (123, 'f'), (123, 'g');
insert into t1 values ( 654, 'a'), ( 654, 'b'), ( 654, 'c'),
(654, 'd'), (654, 'e'), (654, 'f'), (654, 'g');
select * from t2 order by b;
b val
1 a
2 b
3 c
4 d
5 e
6 f
7 g
8 a
9 b
10 c
11 d
12 e
13 f
14 g
drop trigger t1_after_insert;
drop table t1,t2;
#
#Bug#19683834 SOME INNODB ERRORS CAUSES STORED FUNCTION
# AND TRIGGER HANDLERS TO BE IGNORED
#Code fixed in Bug#16041903
CREATE TABLE t1 (id int unsigned PRIMARY KEY, val int DEFAULT 0)
ENGINE=InnoDB;
INSERT INTO t1 (id) VALUES (1), (2);
CREATE TABLE t2 (id int PRIMARY KEY);
CREATE TABLE t3 LIKE t2;
CREATE TRIGGER bef_insert BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
DECLARE CONTINUE HANDLER FOR 1062 BEGIN END;
INSERT INTO t3 (id) VALUES (NEW.id);
INSERT INTO t3 (id) VALUES (NEW.id);
END//
START TRANSACTION;
UPDATE t1 SET val = val + 1;
connect con2,localhost,root,,test,,;
SET SESSION innodb_lock_wait_timeout = 2;
UPDATE t1 SET val = val + 1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
INSERT INTO t2 (id) VALUES (1);
disconnect con2;
connection default;
DROP TABLE t3, t2, t1;
#
# MDEV-25738 Assertion `ticket->m_duration == MDL_EXPLICIT' failed in
# void MDL_context::release_lock(MDL_ticket*)
#
CREATE TABLE t1 (id int(11)) ENGINE=InnoDB;
LOCK TABLES t1 WRITE;
SET max_statement_time= 0.001;
CREATE TRIGGER tr16 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO t1 VALUES (1);
SET max_statement_time= default;
DROP TRIGGER IF EXISTS trg16;
DROP TABLE t1;
#
# End of 10.5 tests
#