mariadb/mysql-test/r/handler_myisam.result

SET SESSION STORAGE_ENGINE = MyISAM;
drop table if exists t1,t3,t4,t5;
create table t1 (a int, b char(10), key a(a), key b(a,b));
insert into t1 values
(17,"ddd"),(18,"eee"),(19,"fff"),(19,"yyy"),
(14,"aaa"),(15,"bbb"),(16,"ccc"),(16,"xxx"),
(20,"ggg"),(21,"hhh"),(22,"iii");
handler t1 open as t2;
handler t2 read a=(SELECT 1);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'SELECT 1)' at line 1
handler t2 read a first;
a b
14 aaa
handler t2 read a next;
a b
15 bbb
handler t2 read a next;
a b
16 ccc
handler t2 read a prev;
a b
15 bbb
handler t2 read a last;
a b
22 iii
handler t2 read a prev;
a b
21 hhh
handler t2 read a prev;
a b
20 ggg
handler t2 read a first;
a b
14 aaa
handler t2 read a prev;
a b
handler t2 read a last;
a b
22 iii
handler t2 read a prev;
a b
21 hhh
handler t2 read a next;
a b
22 iii
handler t2 read a next;
a b
handler t2 read a=(15);
a b
15 bbb
handler t2 read a=(16);
a b
16 ccc
handler t2 read a=(19,"fff");
ERROR 42000: Too many key parts specified; max 1 parts allowed
handler t2 read b=(19,"fff");
a b
19 fff
handler t2 read b=(19,"yyy");
a b
19 yyy
handler t2 read b=(19);
a b
19 fff
handler t1 read a last;
ERROR 42S02: Unknown table 't1' in HANDLER
handler t2 read a=(11);
a b
handler t2 read a>=(11);
a b
14 aaa
handler t2 read a=(18);
a b
18 eee
handler t2 read a>=(18);
a b
18 eee
handler t2 read a>(18);
a b
19 fff
handler t2 read a<=(18);
a b
18 eee
handler t2 read a<(18);
a b
17 ddd
handler t2 read a first limit 5;
a b
14 aaa
15 bbb
16 ccc
16 xxx
17 ddd
handler t2 read a next limit 3;
a b
18 eee
19 fff
19 yyy
handler t2 read a prev limit 10;
a b
19 fff
18 eee
17 ddd
16 xxx
16 ccc
15 bbb
14 aaa
handler t2 read a>=(16) limit 4;
a b
16 ccc
16 xxx
17 ddd
18 eee
handler t2 read a>=(16) limit 2,2;
a b
17 ddd
18 eee
handler t2 read a last limit 3;
a b
22 iii
21 hhh
20 ggg
handler t2 read a=(19);
a b
19 fff
handler t2 read a=(19) where b="yyy";
a b
19 yyy
handler t2 read first;
a b
17 ddd
handler t2 read next;
a b
18 eee
handler t2 read next;
a b
19 fff
handler t2 read last;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1
handler t2 close;
handler t1 open;
handler t1 read a next;
a b
14 aaa
handler t1 read a next;
a b
15 bbb
handler t1 close;
handler t1 open;
handler t1 read a prev;
a b
22 iii
handler t1 read a prev;
a b
21 hhh
handler t1 close;
handler t1 open as t2;
handler t2 read first;
a b
17 ddd
alter table t1 engine = MyISAM;
handler t2 read first;
ERROR 42S02: Unknown table 't2' in HANDLER
handler t1 open as t2;
drop table t1;
create table t1 (a int);
insert into t1 values (17);
handler t2 read first;
ERROR 42S02: Unknown table 't2' in HANDLER
handler t1 open as t2;
alter table t1 engine=MEMORY;
handler t2 read first;
ERROR 42S02: Unknown table 't2' in HANDLER
drop table t1;
create table t1 (a int);
insert into t1 values (1),(2),(3),(4),(5),(6);
delete from t1 limit 2;
handler t1 open;
handler t1 read first;
a
3
handler t1 read first limit 1,1;
a
4
handler t1 read first limit 2,2;
a
5
6
delete from t1 limit 3;
handler t1 read first;
a
6
drop table t1;
create table t1(a int, index(a));
insert into t1 values (1), (2), (3);
handler t1 open;
handler t1 read a=(W);
ERROR 42S22: Unknown column 'W' in 'field list'
handler t1 read a=(a);
ERROR HY000: Incorrect arguments to HANDLER ... READ
drop table t1;
create table t1 (a char(5));
insert into t1 values ("Ok");
handler t1 open as t;
handler t read first;
a
Ok
use mysql;
handler t read first;
a
Ok
handler t close;
handler test.t1 open as t;
handler t read first;
a
Ok
handler t close;
use test;
drop table t1;
create table t1 ( a int, b int, INDEX a (a) );
insert into t1 values (1,2), (2,1);
handler t1 open;
handler t1 read a=(1) where b=2;
a b
1 2
handler t1 read a=(1) where b=3;
a b
handler t1 read a=(1) where b=1;
a b
handler t1 close;
drop table t1;
drop database if exists test_test;
create database test_test;
use test_test;
create table t1(table_id char(20) primary key);
insert into t1 values ('test_test.t1');
insert into t1 values ('');
handler t1 open;
handler t1 read first limit 9;
table_id
test_test.t1
create table t2(table_id char(20) primary key);
insert into t2 values ('test_test.t2');
insert into t2 values ('');
handler t2 open;
handler t2 read first limit 9;
table_id
test_test.t2
use test;
drop table if exists t1;
create table t1(table_id char(20) primary key);
insert into t1 values ('test.t1');
insert into t1 values ('');
handler t1 open;
ERROR 42000: Not unique table/alias: 't1'
use test;
handler test.t1 read first limit 9;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1
handler test_test.t1 read first limit 9;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1
handler t1 read first limit 9;
table_id
test_test.t1
handler test_test.t2 read first limit 9;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1
handler t2 read first limit 9;
table_id
test_test.t2
handler test_test.t1 close;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1
handler t1 close;
drop table test_test.t1;
handler test_test.t2 close;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1
handler t2 close;
drop table test_test.t2;
drop database test_test;
use test;
handler test.t1 close;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1
handler t1 close;
ERROR 42S02: Unknown table 't1' in HANDLER
drop table test.t1;
drop database if exists test_test;
drop table if exists t1;
drop table if exists t2;
drop table if exists t3;
create database test_test;
use test_test;
create table t1 (c1 char(20));
insert into t1 values ('test_test.t1');
create table t3 (c1 char(20));
insert into t3 values ('test_test.t3');
handler t1 open;
handler t1 read first limit 9;
c1
test_test.t1
handler t1 open h1;
handler h1 read first limit 9;
c1
test_test.t1
use test;
create table t1 (c1 char(20));
create table t2 (c1 char(20));
create table t3 (c1 char(20));
insert into t1 values ('t1');
insert into t2 values ('t2');
insert into t3 values ('t3');
handler t1 open;
ERROR 42000: Not unique table/alias: 't1'
handler t2 open t1;
ERROR 42000: Not unique table/alias: 't1'
handler t3 open t1;
ERROR 42000: Not unique table/alias: 't1'
handler t1 read first limit 9;
c1
test_test.t1
handler test.t1 close;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1
handler test.t1 open h1;
ERROR 42000: Not unique table/alias: 'h1'
handler test_test.t1 open h1;
ERROR 42000: Not unique table/alias: 'h1'
handler test_test.t3 open h3;
handler test.t1 open h2;
handler t1 read first limit 9;
c1
test_test.t1
handler h1 read first limit 9;
c1
test_test.t1
handler h2 read first limit 9;
c1
t1
handler h3 read first limit 9;
c1
test_test.t3
handler h2 read first limit 9;
c1
t1
handler test.h1 close;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'close' at line 1
handler t1 close;
handler h1 close;
handler h2 close;
handler t1 read first limit 9;
ERROR 42S02: Unknown table 't1' in HANDLER
handler h1 read first limit 9;
ERROR 42S02: Unknown table 'h1' in HANDLER
handler h2 read first limit 9;
ERROR 42S02: Unknown table 'h2' in HANDLER
handler h3 read first limit 9;
c1
test_test.t3
handler h3 read first limit 9;
c1
test_test.t3
use test_test;
handler h3 read first limit 9;
c1
test_test.t3
handler test.h3 read first limit 9;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'read first limit 9' at line 1
handler h3 close;
use test;
drop table t3;
drop table t2;
drop table t1;
drop database test_test;
create table t1 (c1 char(20));
insert into t1 values ("t1");
handler t1 open as h1;
handler h1 read first limit 9;
c1
t1
create table t2 (c1 char(20));
insert into t2 values ("t2");
handler t2 open as h2;
handler h2 read first limit 9;
c1
t2
create table t3 (c1 char(20));
insert into t3 values ("t3");
handler t3 open as h3;
handler h3 read first limit 9;
c1
t3
create table t4 (c1 char(20));
insert into t4 values ("t4");
handler t4 open as h4;
handler h4 read first limit 9;
c1
t4
create table t5 (c1 char(20));
insert into t5 values ("t5");
handler t5 open as h5;
handler h5 read first limit 9;
c1
t5
alter table t1 engine=MyISAM;
handler h1 read first limit 9;
ERROR 42S02: Unknown table 'h1' in HANDLER
handler h2 read first limit 9;
c1
t2
handler h3 read first limit 9;
c1
t3
handler h4 read first limit 9;
c1
t4
handler h5 read first limit 9;
c1
t5
alter table t5 engine=MyISAM;
handler h1 read first limit 9;
ERROR 42S02: Unknown table 'h1' in HANDLER
handler h2 read first limit 9;
c1
t2
handler h3 read first limit 9;
c1
t3
handler h4 read first limit 9;
c1
t4
handler h5 read first limit 9;
ERROR 42S02: Unknown table 'h5' in HANDLER
alter table t3 engine=MyISAM;
handler h1 read first limit 9;
ERROR 42S02: Unknown table 'h1' in HANDLER
handler h2 read first limit 9;
c1
t2
handler h3 read first limit 9;
ERROR 42S02: Unknown table 'h3' in HANDLER
handler h4 read first limit 9;
c1
t4
handler h5 read first limit 9;
ERROR 42S02: Unknown table 'h5' in HANDLER
handler h2 close;
handler h4 close;
handler t1 open as h1_1;
handler t1 open as h1_2;
handler t1 open as h1_3;
handler h1_1 read first limit 9;
c1
t1
handler h1_2 read first limit 9;
c1
t1
handler h1_3 read first limit 9;
c1
t1
alter table t1 engine=MyISAM;
handler h1_1 read first limit 9;
ERROR 42S02: Unknown table 'h1_1' in HANDLER
handler h1_2 read first limit 9;
ERROR 42S02: Unknown table 'h1_2' in HANDLER
handler h1_3 read first limit 9;
ERROR 42S02: Unknown table 'h1_3' in HANDLER
drop table t1;
drop table t2;
drop table t3;
drop table t4;
drop table t5;
create table t1 (c1 int);
insert into t1 values (1);
handler t1 open;
handler t1 read first;
c1
1
send the below to another connection, do not wait for the result
optimize table t1;
proceed with the normal connection
handler t1 read next;
c1
1
handler t1 close;
read the result from the other connection
Table Op Msg_type Msg_text
test.t1 optimize status OK
proceed with the normal connection
drop table t1;
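# A minimal sketch (not part of the recorded result) of how the driving .test
# file typically scripts the two-connection exchange above, assuming standard
# mysqltest commands (connect/connection/send/reap) and a hypothetical
# connection name con1:
#   connect (con1,localhost,root,,test);
#   connection default;
#   handler t1 open;
#   handler t1 read first;
#   connection con1;
#   send optimize table t1;
#   connection default;
#   handler t1 read next;
#   handler t1 close;
#   connection con1;
#   reap;
#   connection default;
#   drop table t1;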
CREATE TABLE t1 ( no1 smallint(5) NOT NULL default '0', no2 int(10) NOT NULL default '0', PRIMARY KEY (no1,no2));
INSERT INTO t1 VALUES (1,274),(1,275),(2,6),(2,8),(4,1),(4,2);
HANDLER t1 OPEN;
HANDLER t1 READ `primary` = (1, 1000);
no1 no2
HANDLER t1 READ `primary` PREV;
no1 no2
1 275
DROP TABLE t1;
create table t1 (c1 int);
insert into t1 values (14397);
flush tables with read lock;
drop table t1;
ERROR HY000: Can't execute the query because you have a conflicting read lock
send the below to another connection, do not wait for the result
drop table t1;
proceed with the normal connection
select * from t1;
c1
14397
unlock tables;
read the result from the other connection
proceed with the normal connection
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1;
Warnings:
Note 1051 Unknown table 't1'
drop table if exists t1;
create table t1 (a int) ENGINE=MEMORY;
--> client 2
handler t1 open;
ERROR HY000: Table storage engine for 't1' doesn't have this option
--> client 1
drop table t1;
drop table if exists t1;
create table t1 (a int);
handler t1 open as t1_alias;
handler t1_alias read a next;
ERROR 42000: Key 'a' doesn't exist in table 't1_alias'
handler t1_alias READ a next where inexistent > 0;
ERROR 42S22: Unknown column 'inexistent' in 'field list'
handler t1_alias read a next;
ERROR 42000: Key 'a' doesn't exist in table 't1_alias'
handler t1_alias READ a next where inexistent > 0;
ERROR 42S22: Unknown column 'inexistent' in 'field list'
handler t1_alias close;
drop table t1;
drop table if exists t1,t2;
create table t1 (c1 int);
create table t2 (c1 int);
insert into t1 values (1);
insert into t2 values (2);
connection: default
handler t1 open;
handler t1 read first;
c1
1
connection: flush
flush tables;;
connection: waiter
connection: default
handler t2 open;
handler t2 read first;
c1
2
handler t1 read next;
c1
1
handler t1 close;
handler t2 close;
drop table t1,t2;
drop table if exists t1, t0;
create table t1 (c1 int);
connection: default
handler t1 open;
handler t1 read first;
c1
connection: flush
rename table t1 to t0;;
connection: waiter
connection: default
#
# RENAME placed two pending locks and waits.
# When HANDLER t0 OPEN does open_tables(), it calls
# mysql_ha_flush(), which in turn closes the open HANDLER for t1.
# RENAME TABLE gets unblocked. If it gets scheduled quickly
# and manages to complete before open_tables()
# of HANDLER t0 OPEN, open_tables() and therefore the whole
# HANDLER t0 OPEN succeeds. Otherwise open_tables()
# notices a pending or active exclusive metadata lock on t0
# and the whole HANDLER t0 OPEN fails with ER_LOCK_DEADLOCK
# error.
#
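# A sketch (not part of the recorded result) of how the two connections above
# typically interleave to produce this race, assuming standard mysqltest
# send/reap commands; the 'waiter' connection is only used to wait until the
# RENAME is actually blocked:
#   connection default;
#   handler t1 open;
#   handler t1 read first;
#   connection flush;
#   send rename table t1 to t0;    # blocks: t1 is still open in a HANDLER
#   connection default;
#   handler t0 open;               # mysql_ha_flush() closes the t1 HANDLER,
#   handler t0 close;              # which unblocks the RENAME (the race)
#   connection flush;
#   reap;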
handler t0 open;
handler t0 close;
connection: flush
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 close;
ERROR 42S02: Unknown table 't1' in HANDLER
drop table t0;
drop table if exists t1;
create temporary table t1 (a int, b char(1), key a(a), key b(a,b));
insert into t1 values (0,"a"),(1,"b"),(2,"c"),(3,"d"),(4,"e"),
(5,"f"),(6,"g"),(7,"h"),(8,"i"),(9,"j");
select a,b from t1;
a b
0 a
1 b
2 c
3 d
4 e
5 f
6 g
7 h
8 i
9 j
handler t1 open as a1;
handler a1 read a first;
a b
0 a
handler a1 read a next;
a b
1 b
handler a1 read a next;
a b
2 c
select a,b from t1;
ERROR HY000: Can't reopen table: 'a1'
handler a1 read a prev;
a b
1 b
handler a1 read a prev;
a b
0 a
handler a1 read a=(6) where b="g";
a b
6 g
handler a1 close;
select a,b from t1;
a b
0 a
1 b
2 c
3 d
4 e
5 f
6 g
7 h
8 i
9 j
handler t1 open as a2;
handler a2 read a first;
a b
0 a
handler a2 read a last;
a b
9 j
handler a2 read a prev;
a b
8 i
handler a2 close;
drop table t1;
drop table if exists t1,t2;
create table t1 (a int);
handler t1 open as t1_alias;
drop table t1;
create table t1 (a int);
handler t1 open as t1_alias;
flush tables;
drop table t1;
create table t1 (a int);
handler t1 open as t1_alias;
handler t1_alias close;
drop table t1;
create table t1 (a int);
handler t1 open as t1_alias;
handler t1_alias read first;
a
drop table t1;
handler t1_alias read next;
ERROR 42S02: Unknown table 't1_alias' in HANDLER
create table t1 (a int);
create temporary table t2 (a int, key(a));
handler t1 open as a1;
handler t2 open as a2;
handler a2 read a first;
a
drop table t1, t2;
handler a2 read a next;
ERROR 42S02: Unknown table 'a2' in HANDLER
handler a1 close;
ERROR 42S02: Unknown table 'a1' in HANDLER
create table t1 (a int, key(a));
create table t2 like t1;
handler t1 open as a1;
handler t2 open as a2;
handler a1 read a first;
a
handler a2 read a first;
a
alter table t1 add b int;
handler a1 close;
ERROR 42S02: Unknown table 'a1' in HANDLER
handler a2 close;
drop table t1, t2;
create table t1 (a int, key(a));
handler t1 open as a1;
handler a1 read a first;
a
rename table t1 to t2;
handler a1 read a first;
ERROR 42S02: Unknown table 'a1' in HANDLER
drop table t2;
create table t1 (a int, key(a));
create table t2 like t1;
handler t1 open as a1;
handler t2 open as a2;
handler a1 read a first;
a
handler a2 read a first;
a
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status Table is already up to date
handler a1 close;
ERROR 42S02: Unknown table 'a1' in HANDLER
handler a2 close;
drop table t1, t2;
create table t1 (a int, b char(1), key a(a), key b(a,b));
insert into t1 values (0,"a"),(1,"b"),(2,"c"),(3,"d"),(4,"e"),
(5,"f"),(6,"g"),(7,"h"),(8,"i"),(9,"j");
handler t1 open;
handler t1 read a first;
a b
0 a
handler t1 read a next;
a b
1 b
flush tables;
handler t1 read a next;
a b
0 a
handler t1 read a next;
a b
1 b
flush tables with read lock;
handler t1 read a next;
a b
0 a
unlock tables;
drop table t1;
handler t1 read a next;
ERROR 42S02: Unknown table 't1' in HANDLER
drop table if exists t1;
# First test case, which is supposed to trigger the execution
# path on which the problem was discovered.
create table t1 (a int);
insert into t1 values (1);
handler t1 open;
lock table t1 write;
alter table t1 engine=memory;
handler t1 read a next;
ERROR HY000: Table storage engine for 't1' doesn't have this option
handler t1 close;
unlock tables;
drop table t1;
# Now test the case which was reported originally but which no longer
# triggers the execution path which caused the problem.
create table t1 (a int, key(a));
insert into t1 values (1);
handler t1 open;
alter table t1 engine=memory;
# Since the S metadata lock was already acquired at HANDLER OPEN time
# and the TL_READ lock requested by HANDLER READ is compatible with
# ALTER's TL_WRITE_ALLOW_READ, the statement below should succeed
# without waiting. The old version of the table should be used in it.
handler t1 read a next;
a
1
handler t1 close;
drop table t1;
USE information_schema;
HANDLER COLUMNS OPEN;
ERROR HY000: Incorrect usage of HANDLER OPEN and information_schema
USE test;
#
# Add test coverage for HANDLER and LOCK TABLES, HANDLER and DDL.
#
drop table if exists t1, t2, t3;
create table t1 (a int, key a (a));
insert into t1 (a) values (1), (2), (3), (4), (5);
create table t2 (a int, key a (a)) select * from t1;
create temporary table t3 (a int, key a (a)) select * from t2;
handler t1 open;
handler t2 open;
handler t3 open;
#
# No HANDLER SQL is allowed under LOCK TABLES.
# But it does not implicitly close all handlers.
#
lock table t1 read;
handler t1 open;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t1 read next;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t2 close;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t3 open;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
# After UNLOCK TABLES handlers should be around and
# we should be able to continue reading through them.
unlock tables;
handler t1 read next;
a
1
handler t1 close;
handler t2 read next;
a
1
handler t2 close;
handler t3 read next;
a
1
handler t3 close;
drop temporary table t3;
#
# Other operations that implicitly close the handler:
#
# TRUNCATE
#
handler t1 open;
truncate table t1;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
#
# CREATE TRIGGER
#
create trigger t1_ai after insert on t1 for each row set @a=1;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# DROP TRIGGER
#
handler t1 open;
drop trigger t1_ai;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# ALTER TABLE
#
handler t1 open;
alter table t1 add column b int;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# ANALYZE TABLE
#
handler t1 open;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Table is already up to date
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# OPTIMIZE TABLE
#
handler t1 open;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# REPAIR TABLE
#
handler t1 open;
repair table t1;
Table Op Msg_type Msg_text
test.t1 repair status OK
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# DROP TABLE, naturally.
#
handler t1 open;
drop table t1;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
create table t1 (a int, b int, key a (a)) select a from t2;
#
# RENAME TABLE, naturally
#
handler t1 open;
rename table t1 to t3;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# CREATE TABLE (even with the IF NOT EXISTS clause,
# when the table already exists).
#
handler t2 open;
create table if not exists t2 (a int);
Warnings:
Note 1050 Table 't2' already exists
handler t2 read next;
ERROR 42S02: Unknown table 't2' in HANDLER
rename table t3 to t1;
drop table t2;
#
# FLUSH TABLE doesn't close the table but loses the position
#
handler t1 open;
handler t1 read a prev;
b a
NULL 5
flush table t1;
handler t1 read a prev;
b a
NULL 5
handler t1 close;
#
# FLUSH TABLES WITH READ LOCK behaves like FLUSH TABLE.
#
handler t1 open;
handler t1 read a prev;
b a
NULL 5
flush tables with read lock;
handler t1 read a prev;
b a
NULL 5
handler t1 close;
unlock tables;
#
# Let us also check that these operations behave in a similar
# way under LOCK TABLES.
#
# TRUNCATE under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
truncate table t1;
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
#
# CREATE TRIGGER under LOCK TABLES.
#
lock tables t1 write;
create trigger t1_ai after insert on t1 for each row set @a=1;
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# DROP TRIGGER under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
drop trigger t1_ai;
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# ALTER TABLE under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
alter table t1 drop column b;
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# ANALYZE TABLE under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Table is already up to date
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# OPTIMIZE TABLE under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# REPAIR TABLE under LOCK TABLES.
#
handler t1 open;
lock tables t1 write;
repair table t1;
Table Op Msg_type Msg_text
test.t1 repair status OK
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
# DROP TABLE under LOCK TABLES, naturally.
#
handler t1 open;
lock tables t1 write;
drop table t1;
unlock tables;
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
create table t1 (a int, b int, key a (a));
insert into t1 (a) values (1), (2), (3), (4), (5);
#
# FLUSH TABLE doesn't close the table but loses the position
#
handler t1 open;
handler t1 read a prev;
a b
5 NULL
lock tables t1 write;
flush table t1;
unlock tables;
handler t1 read a prev;
a b
5 NULL
handler t1 close;
#
# Explore the effect of HANDLER locks on concurrent DDL
#
handler t1 open;
# Establishing auxiliary connections con1, con2, con3
# --> connection con1;
# Sending:
drop table t1 ;
# We can't use connection 'default' as wait_condition will
# autoclose handlers.
# --> connection con2
# Waiting for 'drop table t1' to get blocked...
# --> connection default
handler t1 read a prev;
a b
5 NULL
handler t1 read a prev;
a b
4 NULL
handler t1 close;
# --> connection con1
# Reaping 'drop table t1'...
# --> connection default
#
# Explore the effect of HANDLER locks in parallel with SELECT
#
create table t1 (a int, key a (a));
insert into t1 (a) values (1), (2), (3), (4), (5);
begin;
select * from t1;
a
1
2
3
4
5
handler t1 open;
handler t1 read a prev;
a
5
handler t1 read a prev;
a
4
handler t1 close;
# --> connection con1;
# Sending:
drop table t1 ;
# --> connection con2
# Waiting for 'drop table t1' to get blocked...
# --> connection default
# We can still use the table; it's part of the transaction
select * from t1;
a
1
2
3
4
5
# Since t1 is part of the transaction,
# we can reopen it in the handler
handler t1 open;
# We can commit the transaction; it doesn't close the handler
# and doesn't let DROP proceed.
commit;
handler t1 read a prev;
a
5
handler t1 read a prev;
a
4
handler t1 read a prev;
a
3
handler t1 close;
# --> connection con1
# Now drop can proceed
# Reaping 'drop table t1'...
# --> connection default
#
# Demonstrate that HANDLER locks and transaction locks
# reside in the same context.
#
create table t1 (a int, key a (a));
insert into t1 (a) values (1), (2), (3), (4), (5);
create table t0 (a int, key a (a));
insert into t0 (a) values (1), (2), (3), (4), (5);
begin;
select * from t1;
a
1
2
3
4
5
# --> connection con2
# Sending:
rename table t0 to t3, t1 to t0, t3 to t1;
# --> connection con1
# Waiting for 'rename table ...' to get blocked...
# --> connection default
# We back off on hitting a deadlock condition.
handler t0 open;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
select * from t0;
a
1
2
3
4
5
handler t1 open;
commit;
handler t1 close;
# --> connection con2
# Reaping 'rename table ...'...
# --> connection default
handler t1 open;
handler t1 read a prev;
a
5
handler t1 close;
drop table t0;
#
# Originally there was a deadlock error in this test.
# With the implementation of the deadlock detector
# we no longer deadlock, but block and wait on a lock.
# The HANDLER is auto-closed as soon as the connection
# sees a pending conflicting lock against it.
#
create table t2 (a int, key a (a));
handler t1 open;
# --> connection con1
lock tables t2 read;
# --> connection con2
# Sending 'drop table t2'...
drop table t2;
# --> connection con1
# Waiting for 'drop table t2' to get blocked...
# --> connection default
# Sending 'select * from t2'
select * from t2;
# --> connection con1
# Waiting for 'select * from t2' to get blocked...
unlock tables;
# --> connection con2
# Reaping 'drop table t2'...
# --> connection default
# Reaping 'select * from t2'
ERROR 42S02: Table 'test.t2' doesn't exist
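# The SELECT waited behind the pending DROP TABLE instead of
# deadlocking; by the time it was reaped the table was already
# gone, hence the error above.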
handler t1 close;
#
# ROLLBACK TO SAVEPOINT releases transactional locks,
# but has no effect on open HANDLERs
#
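# In outline, the test below does the following:
#   default: begin; select * from t3; savepoint sv;
#            handler t1 open; select * from t2;
#   con1:    drop table t1;  (blocks on the HANDLER lock on t1)
#   con2:    drop table t2;  (blocks on the transactional lock on t2)
#   default: rollback to savepoint sv;  only 'drop table t2' unblocks
#   default: handler t1 close;          now 'drop table t1' unblocks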
create table t2 like t1;
create table t3 like t1;
begin;
# Have something before the savepoint
select * from t3;
a
savepoint sv;
handler t1 open;
handler t1 read a first;
a
1
handler t1 read a next;
a
2
select * from t2;
a
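# The SELECT above takes a transactional metadata lock on t2; unlike
# the HANDLER lock on t1, it will be released by ROLLBACK TO SAVEPOINT.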
# --> connection con1
# Sending:
drop table t1;
# --> connection con2
# Sending:
drop table t2;
# --> connection default
# Let DROP TABLE statements sync in. We must use
# a separate connection for that, because otherwise SELECT
# will auto-close the HANDLERs, because there are pending
# exclusive locks against them.
# --> connection con3
# Waiting for 'drop table t1' to get blocked...
# Waiting for 'drop table t2' to get blocked...
# Demonstrate that t2 lock was released and t2 was dropped
# after ROLLBACK TO SAVEPOINT
# --> connection default
rollback to savepoint sv;
# --> connection con2
# Reaping 'drop table t2'...
# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
# lock.
# --> connection default
handler t1 read a next;
a
3
handler t1 read a next;
a
4
# Demonstrate that the drop will go through as soon as we close the
# HANDLER
handler t1 close;
# --> connection con1
# Reaping 'drop table t1'...
# --> connection default
commit;
drop table t3;
#
# A few special cases when using SAVEPOINT/ROLLBACK TO
# SAVEPOINT and HANDLER.
#
# Show that rollback to the savepoint taken in the beginning
# of the transaction doesn't release the MDL lock on
# the HANDLER that was opened later.
#
create table t1 (a int, key a(a));
insert into t1 (a) values (1), (2), (3), (4), (5);
create table t2 like t1;
begin;
savepoint sv;
handler t1 open;
handler t1 read a first;
a
1
handler t1 read a next;
a
2
select * from t2;
a
# --> connection con1
# Sending:
drop table t1;
# --> connection con2
# Sending:
drop table t2;
# --> connection default
# Let DROP TABLE statements sync in. We must use
# a separate connection for that, because otherwise SELECT
# will auto-close the HANDLERs, because there are pending
# exclusive locks against them.
# --> connection con3
# Waiting for 'drop table t1' to get blocked...
# Waiting for 'drop table t2' to get blocked...
# Demonstrate that t2 lock was released and t2 was dropped
# after ROLLBACK TO SAVEPOINT
# --> connection default
rollback to savepoint sv;
# --> connection con2
# Reaping 'drop table t2'...
# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
# lock.
# --> connection default
handler t1 read a next;
a
3
handler t1 read a next;
a
4
# Demonstrate that the drop will go through as soon as we close the
# HANDLER
handler t1 close;
# --> connection con1
# Reaping 'drop table t1'...
# --> connection default
commit;
#
# Show that rollback to the savepoint taken in the beginning
# of the transaction works properly (no valgrind warnings, etc.),
# even though it's done after the HANDLER MDL lock that was there
# at the beginning is released and added again.
#
create table t1 (a int, key a(a));
insert into t1 (a) values (1), (2), (3), (4), (5);
create table t2 like t1;
create table t3 like t1;
insert into t3 (a) select a from t1;
begin;
handler t1 open;
savepoint sv;
handler t1 read a first;
a
1
select * from t2;
a
handler t1 close;
handler t3 open;
handler t3 read a first;
a
1
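# The HANDLER lock that existed when the savepoint was taken (on t1)
# has since been released and a new one (on t3) acquired.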
rollback to savepoint sv;
# --> connection con1
drop table t1, t2;
# Sending:
drop table t3;
# Let DROP TABLE statement sync in.
# --> connection con2
# Waiting for 'drop table t3' to get blocked...
# Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
# lock.
# --> connection default
handler t3 read a next;
a
2
# Demonstrate that the drop will go through as soon as we close the
# HANDLER
handler t3 close;
# --> connection con1
# Reaping 'drop table t3'...
# --> connection default
commit;
#
# If we have to wait on an exclusive lock while having
# an open HANDLER, ER_LOCK_DEADLOCK is reported.
#
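# In outline: the default connection keeps a HANDLER lock on t1 while
# con1 takes LOCK TABLES on t1 and t2.  default's DROP TABLE t2 then
# waits for con1, con1's DROP TABLE t1 waits for the HANDLER lock,
# and the MDL deadlock detector breaks the cycle with ER_LOCK_DEADLOCK.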
create table t1 (a int, key a(a));
create table t2 like t1;
handler t1 open;
# --> connection con1
lock table t1 write, t2 write;
# --> connection default
drop table t2;
# --> connection con2
# Waiting for 'drop table t2' to get blocked...
# --> connection con1
drop table t1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
unlock tables;
# --> connection default
# Demonstrate that there is no deadlock with FLUSH TABLE,
# even though it is waiting for the other table to go away
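# A condensed per-connection view of the interleaving recorded below
# (an illustrative sketch only, not part of the test output; the
# connection names are the ones used in this test):
#
#   connection default           connection con1      connection con2
#   ------------------           ---------------      ---------------
#   create table t2 like t1;
#   flush table t2;  -- blocks
#                                                      drop table t1;
#                                unlock tables;
#   -- 'flush table t2' returns
#   drop table t2;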
create table t2 like t1;
# Sending:
flush table t2;
# --> connection con2
drop table t1;
# --> connection con1
unlock tables;
# --> connection default
# Reaping 'flush table t2'...
drop table t2;
#
# Bug #46224 HANDLER statements within a transaction might
# lead to deadlocks
#
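# The scenario exercised below, shown per connection (an illustrative
# sketch only, not recorded output; con2, which merely verifies that
# the waits happen, is omitted):
#
#   connection default                  connection con1
#   ------------------                  ---------------
#   begin;
#   select * from t1;
#   handler t1 open;
#                                       lock tables t1 write;  -- sent, waits
#   handler t1 read a next;  -- not blocked
#   commit;                  -- unblocks con1
#                                       -- 'lock tables t1 write' completes
#   handler t1 read a next;  -- sent, waits
#                                       drop table t1;  -- forces the HANDLER
#                                                       -- to be closed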
create table t1 (a int, key a(a));
insert into t1 values (1), (2);
# --> connection default
begin;
select * from t1;
a
1
2
handler t1 open;
# --> connection con1
# Sending:
lock tables t1 write;
# --> connection con2
# Check that 'lock tables t1 write' waits until the transaction which
# has read from the table commits.
# --> connection default
# The below 'handler t1 read ...' should not be blocked as
# 'lock tables t1 write' has not succeeded yet.
handler t1 read a next;
a
1
# Unblock 'lock tables t1 write'.
commit;
# --> connection con1
# Reap 'lock tables t1 write'.
# --> connection default
# Sending:
handler t1 read a next;
# --> connection con1
# Waiting for 'handler t1 read a next' to get blocked...
# The below 'drop table t1' should be able to proceed without
# waiting as it will force HANDLER to be closed.
drop table t1;
unlock tables;
# --> connection default
# Reaping 'handler t1 read a next'...
ERROR 42S02: Table 'test.t1' doesn't exist
handler t1 close;
# --> connection con1
# --> connection con2
# --> connection con3
#
# A temporary table test.
# Check that we don't lose positions of HANDLER opened
# against a temporary table.
#
create table t1 (a int, b int, key a (a));
insert into t1 (a) values (1), (2), (3), (4), (5);
create temporary table t2 (a int, b int, key a (a));
insert into t2 (a) select a from t1;
handler t1 open;
handler t1 read a next;
a b
1 NULL
handler t2 open;
handler t2 read a next;
a b
1 NULL
flush table t1;
handler t2 read a next;
a b
2 NULL
# Sic: the position is lost
handler t1 read a next;
a b
1 NULL
select * from t1;
a b
1 NULL
2 NULL
3 NULL
4 NULL
5 NULL
# Sic: the position is not lost
handler t2 read a next;
a b
3 NULL
select * from t2;
ERROR HY000: Can't reopen table: 't2'
handler t2 read a next;
a b
4 NULL
drop table t1;
drop temporary table t2;
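# Illustrative sketch of the base-table half of the behaviour verified
# above (not part of the recorded output; t_demo is an arbitrary name):
# after FLUSH TABLE, a HANDLER opened on the flushed base table restarts
# from the beginning, while the temporary-table HANDLER above kept its
# position.
# create table t_demo (a int, key a (a)) engine=MyISAM;
# insert into t_demo (a) values (1), (2), (3);
# handler t_demo open;
# handler t_demo read a next;   -- returns a = 1
# flush table t_demo;           -- the HANDLER position is reset
# handler t_demo read a next;   -- returns a = 1 again, not a = 2
# handler t_demo close;
# drop table t_demo;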
#
# A test for lock_table_names()/unlock_table_names() function.
# It should work properly in presence of open HANDLER.
#
create table t1 (a int, b int, key a (a));
create table t2 like t1;
create table t3 like t1;
create table t4 like t1;
handler t1 open;
handler t2 open;
rename table t4 to t5, t3 to t4, t5 to t3;
handler t1 read first;
a b
handler t2 read first;
a b
drop table t1, t2, t3, t4;
#
# A test for FLUSH TABLES WITH READ LOCK and HANDLER statements.
#
set autocommit=0;
create table t1 (a int, b int, key a (a));
insert into t1 (a, b) values (1, 1), (2, 1), (3, 2), (4, 2), (5, 5);
create table t2 like t1;
insert into t2 (a, b) select a, b from t1;
create table t3 like t1;
insert into t3 (a, b) select a, b from t1;
commit;
flush tables with read lock;
handler t1 open;
lock table t1 read;
handler t1 read next;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
# This implicitly leaves LOCK TABLES but doesn't drop the GLR
lock table not_exists_write read;
ERROR 42S02: Table 'test.not_exists_write' doesn't exist
# We still have the read lock.
drop table t1;
ERROR HY000: Can't execute the query because you have a conflicting read lock
handler t1 open;
select a from t2;
a
1
2
3
4
5
handler t1 read next;
a b
1 1
flush tables with read lock;
handler t2 open;
flush tables with read lock;
handler t1 read next;
a b
1 1
select a from t3;
a
1
2
3
4
5
handler t2 read next;
a b
1 1
handler t1 close;
rollback;
handler t2 close;
drop table t1;
ERROR HY000: Can't execute the query because you have a conflicting read lock
commit;
flush tables;
drop table t1;
ERROR HY000: Can't execute the query because you have a conflicting read lock
unlock tables;
drop table t1;
set autocommit=default;
drop table t2, t3;
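# Condensed illustrative sketch of the core FTWRL interaction exercised
# above (not part of the recorded output; t_demo is an arbitrary name):
# a HANDLER can be opened and read under FLUSH TABLES WITH READ LOCK,
# while DDL such as DROP TABLE is rejected until UNLOCK TABLES.
# create table t_demo (a int);
# flush tables with read lock;
# handler t_demo open;          -- allowed under the global read lock
# handler t_demo read first;    -- reading through the HANDLER is allowed
# drop table t_demo;            -- fails: conflicting read lock
# unlock tables;
# drop table t_demo;            -- succeeds once the read lock is released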
#
# HANDLER statement and operation-type aware metadata locks.
# Check that when we clone a ticket for HANDLER we downgrade
# the lock.
#
# Establish an auxiliary connection con1.
# -> connection default
create table t1 (a int, b int, key a (a));
insert into t1 (a, b) values (1, 1), (2, 1), (3, 2), (4, 2), (5, 5);
begin;
insert into t1 (a, b) values (6, 6);
handler t1 open;
handler t1 read a last;
a b
6 6
insert into t1 (a, b) values (7, 7);
handler t1 read a last;
a b
7 7
commit;
# -> connection con1
# Demonstrate that the HANDLER doesn't hold MDL_SHARED_WRITE.
lock table t1 write;
unlock tables;
# -> connection default
handler t1 read a prev;
a b
6 6
handler t1 close;
# Cleanup.
drop table t1;
# -> connection con1
# -> connection default
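# Condensed two-session sketch of the downgrade demonstrated above (not
# part of the recorded output; the session labels are arbitrary): once
# the transaction that wrote to t1 commits, the HANDLER cloned from it
# retains only a read-compatible metadata lock.
# session 1: begin; insert into t1 (a, b) values (8, 8);
# session 1: handler t1 open; handler t1 read a last;
# session 1: commit;
# session 2: lock table t1 write;   -- not blocked by the open HANDLER
# session 2: unlock tables;
# session 1: handler t1 read a prev; handler t1 close;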
#
# A test for Bug#50555 "handler commands crash server in
# my_hash_first()".
#
handler no_such_table read no_such_index first;
ERROR 42S02: Unknown table 'no_such_table' in HANDLER
handler no_such_table close;
ERROR 42S02: Unknown table 'no_such_table' in HANDLER
#
# Bug#50907 Assertion `hash_tables->table->next == __null' on
# HANDLER OPEN
#
DROP TABLE IF EXISTS t1, t2;
CREATE TEMPORARY TABLE t1 (i INT);
CREATE TEMPORARY TABLE t2 (i INT);
HANDLER t2 OPEN;
HANDLER t2 READ FIRST;
i
HANDLER t2 CLOSE;
DROP TABLE t1, t2;
#
# Bug#50912 Assertion `ticket->m_type >= mdl_request->type'
# failed on HANDLER + I_S
#
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (id INT);
HANDLER t1 OPEN;
SELECT table_name, table_comment FROM information_schema.tables
WHERE table_schema= 'test' AND table_name= 't1';
table_name table_comment
t1
HANDLER t1 CLOSE;
DROP TABLE t1;
#
# Test for bug #50908 "Assertion `handler_tables_hash.records == 0'
# failed in enter_locked_tables_mode".
#
drop tables if exists t1, t2;
drop function if exists f1;
create table t1 (i int);
insert into t1 values (1), (2);
create table t2 (j int);
insert into t2 values (1);
create function f1() returns int return (select count(*) from t2);
# Check that open HANDLER survives statement executed in
# prelocked mode.
handler t1 open;
handler t1 read next;
i
1
# The below statement was aborted due to an assertion failure.
select f1() from t2;
f1()
1
handler t1 read next;
i
2
handler t1 close;
# Check that the same happens under GLOBAL READ LOCK.
flush tables with read lock;
handler t1 open;
handler t1 read next;
i
1
select f1() from t2;
f1()
1
handler t1 read next;
i
2
unlock tables;
handler t1 close;
# Now, check that the same happens if LOCK TABLES is executed.
handler t1 open;
handler t1 read next;
i
1
lock table t2 read;
select * from t2;
j
1
unlock tables;
handler t1 read next;
i
2
handler t1 close;
# Finally, check scenario with GRL and LOCK TABLES.
flush tables with read lock;
handler t1 open;
handler t1 read next;
i
1
lock table t2 read;
select * from t2;
j
1
# This unlocks both tables and GRL.
unlock tables;
handler t1 read next;
i
2
handler t1 close;
# Clean-up.
drop function f1;
drop tables t1, t2;
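# Condensed sketch of the invariant checked above (not part of the
# recorded output; it reuses the names from the test above): a HANDLER
# stays open and keeps its position across a statement executed in
# prelocked mode, with or without a global read lock or LOCK TABLES
# in between.
# handler t1 open;
# handler t1 read next;     -- returns i = 1
# select f1() from t2;      -- prelocked execution, HANDLER unaffected
# handler t1 read next;     -- continues from the saved position, i = 2
# handler t1 close;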
#
# Test for bug #51136 "Crash in pthread_rwlock_rdlock on TEMPORARY +
# HANDLER + LOCK + SP".
# Also see additional coverage for this bug in flush.test.
#
drop tables if exists t1, t2;
create table t1 (i int);
create temporary table t2 (j int);
handler t1 open;
lock table t2 read;
# This commit should not release any MDL locks.
commit;
unlock tables;
# The below statement crashed before the bug fix as it
# attempted to release a metadata lock which had
# already been released by commit.
handler t1 close;
drop tables t1, t2;
#
# Bug#51355 handler stmt cause assertion in
# bool MDL_context::try_acquire_lock(MDL_request*)
#
DROP TABLE IF EXISTS t1;
# Connection default
CREATE TABLE t1(id INT, KEY id(id));
HANDLER t1 OPEN;
# Connection con51355
# Sending:
DROP TABLE t1;
# Connection default
# This I_S query will cause the handler table to be closed and
# the metadata lock to be released. This will allow DROP TABLE
# to proceed. Waiting for the table to be removed.
# Connection con51355
# Reaping: DROP TABLE t1
# Connection default
HANDLER t1 READ id NEXT;
ERROR 42S02: Table 'test.t1' doesn't exist
HANDLER t1 READ id NEXT;
ERROR 42S02: Table 'test.t1' doesn't exist
HANDLER t1 CLOSE;
# Connection con51355
# Connection default
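# Condensed interleaving sketch for the scenario above (not part of the
# recorded output; connection names follow the test):
# default:  HANDLER t1 OPEN;
# con51355: DROP TABLE t1;            -- blocks on the HANDLER's metadata lock
# default:  SELECT ... FROM information_schema.tables WHERE ...;
#           -- closes the handler table and releases its metadata lock,
#           -- so the DROP TABLE in con51355 can complete
# default:  HANDLER t1 READ id NEXT;  -- now fails, the table is gone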
#
# Bug#54401 assert in Diagnostics_area::set_eof_status , HANDLER
#
DROP TABLE IF EXISTS t1, t2;
DROP FUNCTION IF EXISTS f1;
CREATE FUNCTION f1() RETURNS INTEGER
BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1);
HANDLER t1 OPEN;
HANDLER t1 READ FIRST WHERE f1() = 1;
ERROR 42000: This version of MySQL doesn't yet support 'stored functions in HANDLER ... READ'
HANDLER t1 CLOSE;
DROP FUNCTION f1;
DROP TABLE t1;
#
# Bug#54920 Stored functions are allowed in HANDLER statements,
# but broken.
#
DROP TABLE IF EXISTS t1;
DROP FUNCTION IF EXISTS f1;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
CREATE FUNCTION f1() RETURNS INT RETURN 1;
HANDLER t1 OPEN;
HANDLER t1 READ FIRST WHERE f1() = 1;
ERROR 42000: This version of MySQL doesn't yet support 'stored functions in HANDLER ... READ'
HANDLER t1 CLOSE;
DROP FUNCTION f1;
DROP TABLE t1;
#
# BUG #46456: HANDLER OPEN + TRUNCATE + DROP (temporary) TABLE, crash
#
CREATE TABLE t1 AS SELECT 1 AS f1;
HANDLER t1 OPEN;
TRUNCATE t1;
HANDLER t1 READ FIRST;
ERROR 42S02: Unknown table 't1' in HANDLER
DROP TABLE t1;
CREATE TEMPORARY TABLE t1 AS SELECT 1 AS f1;
HANDLER t1 OPEN;
TRUNCATE t1;
HANDLER t1 READ FIRST;
ERROR 42S02: Unknown table 't1' in HANDLER
DROP TABLE t1;
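# Illustrative note on the behaviour exercised above (not part of the
# recorded output; t_demo is an arbitrary name): TRUNCATE invalidates
# any HANDLER opened on the table, for base and temporary tables alike.
# create table t_demo as select 1 as f1;
# handler t_demo open;
# truncate t_demo;              -- implicitly closes the HANDLER
# handler t_demo read first;    -- fails: Unknown table 't_demo' in HANDLER
# drop table t_demo;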
#
# BUG#51877 - HANDLER interface causes invalid memory read
#
CREATE TABLE t1(a INT, KEY(a));
HANDLER t1 OPEN;
HANDLER t1 READ a FIRST;
a
INSERT INTO t1 VALUES(1);
HANDLER t1 READ a NEXT;
a
1
HANDLER t1 CLOSE;
DROP TABLE t1;
#
# Bug #54007: assert in ha_myisam::index_next , HANDLER
#
CREATE TABLE t1(a INT, b INT, PRIMARY KEY(a), KEY b(b), KEY ab(a, b));
HANDLER t1 OPEN;
HANDLER t1 READ FIRST;
a b
HANDLER t1 READ `PRIMARY` NEXT;
a b
HANDLER t1 READ ab NEXT;
a b
HANDLER t1 READ b NEXT;
a b
HANDLER t1 READ NEXT;
a b
HANDLER t1 CLOSE;
INSERT INTO t1 VALUES (2, 20), (1, 10), (4, 40), (3, 30);
HANDLER t1 OPEN;
HANDLER t1 READ FIRST;
a b
2 20
HANDLER t1 READ NEXT;
a b
1 10
HANDLER t1 READ `PRIMARY` NEXT;
a b
1 10
HANDLER t1 READ `PRIMARY` NEXT;
a b
2 20
HANDLER t1 READ ab NEXT;
a b
1 10
HANDLER t1 READ ab NEXT;
a b
2 20
HANDLER t1 READ b NEXT;
a b
1 10
HANDLER t1 READ b NEXT;
a b
2 20
HANDLER t1 READ b NEXT;
a b
3 30
HANDLER t1 READ b NEXT;
a b
4 40
HANDLER t1 READ b NEXT;
a b
HANDLER t1 READ NEXT;
a b
4 40
HANDLER t1 READ NEXT;
a b
3 30
HANDLER t1 READ NEXT;
a b
HANDLER t1 CLOSE;
HANDLER t1 OPEN;
HANDLER t1 READ FIRST;
a b
2 20
HANDLER t1 READ `PRIMARY` PREV;
a b
4 40
HANDLER t1 READ `PRIMARY` PREV;
a b
3 30
HANDLER t1 READ b PREV;
a b
4 40
HANDLER t1 READ b PREV;
a b
3 30
HANDLER t1 CLOSE;
HANDLER t1 OPEN;
HANDLER t1 READ FIRST;
a b
2 20
HANDLER t1 READ `PRIMARY` PREV LIMIT 3;
a b
4 40
3 30
2 20
HANDLER t1 READ b NEXT LIMIT 5;
a b
1 10
2 20
3 30
4 40
HANDLER t1 CLOSE;
DROP TABLE t1;
End of 5.1 tests