mariadb/mysql-test/t/flush.test
Dmitry Lenev db114007eb Fix for bug #12641342 - "61401: UPDATE PERFORMANCE DEGRADES
GRADUALLY IF A TRIGGER EXISTS".

This bug manifested itself in two ways:

- Firstly, execution of any data-changing statement which
  required prelocking (i.e. involved a stored function or
  trigger) as part of a transaction slightly slowed down all
  subsequent statements in this transaction. So performance
  of a transaction which periodically involved such statements
  gradually degraded over time.
- Secondly, execution of any data-changing statement which
  required prelocking as part of a transaction prevented a
  concurrent FLUSH TABLES WITH READ LOCK from proceeding
  until the end of the transaction instead of the end of the
  particular statement.
  
The problem was caused by incorrect handling of the metadata
lock used in the FTWRL implementation for statements requiring
prelocked mode.
Each statement which changes data acquires a global IX lock
with STATEMENT duration. This lock is supposed to block
concurrent FTWRL from proceeding until the statement ends.

When entering prelocked mode, the durations of all metadata
locks acquired so far were changed to EXPLICIT, to prevent
substatements from releasing these locks. When prelocked mode
was left, the durations of metadata locks were changed to
TRANSACTIONAL (with a few exceptions) so they could be properly
released at the end of the transaction.
Unfortunately, this meant that the global IX lock blocking
FTWRL was moved from STATEMENT to TRANSACTIONAL duration after
execution of a statement requiring prelocking.

Since each subsequent statement that required prelocking and
tried to acquire the global IX lock with STATEMENT duration got
a new instance of MDL_ticket, which was later moved to
TRANSACTIONAL duration, this led to unwarranted growth in the
number of tickets with TRANSACTIONAL duration in this
connection's MDL_context. As a result, searching for other
tickets in it became slow and acquisition of other metadata
locks by this transaction started to hog CPU.
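
(Illustrative aside, not part of the original commit: on servers that
expose MDL information, e.g. MySQL 5.7+ with performance_schema, the
accumulation of long-lived metadata locks in a session can be inspected
roughly like this, assuming the MDL instrument is enabled. This table
does not exist in the 5.5-era server this fix targets.)

    -- one-time setup: enable the metadata lock instrument
    UPDATE performance_schema.setup_instruments
       SET enabled = 'YES'
     WHERE name = 'wait/lock/metadata/sql/mdl';

    -- inside the suspect transaction, count locks per duration;
    -- before the fix the TRANSACTION-duration bucket kept growing
    SELECT lock_duration, COUNT(*) AS locks
      FROM performance_schema.metadata_locks
     GROUP BY lock_duration;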

Moreover, this also meant that after execution of a statement
requiring prelocking, a concurrent FTWRL was blocked until the
end of the transaction instead of the end of the statement.

This patch solves the problem by not moving locks to EXPLICIT
duration when a thread enters prelocked mode (unless it is a real
LOCK TABLES mode). This step turned out to be unnecessary, as
substatements don't try to release metadata locks. Consequently,
the global IX lock blocking FTWRL keeps its STATEMENT duration
and is properly released at the end of the statement, so the
above issue goes away.
2011-06-16 19:18:16 +04:00
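
As an illustrative sketch (not the committed test below; table and
trigger names here are made up), the FTWRL symptom could be reproduced
roughly like this, since the trigger makes the UPDATE require prelocking:

    -- session 1
    CREATE TABLE sketch (id INT PRIMARY KEY, val INT);
    INSERT INTO sketch VALUES (1, 0);
    CREATE TRIGGER sketch_au AFTER UPDATE ON sketch
      FOR EACH ROW SET @x = NEW.val;
    BEGIN;
    UPDATE sketch SET val = val + 1 WHERE id = 1;  -- prelocked due to trigger

    -- session 2: before the fix this waited until session 1 committed;
    -- after the fix it proceeds once the UPDATE statement has finished
    FLUSH TABLES WITH READ LOCK;
    UNLOCK TABLES;

    -- session 1
    COMMIT;
    DROP TABLE sketch;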

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;
--disable_warnings
drop table if exists t1,t2;
drop database if exists mysqltest;
--enable_warnings
create temporary table t1(n int not null primary key);
create table t2(n int);
insert into t2 values(3);
let $1=100;
disable_query_log;
while ($1)
{
connection con1;
send replace into t1 select n from t2;
connection con2;
send flush tables;
connection con1;
reap;
connection con2;
reap;
dec $1;
}
enable_query_log;
connection con1;
select * from t1;
connection con2;
flush tables with read lock;
--error 1223
drop table t2;
connection con1;
send drop table t2;
connection con2;
unlock tables;
connection con1;
reap;
# test if drop database will wait until we release the global read lock
connection con1;
create database mysqltest;
create table mysqltest.t1(n int);
insert into mysqltest.t1 values (23);
flush tables with read lock;
connection con2;
send drop database mysqltest;
connection con1;
select * from mysqltest.t1;
unlock tables;
connection con2;
reap;
# test if dirty close releases global read lock
connection con1;
create table t1 (n int);
flush tables with read lock;
dirty_close con1;
connection con2;
insert into t1 values (345);
select * from t1;
drop table t1;
#
# Bug#9459 - deadlock with flush with lock, and lock table write
#
create table t1 (c1 int);
lock table t1 write;
# Cannot get the global read lock with write locked tables.
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
lock table t1 read;
# Cannot get the global read lock with read locked tables.
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
flush tables with read lock;
--error 1223
lock table t1 write;
lock table t1 read;
--error 1223
lock table t1 write;
# Release all table locks and the global read lock.
unlock tables;
create table t2 (c1 int);
create table t3 (c1 int);
lock table t1 read, t2 read, t3 write;
# Cannot get the global read lock with write locked tables.
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
lock table t1 read, t2 read, t3 read;
# Cannot get the global read lock with read locked tables.
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
drop table t1, t2, t3;
# End of 4.1 tests
#
# Test of deadlock problem when doing FLUSH TABLE with read lock
# (Bug was in NPTL threads in Linux when using a different mutex while
# waiting for a condition variable)
create table t1 (c1 int);
create table t2 (c1 int);
connect (con1,localhost,root,,);
connect (con3,localhost,root,,);
connection con1;
lock table t1 write;
connection con2;
send flush tables with read lock;
--sleep 1
connection con3;
send insert into t2 values(1);
--sleep 1
connection con1;
unlock tables;
disconnect con1;
connection con2;
reap;
disconnect con2;
connection con3;
# It hangs here (insert into t2 does not end).
reap;
disconnect con3;
connection default;
drop table t1, t2;
#
# Bug#32528 Global read lock with a low priority write lock causes a server crash
#
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
set session low_priority_updates=1;
create table t1 (a int);
create table t2 (b int);
lock tables t1 write;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
lock tables t1 read, t2 write;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
lock tables t1 read;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
drop table t1, t2;
set session low_priority_updates=default;
#
# Bug #33334 mysqltest_embedded crashes when disconnecting before reap
#
connect (con1,localhost,root,,);
send select benchmark(200, (select sin(1))) > 1000;
disconnect con1;
--source include/wait_until_disconnected.inc
connection default;
--echo End of 5.0 tests
#
# Bug #26380: LOCK TABLES + FLUSH LOGS causes deadlock
#
set @old_general_log= @@general_log;
set @old_read_only= @@read_only;
set global general_log= on;
flush tables with read lock;
flush logs;
unlock tables;
set global read_only=1;
flush logs;
unlock tables;
flush tables with read lock;
flush logs;
unlock tables;
set global general_log= @old_general_log;
set global read_only= @old_read_only;
--echo End of 5.1 tests
--echo #
--echo # Additional test for bug #51136 "Crash in pthread_rwlock_rdlock
--echo # on TEMPORARY + HANDLER + LOCK + SP".
--echo # Also see the main test for this bug in include/handler.inc.
--echo #
--disable_warnings
drop tables if exists t1, t2;
--enable_warnings
create table t1 (i int);
create temporary table t2 (j int);
flush tables with read lock;
lock table t2 read;
--echo # This commit should not release any MDL locks.
commit;
--echo # The below statement crashed before the bug fix as it
--echo # has attempted to release global shared metadata lock
--echo # which was already released by commit.
unlock tables;
drop tables t1, t2;
--echo #
--echo # Tests for WL#5000 FLUSH TABLES|TABLE table_list WITH READ LOCK
--echo #
--echo # I. Check the incompatible changes in the grammar.
--echo #
--error ER_PARSE_ERROR
flush tables with read lock, hosts;
--error ER_PARSE_ERROR
flush privileges, tables;
--error ER_PARSE_ERROR
flush privileges, tables with read lock;
--error ER_PARSE_ERROR
flush privileges, tables;
--error ER_PARSE_ERROR
flush tables with read lock, tables;
show tables;
--echo #
--echo # II. Check the allowed syntax.
--echo #
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
create table t1 (a int);
create table t2 (a int);
create table t3 (a int);
lock table t1 read, t2 read;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
unlock tables;
flush tables with read lock;
flush tables t1, t2 with read lock;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables t1, t2 with read lock;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
flush tables with read lock;
select * from t1;
select * from t2;
--error ER_TABLE_NOT_LOCKED
select * from t3;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
insert into t1 (a) values (1);
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
insert into t2 (a) values (1);
--error ER_TABLE_NOT_LOCKED
insert into t3 (a) values (1);
--error ER_NO_SUCH_TABLE
lock table no_such_table read;
--echo #
--echo # We implicitly left the locked tables
--echo # mode but still have the read lock.
--echo #
--error ER_CANT_UPDATE_WITH_READLOCK
insert into t2 (a) values (1);
unlock tables;
insert into t1 (a) values (1);
insert into t2 (a) values (1);
flush table t1, t2 with read lock;
select * from t1;
select * from t2;
--error ER_TABLE_NOT_LOCKED
select * from t3;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
insert into t1 (a) values (2);
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
insert into t2 (a) values (2);
--error ER_TABLE_NOT_LOCKED
insert into t3 (a) values (2);
--error ER_NO_SUCH_TABLE
lock table no_such_table read;
insert into t3 (a) values (2);
--echo #
--echo # III. Concurrent tests.
--echo #
connect (con1,localhost,root,,);
--echo # --> connection default
--echo #
--echo # Check that flush tables <list> with read lock
--echo # does not affect non-locked tables.
connection default;
--echo #
flush tables t1 with read lock;
--echo # --> connection con1;
connection con1;
select * from t1;
select * from t2;
insert into t2 (a) values (3);
--echo # --> connection default;
connection default;
unlock tables;
--echo #
--echo # Check that "FLUSH TABLES <list> WITH READ LOCK" is
--echo # compatible with active "FLUSH TABLES WITH READ LOCK".
--echo # The reverse is not true, since tables read-locked by
--echo # "FLUSH TABLES <list> WITH READ LOCK" can't be flushed.
flush tables with read lock;
--echo # --> connection con1;
connection con1;
flush table t1 with read lock;
select * from t1;
unlock tables;
--echo # --> connection default;
connection default;
unlock tables;
--echo #
--echo # Check that FLUSH TABLES t1 WITH READ LOCK
--echo # does not conflict with an existing FLUSH TABLES t2
--echo # WITH READ LOCK.
--echo #
flush table t1 with read lock;
--echo # --> connection con1
connection con1;
flush table t2 with read lock;
unlock tables;
--echo # --> connection default
connection default;
unlock tables;
--echo #
--echo # Check that FLUSH TABLES t1 WITH READ LOCK
--echo # does not conflict with SET GLOBAL read_only=1.
--echo #
set global read_only=1;
--echo # connection con1
connection con1;
flush table t1 with read lock;
unlock tables;
--echo # connection default
connection default;
set global read_only=0;
--echo #
--echo # Check that it's possible to read-lock
--echo # tables locked with FLUSH TABLE <list> WITH READ LOCK.
--echo #
flush tables t1, t2 with read lock;
--echo # connection con1
connection con1;
lock table t1 read, t2 read;
unlock tables;
--echo # connection default
connection default;
unlock tables;
--echo # --> connection con1
connection con1;
disconnect con1;
--source include/wait_until_disconnected.inc
connection default;
drop table t1, t2, t3;
--echo #
--echo # Bug#51710 FLUSH TABLES <view> WITH READ LOCK kills the server
--echo #
--disable_warnings
drop view if exists v1, v2, v3;
drop table if exists t1, v1;
--enable_warnings
create table t1 (a int);
create view v1 as select 1;
create view v2 as select * from t1;
create view v3 as select * from v2;
--error ER_WRONG_OBJECT
flush table v1, v2, v3 with read lock;
--error ER_WRONG_OBJECT
flush table v1 with read lock;
--error ER_WRONG_OBJECT
flush table v2 with read lock;
--error ER_WRONG_OBJECT
flush table v3 with read lock;
create temporary table v1 (a int);
--error ER_WRONG_OBJECT
flush table v1 with read lock;
drop view v1;
create table v1 (a int);
flush table v1 with read lock;
drop temporary table v1;
unlock tables;
drop view v2, v3;
drop table t1, v1;
--echo #
--echo # FLUSH TABLES <list> WITH READ LOCK and HANDLER
--echo #
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (a int, key a (a));
insert into t1 (a) values (1), (2), (3);
handler t1 open;
handler t1 read a next;
handler t1 read a next;
flush tables t1 with read lock;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
handler t1 read a next;
unlock tables;
--echo #
--echo # Sic: lost handler position.
--echo #
handler t1 read a next;
handler t1 close;
drop table t1;
--echo #
--echo # Bug#52117 Pending FLUSH TABLES <list> aborts
--echo # transactions unnecessarily.
--echo #
--disable_warnings
drop table if exists t1;
--enable_warnings
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
--echo # --> connection default
connection default;
create table t1 (a int);
begin;
select * from t1;
--echo # --> connection con1
connection con1;
--echo #
--echo # Issue a LOCK TABLE t1 READ. We could use HANDLER t1 OPEN
--echo # or a long-running select -- anything that
--echo # prevents FLUSH TABLE t1 from immediate completion would do.
--echo #
lock table t1 read;
--echo # --> connection con2
connection con2;
--echo #
--echo # FLUSH TABLE expels the table definition from the cache.
--echo # Sending 'flush table t1'...
send flush table t1;
--echo # --> connection default
connection default;
--echo # Let flush table sync in.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table flush"
and info = "flush table t1";
--source include/wait_condition.inc
send select * from t1;
--echo # --> connection con1
connection con1;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table flush"
and info = "select * from t1";
select * from t1;
unlock tables;
--echo # --> connection con2
connection con2;
--echo # Reaping 'flush table t1'...
reap;
--echo # --> connection default
connection default;
--echo # Reaping 'select * from t1'...
reap;
commit;
--echo #
--echo # Repeat the same test but with FLUSH TABLES
--echo #
begin;
select * from t1;
--echo # --> connection con1
connection con1;
--echo #
--echo # Issue a LOCK TABLE t1 READ.
--echo #
lock table t1 read;
--echo # --> connection con2
connection con2;
--echo #
--echo # FLUSH TABLES expels the table definition from the cache.
--echo # Sending 'flush tables'...
send flush tables;
--echo # --> connection default
connection default;
--echo # Let flush table sync in.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table flush"
and info = "flush tables";
--source include/wait_condition.inc
send select * from t1;
--echo # --> connection con1
connection con1;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table flush"
and info = "select * from t1";
select * from t1;
unlock tables;
--echo # --> connection con2
connection con2;
--echo # Reaping 'flush tables'...
reap;
--echo # --> connection default
connection default;
--echo # Reaping 'select * from t1'...
reap;
commit;
--echo # Cleanup
--echo # --> connection con1
connection con1;
disconnect con1;
--source include/wait_until_disconnected.inc
--echo # --> connection con2
connection con2;
disconnect con2;
--source include/wait_until_disconnected.inc
--echo # --> connection default
connection default;
drop table t1;
--echo #
--echo # Test for bug #55273 "FLUSH TABLE tm WITH READ LOCK for Merge table
--echo # causes assert failure".
--echo #
--disable_warnings
drop table if exists t1, t2, tm;
--enable_warnings
create table t1 (i int);
create table t2 (i int);
create table tm (i int) engine=merge union=(t1, t2);
insert into t1 values (1), (2);
insert into t2 values (3), (4);
--echo # The below statement should succeed and lock the merge
--echo # table for read. Only the merge table gets flushed,
--echo # not the underlying tables.
flush tables tm with read lock;
select * from tm;
--echo # Check that underlying tables are locked.
select * from t1;
select * from t2;
unlock tables;
--echo # This statement should succeed as well and flush
--echo # all tables in the list.
flush tables tm, t1, t2 with read lock;
select * from tm;
--echo # Naturally, underlying tables should be locked in this case too.
select * from t1;
select * from t2;
unlock tables;
drop tables tm, t1, t2;
--echo #
--echo # Test for bug #57006 "Deadlock between HANDLER and
--echo # FLUSH TABLES WITH READ LOCK".
--echo #
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection default;
create table t1 (i int);
create table t2 (i int);
handler t1 open;
--echo # Switching to connection 'con1'.
connection con1;
--echo # Sending:
--send flush tables with read lock
--echo # Switching to connection 'con2'.
connection con2;
--echo # Wait until FTWRL starts waiting for 't1' to be closed.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table flush"
and info = "flush tables with read lock";
--source include/wait_condition.inc
--echo # Switching to connection 'default'.
connection default;
--echo # The below statement should not cause deadlock.
--echo # Sending:
--send insert into t2 values (1)
--echo # Switching to connection 'con2'.
connection con2;
--echo # Wait until INSERT starts to wait for FTWRL to go away.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for global read lock"
and info = "insert into t2 values (1)";
--source include/wait_condition.inc
--echo # Switching to connection 'con1'.
connection con1;
--echo # FTWRL should be able to continue now.
--echo # Reap FTWRL.
--reap
unlock tables;
--echo # Switching to connection 'default'.
connection default;
--echo # Reap INSERT.
--reap
handler t1 close;
--echo # Cleanup.
connection con1;
disconnect con1;
--source include/wait_until_disconnected.inc
connection con2;
disconnect con2;
--source include/wait_until_disconnected.inc
connection default;
drop tables t1, t2;
--echo #
--echo # Bug#57649 FLUSH TABLES under FLUSH TABLES <list> WITH READ LOCK leads
--echo # to assert failure.
--echo #
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1 (a INT);
FLUSH TABLES t1 WITH READ LOCK;
# All these triggered the assertion
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
FLUSH TABLES;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW SET @a= 1;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
ALTER TABLE t1 COMMENT 'test';
UNLOCK TABLES;
DROP TABLE t1;
--echo #
--echo # Test for bug #12641342 - "61401: UPDATE PERFORMANCE DEGRADES
--echo # GRADUALLY IF A TRIGGER EXISTS".
--echo #
--echo # One of the side-effects of this bug was that a transaction which
--echo # involved DML statements requiring prelocking blocked a concurrent
--echo # FLUSH TABLES WITH READ LOCK for its whole duration, while the
--echo # correct behavior in this case is to block FTWRL only for the
--echo # duration of the individual DML statements.
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1 (id INT PRIMARY KEY, value INT);
INSERT INTO t1 VALUES (1, 1);
CREATE TRIGGER t1_au AFTER UPDATE ON t1 FOR EACH ROW SET @var = "a";
BEGIN;
UPDATE t1 SET value= value + 1 WHERE id = 1;
--echo # Switching to connection 'con1'.
connect(con1, localhost, root);
--echo # The below FLUSH TABLES WITH READ LOCK should succeed and
--echo # should not be blocked by the transaction in default connection.
FLUSH TABLES WITH READ LOCK;
UNLOCK TABLES;
disconnect con1;
--source include/wait_until_disconnected.inc
--echo # Switching to connection 'default'.
connection default;
COMMIT;
DROP TABLE t1;