Fix for bug #12641342 - "61401: UPDATE PERFORMANCE DEGRADES
GRADUALLY IF A TRIGGER EXISTS".

This bug manifested itself in two ways:

- Firstly, execution of any data-changing statement which
  required prelocking (i.e. involved a stored function or
  trigger) as part of a transaction slightly slowed down all
  subsequent statements in that transaction. So performance in
  a transaction which periodically involved such statements
  gradually degraded over time.
- Secondly, execution of any data-changing statement which
  required prelocking as part of a transaction prevented a
  concurrent FLUSH TABLES WITH READ LOCK from proceeding until
  the end of the transaction instead of the end of the
  particular statement (see the sketch below).
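
A minimal sketch of the second symptom, with an illustrative
table and trigger (essentially the scenario exercised by the
regression test added below):

  # connection 1
  CREATE TABLE t1 (id INT PRIMARY KEY, value INT);
  INSERT INTO t1 VALUES (1, 1);
  CREATE TRIGGER t1_au AFTER UPDATE ON t1 FOR EACH ROW SET @var = "a";
  BEGIN;
  UPDATE t1 SET value = value + 1 WHERE id = 1;  # fires the trigger,
                                                 # so prelocking is used
  # the UPDATE has finished, but the transaction is still open

  # connection 2
  FLUSH TABLES WITH READ LOCK;  # before the fix: waits until connection 1
                                # commits; after the fix: proceeds at once
  UNLOCK TABLES;

  # connection 1
  COMMIT;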
  
The problem was caused by incorrect handling of the metadata
lock used in the FTWRL implementation for statements requiring
prelocked mode.
Each statement which changes data acquires the global IX lock
with STATEMENT duration. This lock is supposed to block a
concurrent FTWRL from proceeding until the statement ends.
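
For example, with this lock working as intended, a concurrent
FTWRL waits only while such a statement is actually running
(the table name below is illustrative; the thread state is
what SHOW PROCESSLIST typically reports for this wait):

  # connection 1 (autocommit, no trigger or stored function)
  UPDATE big_t SET val = val + 1;   # long-running; holds the global IX lock
                                    # with STATEMENT duration while it runs

  # connection 2, started while the UPDATE is running
  FLUSH TABLES WITH READ LOCK;      # waits ("Waiting for global read lock")
                                    # and proceeds as soon as the UPDATE ends
  UNLOCK TABLES;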

When entering prelocked mode, the durations of all metadata
locks acquired so far were changed to EXPLICIT, to prevent
substatements from releasing these locks. When prelocked mode
was left, the durations of these metadata locks were changed
to TRANSACTIONAL (with a few exceptions) so that they could be
properly released at the end of the transaction.
Unfortunately, this meant that the global IX lock blocking
FTWRL, which was acquired with STATEMENT duration, was moved
to TRANSACTIONAL duration after execution of a statement
requiring prelocking.

Since each subsequent statement that required prelocking and
tried to acquire the global IX lock with STATEMENT duration got
a new instance of MDL_ticket, which was later moved to
TRANSACTIONAL duration, this led to unwarranted growth of the
number of tickets with TRANSACTIONAL duration in this
connection's MDL_context. As a result, searching for other
tickets in it became slow and acquisition of other metadata
locks by this transaction started to hog CPU.
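
The first symptom could be observed, for instance, by repeating
a trigger-firing UPDATE many times within one transaction and
timing the statements (table, trigger and repeat count below
are illustrative):

  CREATE TABLE t2 (id INT PRIMARY KEY, value INT);
  INSERT INTO t2 VALUES (1, 0);
  CREATE TRIGGER t2_au AFTER UPDATE ON t2 FOR EACH ROW SET @v = NEW.value;

  BEGIN;
  UPDATE t2 SET value = value + 1 WHERE id = 1;  # repeat this statement a few
  UPDATE t2 SET value = value + 1 WHERE id = 1;  # thousand times from the client
  # ...
  COMMIT;

Before the fix each such UPDATE left one more TRANSACTIONAL
ticket for the global IX lock in the connection's MDL_context,
so every statement ran slightly slower than the previous one;
with the fix the per-statement time stays flat.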

Moreover, this also meant that after execution of a statement
requiring prelocking a concurrent FTWRL was blocked until the
end of the transaction instead of the end of the statement.

This patch solves the problem by not moving locks to EXPLICIT
duration when a thread enters prelocked mode (unless it is a
real LOCK TABLES mode). This step turned out to be unnecessary,
as substatements don't try to release metadata locks.
Consequently, the global IX lock blocking FTWRL keeps its
STATEMENT duration, is properly released at the end of the
statement, and the issues described above go away.
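
For contrast, real LOCK TABLES mode still needs the EXPLICIT
duration, since its metadata locks must survive both statement
and transaction end until UNLOCK TABLES; a rough illustration
(arbitrary table name):

  # connection 1
  LOCK TABLES t1 WRITE;
  SELECT COUNT(*) FROM t1;   # statement ends, the lock must stay
  COMMIT;                    # transaction ends, the lock must still stay

  # connection 2
  DROP TABLE t1;             # keeps waiting ...

  # connection 1
  UNLOCK TABLES;             # ... until here, when EXPLICIT locks are released
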
Dmitry Lenev, 2011-06-16 19:18:16 +04:00
parent 08f1257dc5
commit db114007eb
6 changed files with 109 additions and 15 deletions

mysql-test result file:

@@ -466,3 +466,26 @@ ALTER TABLE t1 COMMENT 'test';
 ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
 UNLOCK TABLES;
 DROP TABLE t1;
+#
+# Test for bug #12641342 - "61401: UPDATE PERFORMANCE DEGRADES
+# GRADUALLY IF A TRIGGER EXISTS".
+#
+# One of side-effects of this bug was that a transaction which
+# involved DML statements requiring prelocking blocked concurrent
+# FLUSH TABLES WITH READ LOCK for the whole its duration, while
+# correct behavior in this case is to block FTWRL only for duration
+# of individual DML statements.
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1, 1);
+CREATE TRIGGER t1_au AFTER UPDATE ON t1 FOR EACH ROW SET @var = "a";
+BEGIN;
+UPDATE t1 SET value= value + 1 WHERE id = 1;
+# Switching to connection 'con1'.
+# The below FLUSH TABLES WITH READ LOCK should succeed and
+# should not be blocked by the transaction in default connection.
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+# Switching to connection 'default'.
+COMMIT;
+DROP TABLE t1;

mysql-test test file:

@@ -668,3 +668,36 @@ ALTER TABLE t1 COMMENT 'test';
 UNLOCK TABLES;
 DROP TABLE t1;
+--echo #
+--echo # Test for bug #12641342 - "61401: UPDATE PERFORMANCE DEGRADES
+--echo # GRADUALLY IF A TRIGGER EXISTS".
+--echo #
+--echo # One of side-effects of this bug was that a transaction which
+--echo # involved DML statements requiring prelocking blocked concurrent
+--echo # FLUSH TABLES WITH READ LOCK for the whole its duration, while
+--echo # correct behavior in this case is to block FTWRL only for duration
+--echo # of individual DML statements.
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+CREATE TABLE t1 (id INT PRIMARY KEY, value INT);
+INSERT INTO t1 VALUES (1, 1);
+CREATE TRIGGER t1_au AFTER UPDATE ON t1 FOR EACH ROW SET @var = "a";
+BEGIN;
+UPDATE t1 SET value= value + 1 WHERE id = 1;
+--echo # Switching to connection 'con1'.
+connect(con1, localhost, root);
+--echo # The below FLUSH TABLES WITH READ LOCK should succeed and
+--echo # should not be blocked by the transaction in default connection.
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+disconnect con1;
+--source include/wait_until_disconnected.inc
+--echo # Switching to connection 'default'.
+connection default;
+COMMIT;
+DROP TABLE t1;

sql/mdl.h:

@@ -152,11 +152,24 @@ enum enum_mdl_type {
 /** Duration of metadata lock. */
-enum enum_mdl_duration { MDL_STATEMENT= 0,
-                         MDL_TRANSACTION,
-                         MDL_EXPLICIT,
-                         /* This should be the last ! */
-                         MDL_DURATION_END };
+enum enum_mdl_duration {
+  /**
+    Locks with statement duration are automatically released at the end
+    of statement or transaction.
+  */
+  MDL_STATEMENT= 0,
+  /**
+    Locks with transaction duration are automatically released at the end
+    of transaction.
+  */
+  MDL_TRANSACTION,
+  /**
+    Locks with explicit duration survive the end of statement and transaction.
+    They have to be released explicitly by calling MDL_context::release_lock().
+  */
+  MDL_EXPLICIT,
+  /* This should be the last ! */
+  MDL_DURATION_END };
 /** Maximal length of key for metadata locking subsystem. */

sql/sql_class.cc:

@@ -3790,16 +3790,24 @@ void THD::set_mysys_var(struct st_my_thread_var *new_mysys_var)
 void THD::leave_locked_tables_mode()
 {
+  if (locked_tables_mode == LTM_LOCK_TABLES)
+  {
+    /*
+      When leaving LOCK TABLES mode we have to change the duration of most
+      of the metadata locks being held, except for HANDLER and GRL locks,
+      to transactional for them to be properly released at UNLOCK TABLES.
+    */
+    mdl_context.set_transaction_duration_for_all_locks();
+    /*
+      Make sure we don't release the global read lock and commit blocker
+      when leaving LTM.
+    */
+    global_read_lock.set_explicit_lock_duration(this);
+    /* Also ensure that we don't release metadata locks for open HANDLERs. */
+    if (handler_tables_hash.records)
+      mysql_ha_set_explicit_lock_duration(this);
+  }
   locked_tables_mode= LTM_NONE;
-  mdl_context.set_transaction_duration_for_all_locks();
-  /*
-    Make sure we don't release the global read lock and commit blocker
-    when leaving LTM.
-  */
-  global_read_lock.set_explicit_lock_duration(this);
-  /* Also ensure that we don't release metadata locks for open HANDLERs. */
-  if (handler_tables_hash.records)
-    mysql_ha_set_explicit_lock_duration(this);
 }
 void THD::get_definer(LEX_USER *definer)

sql/sql_class.h:

@@ -2795,7 +2795,19 @@ public:
   {
     DBUG_ASSERT(locked_tables_mode == LTM_NONE);
-    mdl_context.set_explicit_duration_for_all_locks();
+    if (mode_arg == LTM_LOCK_TABLES)
+    {
+      /*
+        When entering LOCK TABLES mode we should set explicit duration
+        for all metadata locks acquired so far in order to avoid releasing
+        them till UNLOCK TABLES statement.
+        We don't do this when entering prelocked mode since sub-statements
+        don't release metadata locks and restoring status-quo after leaving
+        prelocking mode gets complicated.
+      */
+      mdl_context.set_explicit_duration_for_all_locks();
+    }
     locked_tables_mode= mode_arg;
   }
   void leave_locked_tables_mode();

sql/sql_parse.cc:

@@ -2027,6 +2027,11 @@ mysql_execute_command(THD *thd)
   */
   if (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_BEGIN))
   {
+    /*
+      Note that this should never happen inside of stored functions
+      or triggers as all such statements prohibited there.
+    */
+    DBUG_ASSERT(! thd->in_sub_stmt);
     /* Commit or rollback the statement transaction. */
     thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
     /* Commit the normal transaction if one is active. */