mirror of
https://github.com/MariaDB/server.git
synced 2025-01-19 21:42:35 +01:00
416ab8532e
The problem is that since MyISAM's concurrent_insert is on by default some concurrent SELECT statements might not see changes made by INSERT statements in other connections, even if the INSERT statement has returned. The solution is to disable concurrent_insert so that INSERT statements returns after the data is actually visible to other statements. mysql-test/r/flush_read_lock_kill.result: Restore old value of @@global.concurrent_insert mysql-test/r/kill.result: Restore old value of @@global.concurrent_insert mysql-test/r/sp_notembedded.result: Update test case result mysql-test/t/flush_read_lock_kill.test: Restore old value of @@global.concurrent_insert so it doesn't affect other tests. mysql-test/t/kill.test: Restore old value of @@global.concurrent_insert so it doesn't affect other tests. mysql-test/t/sp_notembedded.test: Disable and restore concurrent_insert value at the end of the test case. The test case for Bug 29936 requires that the inserted rows need to be visible before a SELECT statement is queued in another connection. Remove sleep at the start of the test, it's not necessary to log the result of the processlist command, showing the warnings has the same end result.
65 lines
2.1 KiB
Text
# Test: FLUSH TABLES WITH READ LOCK must be killable while it waits
# for running commits to finish (historically it was not).
#
# Let's see if FLUSH TABLES WITH READ LOCK can be killed when waiting
# for running commits to finish (in the past it could not)
# This will not be a meaningful test on non-debug servers so will be
# skipped.
# If running mysql-test-run --debug, the --debug added by
# mysql-test-run to the mysqld command line will override the one of
# -master.opt. But this test is designed to still pass then (though it
# won't test anything interesting).

# This also won't work with the embedded server test
-- source include/not_embedded.inc

-- source include/have_debug.inc

# Disable concurrent inserts to avoid test failures when reading the
# connection id which was inserted into a table by another thread.
# (With MyISAM concurrent_insert on, a SELECT in another connection may
# not yet see the inserted row even after the INSERT has returned.)
set @old_concurrent_insert= @@global.concurrent_insert;
set @@global.concurrent_insert= 0;

connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;

--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (kill_id int);
# Record con1's connection id so con2 can kill it later.
insert into t1 values(connection_id());

# Thanks to the parameter we passed to --debug, this FLUSH will
# block on a debug build running with our --debug=make_global... It
# will block until killed. In other cases (non-debug build or other
# --debug) it will succeed immediately

connection con1;
send flush tables with read lock;

# kill con1
connection con2;
# Read kill_id into @id; the expression always evaluates to 0 so the
# recorded result file stays independent of the actual connection id.
select ((@id := kill_id) - kill_id) from t1;

# Wait for the debug sync point, test won't run on non-debug
# builds anyway.
let $wait_condition=
  select count(*) = 1 from information_schema.processlist
  where state = "Waiting for all running commits to finish"
  and info = "flush tables with read lock";
--source include/wait_condition.inc

kill connection @id;

connection con1;
# On debug builds it will be error 1053 (killed); on non-debug, or
# debug build running without our --debug=make_global..., will be
# error 0 (no error). The only important thing to test is that on
# debug builds with our --debug=make_global... we don't hang forever.
--error 0,1053,2013
reap;

connection con2;
drop table t1;
connection default;

# Restore global concurrent_insert value so later tests are unaffected.
set @@global.concurrent_insert= @old_concurrent_insert;