mariadb/mysql-test/t/multi_update2.test
unknown 92fc426386 Speed up mtr --parallel=<lots> by scheduling some slow tests earlier.
The patch also fixes a race in rpl_stop_slave.test.

On machines with lots of CPU and memory, something like `mtr --parallel=10`
can speed up the test suite enormously. However, we have a few test cases
that run for long (several minutes), and if we are unlucky and happen to
schedule those towards the end of the test suite, we end up with most
workers idle while waiting for the last slow test to end, significantly
delaying the finish of the entire suite.

Improve this by marking the offending tests as "long" and trying to
schedule those tests early. This shortens the period at the end of the
test suite run during which some workers sit idle, waiting for the
remaining workers to each finish their last test.
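
As a concrete illustration of the mechanism (and assuming include/long_test.inc
acts purely as a marker that mtr recognizes when ordering tests), a test opts
in with a single line near the top of its .test file, exactly as the test
shown below does:

    --source include/long_test.inc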

Also, the rpl_stop_slave test had a race that could cause it to hit a
300-second debug_sync timeout; this is fixed.

Testing on a 4-core, 8GB machine shows that this patch speeds up the test
suite by around 30% with --parallel=10 (debug build), allowing the entire
suite to run in 5 minutes.
2011-01-03 15:33:39 +01:00

--source include/long_test.inc
#
# Test of update statement that uses many tables.
#
#
# If we are running with
# - Valgrind -> $VALGRIND_TEST <> 0
# - debug tracing -> @@session.debug LIKE '%trace%'
# then the resource consumption (storage space needed, runtime) will be extreme.
# Therefore we require that the option "--big-test" is also set.
#
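# Note: $VALGRIND_TEST and $BIG_TEST are environment variables set by mtr
# when the suite is run with --valgrind and --big-test respectively.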
let $need_big= 0;
--disable_query_log
--error 0,ER_UNKNOWN_SYSTEM_VARIABLE
SET @aux = @@session.debug;
if (!$mysql_errno)
{
   # We have returncode 0 = the server system variable @@session.debug exists.
   # But we only need "--big-test" in case of tracing.
   if (`SELECT @@session.debug LIKE '%trace%'`)
   {
      let $need_big= 1;
   }
}
--enable_query_log
if ($VALGRIND_TEST)
{
   # We are running with Valgrind
   inc $need_big;
}
if (`SELECT '$BIG_TEST' = '' AND $need_big = 1`)
{
   --skip Need "--big-test" when running with the option "--debug" or "--valgrind"
}
#
# Bug#1820 Rows not deleted from second table on multi-table delete
#
--disable_warnings
DROP TABLE IF EXISTS t1,t2;
--enable_warnings
CREATE TABLE t1 ( a INT NOT NULL, b INT NOT NULL) ;
--echo # The protocolling of many inserts into t1 is suppressed.
--disable_query_log
INSERT INTO t1 VALUES (1,1),(2,2),(3,3),(4,4);
let $1=19;
set @d=4;
begin;
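# Starting from 4 rows, each pass doubles the table: 4 * 2^19 = 2097152 rows.
# @d holds the current row count (and maximum value), so a+@d and b+@d never
# collide with existing values.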
while ($1)
{
   eval INSERT INTO t1 SELECT a+@d,b+@d FROM t1;
   eval SET @d=@d*2;
   dec $1;
}
commit;
--enable_query_log
ALTER TABLE t1 ADD INDEX i1(a);
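# Trim the 2097152 generated rows down to the 2000000 rows with a <= 2000000;
# the range condition can use the new index i1 on a.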
DELETE FROM t1 WHERE a > 2000000;
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 SELECT * FROM t1;
SELECT 't2 rows before small delete', COUNT(*) FROM t1;
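# Small multi-table delete: only the row with a = 1 matches, and it must be
# removed from both t1 and t2 (Bug#1820: rows were not deleted from the
# second table).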
DELETE t1,t2 FROM t1,t2 WHERE t1.b=t2.a AND t1.a < 2;
SELECT 't2 rows after small delete', COUNT(*) FROM t2;
SELECT 't1 rows after small delete', COUNT(*) FROM t1;
## Try deleting many rows
DELETE t1,t2 FROM t1,t2 WHERE t1.b=t2.a AND t1.a < 100*1000;
SELECT 't2 rows after big delete', COUNT(*) FROM t2;
SELECT 't1 rows after big delete', COUNT(*) FROM t1;
DROP TABLE t1,t2;