mariadb/mysql-test/suite/innodb/t/innodb-index-debug.test
Marko Mäkelä 88733282fb MDEV-32050: Look up tables in the purge coordinator
The InnoDB table lookup in purge worker threads is a bottleneck that can
degrade a slow shutdown to the point of utilizing fewer than 2 threads.
Let us fix that
bottleneck by constructing a local lookup table that does not require any
synchronization while the undo log records of the current batch
are being processed.

TRX_PURGE_TABLE_BUCKETS: The initial number of std::unordered_map
hash buckets used during a purge batch. This could avoid some
resizing and rehashing in trx_purge_attach_undo_recs().

purge_node_t::tables: A lookup table from table ID to an already
looked up and locked table. Replaces many fields.

trx_purge_attach_undo_recs(): Look up each table in the purge batch
only once.
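
As a rough, self-contained sketch of the idea behind TRX_PURGE_TABLE_BUCKETS,
purge_node_t::tables and the look-up-once loop in trx_purge_attach_undo_recs():
every name, type and constant below is an illustrative stand-in, not the
actual InnoDB code.

// Minimal sketch of a per-batch table lookup cache. All names, types and the
// bucket constant are illustrative stand-ins, not the real InnoDB definitions.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

using table_id_t = uint64_t;

// Stand-in for a dictionary table that has already been looked up and locked.
struct locked_table { std::string name; };

// Stand-in for TRX_PURGE_TABLE_BUCKETS: pre-sizing the map avoids some
// resizing and rehashing while one purge batch is being processed.
constexpr std::size_t PURGE_TABLE_BUCKETS = 128;

// Placeholder for the expensive, synchronized dictionary lookup + MDL.
static std::optional<locked_table> open_and_lock(table_id_t id) {
  return locked_table{"t" + std::to_string(id)};
}

class purge_batch {
public:
  purge_batch() : tables(PURGE_TABLE_BUCKETS) {}

  // Look up each table at most once per batch; later undo records for the
  // same table ID are served from the local map without synchronization.
  locked_table *get_table(table_id_t id) {
    auto it = tables.find(id);
    if (it == tables.end())
      it = tables.emplace(id, open_and_lock(id)).first;
    return it->second ? &*it->second : nullptr;
  }

  // At the end of the batch all tables are closed and locks released.
  void end_batch() { tables.clear(); }

private:
  std::unordered_map<table_id_t, std::optional<locked_table>> tables;
};

int main() {
  purge_batch batch;
  for (table_id_t id : {7, 7, 7, 11})      // repeated IDs hit the local cache
    if (locked_table *t = batch.get_table(id))
      std::cout << t->name << '\n';
  batch.end_batch();
}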

trx_purge(): Close all tables and release MDL at the end of the batch.

trx_purge_table_open(), trx_purge_table_acquire(): Open a table in purge
and acquire a metadata lock on it. This replaces
dict_table_open_on_id<true>() and dict_acquire_mdl_shared().

purge_sys_t::close_and_reopen(): In case of an MDL conflict, close and
reopen all tables that are covered by the current purge batch.
It may be that some of the tables have been dropped meanwhile and can
be ignored. This replaces wait_SYS() and wait_FTS().

row_purge_parse_undo_rec(): Make purge_coordinator_task issue an
MDL warrant to any purge_worker_task which might need it
when innodb_purge_threads>1.

purge_node_t::end(): Clear the MDL warrant.
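
In the same spirit, a hedged sketch of what purge_sys_t::close_and_reopen()
does conceptually: on a lock conflict, every table reference held by the batch
is released and looked up again, and tables that were dropped in the meantime
are remembered as dropped. Again, all names and the toy dictionary are made up
for illustration.

// Sketch of close-and-reopen on a lock conflict. The toy "dictionary" and all
// names are invented for illustration; none of this is the real InnoDB API.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>
#include <unordered_set>

using table_id_t = uint64_t;
struct locked_table { std::string name; };

// Toy data dictionary: the set of tables that still exist.
static std::unordered_set<table_id_t> dictionary{7, 11};

static std::optional<locked_table> open_and_lock(table_id_t id) {
  if (!dictionary.count(id))
    return std::nullopt;                        // dropped meanwhile: ignore it
  return locked_table{"t" + std::to_string(id)};
}

// id -> table; std::nullopt means "known to be dropped, skip its records".
using batch_tables = std::unordered_map<table_id_t, std::optional<locked_table>>;

// Release every table reference of the current batch, then look the tables up
// again; tables dropped in the meantime stay cached as dropped.
static void close_and_reopen(batch_tables &tables) {
  for (auto &entry : tables) {
    entry.second.reset();                       // close + release the old lock
    entry.second = open_and_lock(entry.first);  // reopen, or mark as dropped
  }
}

int main() {
  batch_tables tables;
  tables.emplace(7, open_and_lock(7));
  tables.emplace(11, open_and_lock(11));
  dictionary.erase(11);                         // table 11 is dropped concurrently
  close_and_reopen(tables);
  for (const auto &[id, table] : tables)
    std::cout << id << (table ? " reopened\n" : " dropped\n");
}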

Reviewed by: Vladislav Lesin and Vladislav Vaintroub
2023-10-25 10:08:20 +03:00


-- source include/have_debug.inc
-- source include/have_innodb.inc
-- source include/count_sessions.inc
-- source include/have_debug_sync.inc
# This test is slow on buildbot.
--source include/big_test.inc
let $MYSQLD_DATADIR= `select @@datadir`;
SET GLOBAL innodb_max_purge_lag_wait=0;
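# Keep a read view open in a second connection so that purge cannot remove
# old row versions while the test is running.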
connect (stop_purge,localhost,root);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
#
# Test for BUG# 12739098, check whether trx->error_status is reset on error.
#
CREATE TABLE t1(c1 INT NOT NULL, c2 INT, PRIMARY KEY(c1)) Engine=InnoDB;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (1,1),(2,2),(3,3),(4,4),(5,5);
SET @saved_debug_dbug = @@SESSION.debug_dbug;
SET DEBUG_DBUG='+d,ib_build_indexes_too_many_concurrent_trxs,ib_rename_indexes_too_many_concurrent_trxs,ib_drop_index_too_many_concurrent_trxs';
--error ER_TOO_MANY_CONCURRENT_TRXS
ALTER TABLE t1 ADD UNIQUE INDEX(c2);
SET DEBUG_DBUG = @saved_debug_dbug;
SHOW CREATE TABLE t1;
DROP TABLE t1;
#
# Test for Bug#13861218 Records are not fully sorted during index creation
#
CREATE TABLE bug13861218 (c1 INT NOT NULL, c2 INT NOT NULL, INDEX(c2))
ENGINE=InnoDB;
INSERT INTO bug13861218 VALUES (8, 0), (4, 0), (0, 0);
SET DEBUG_DBUG = '+d,ib_row_merge_buf_add_two';
# Force creation of a PRIMARY KEY on c1 to see what happens on the index(c2).
# No crash here, because n_uniq for c2 includes the clustered index fields
CREATE UNIQUE INDEX ui ON bug13861218(c1);
SET DEBUG_DBUG = @saved_debug_dbug;
DROP TABLE bug13861218;
CREATE TABLE bug13861218 (c1 INT NOT NULL, c2 INT UNIQUE) ENGINE=InnoDB;
INSERT INTO bug13861218 VALUES (8, NULL), (4, NULL), (0, NULL);
SET DEBUG_DBUG = '+d,ib_row_merge_buf_add_two';
# Force creation of a PRIMARY KEY on c1 to see what happens on the index(c2).
# assertion failure: ut_ad(cmp_dtuple_rec(dtuple, rec, rec_offsets) > 0)
CREATE UNIQUE INDEX ui ON bug13861218(c1);
SET DEBUG_DBUG = @saved_debug_dbug;
DROP TABLE bug13861218;
--echo #
--echo # Bug #17657223 EXCESSIVE TEMPORARY FILE USAGE IN ALTER TABLE
--echo #
# Error during file creation in alter operation
create table t480(a serial)engine=innodb;
insert into t480
values(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
(),(),(),(),(),(),(),();
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;
insert into t480 select 0 from t480;
# Error during file write in alter operation.
create table t1(f1 int auto_increment not null,
f2 char(200) not null, f3 char(200) not null,
primary key(f1,f2,f3), key(f1))engine=innodb;
insert into t1 select NULL,'aaa','bbb' from t480;
insert into t1 select NULL,'aaaa','bbbb' from t480;
insert into t1 select NULL,'aaaaa','bbbbb' from t480;
insert into t1 select NULL,'aaaaaa','bbbbbb' from t480;
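# Inject a failure while writing a merge sort temporary file; the ALTER below
# is expected to fail with ER_TEMP_FILE_WRITE_FAILURE.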
SET DEBUG_DBUG = '+d,row_merge_write_failure';
--error ER_TEMP_FILE_WRITE_FAILURE
alter table t1 drop primary key,add primary key(f2,f1);
SET DEBUG_DBUG = @saved_debug_dbug;
drop table t1;
# Optimize table via inplace algorithm
connect (con1,localhost,root);
create table t1(k1 int auto_increment primary key,
k2 char(200),k3 char(200))engine=innodb;
insert into t1 values(NULL,'a','b'), (NULL,'aa','bb');
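# Pause OPTIMIZE (an online table rebuild) after the merge scan so that the
# concurrent inserts below go into the online row log and are applied at the
# end of the rebuild.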
SET DEBUG_SYNC= 'row_merge_after_scan
SIGNAL opened WAIT_FOR flushed';
send optimize table t1;
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
INSERT INTO t1 select NULL,'aaa','bbb' from t480;
SET DEBUG_SYNC= 'now SIGNAL flushed';
connection con1;
--enable_info
--echo /*con1 reap*/ Optimize table t1;
reap;
--disable_info
SELECT COUNT(k1),k2,k3 FROM t1 GROUP BY k2,k3;
drop table t1;
# Log file creation failure.
create table t1(k1 int auto_increment primary key,
k2 char(200),k3 char(200))engine=innodb;
INSERT INTO t1 VALUES(1, "test", "test");
SET DEBUG_SYNC= 'row_merge_after_scan
SIGNAL opened WAIT_FOR flushed';
send ALTER TABLE t1 FORCE, ADD COLUMN k4 int;
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
SET DEBUG_DBUG = '+d,row_log_tmpfile_fail';
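# The injected failure makes the creation of the row log temporary file fail,
# so the pending ALTER TABLE is expected to return ER_OUT_OF_RESOURCES.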
INSERT INTO t1 select NULL,'aaa','bbb' from t480;
INSERT INTO t1 select NULL,'aaaa','bbbb' from t480;
SET DEBUG_SYNC= 'now SIGNAL flushed';
SET DEBUG_DBUG = @saved_debug_dbug;
connection con1;
--echo /*con1 reap*/ ALTER TABLE t1 FORCE, ADD COLUMN k4 int;
--error ER_OUT_OF_RESOURCES
reap;
SELECT COUNT(k1),k2,k3 FROM t1 GROUP BY k2,k3;
disconnect con1;
connection default;
show create table t1;
drop table t1;
drop table t480;
--echo #
--echo # MDEV-12827 Assertion failure when reporting duplicate key error
--echo # in online table rebuild
--echo #
CREATE TABLE t1 (j INT UNIQUE, i INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES(2, 2);
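# Pause the online rebuild before the row log is applied, insert a duplicate
# for the new UNIQUE INDEX(i), and expect the ALTER to fail with ER_DUP_ENTRY
# when the log is applied.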
--connect (con1,localhost,root,,test)
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built WAIT_FOR log';
--send
ALTER TABLE t1 DROP j, ADD UNIQUE INDEX(i), FORCE;
--connection default
SET DEBUG_SYNC='now WAIT_FOR built';
SET DEBUG_DBUG='+d,row_ins_row_level';
INSERT INTO t1 (i) VALUES (0),(0);
SET DEBUG_SYNC='now SIGNAL log';
SET DEBUG_DBUG=@saved_debug_dbug;
--connection con1
--error ER_DUP_ENTRY
reap;
DELETE FROM t1;
ALTER TABLE t1 ADD UNIQUE INDEX(i);
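# This time the duplicate is rejected by the UPDATE itself (the UNIQUE index
# on i already exists in the old table), so applying the row log succeeds and
# the online rebuild completes.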
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL built2 WAIT_FOR log2';
--send
ALTER TABLE t1 DROP j, FORCE;
--connection default
SET DEBUG_SYNC='now WAIT_FOR built2';
INSERT INTO t1 (i) VALUES (0),(1);
--error ER_DUP_ENTRY
UPDATE t1 SET i=0;
SET DEBUG_SYNC='now SIGNAL log2';
--connection con1
reap;
--disconnect con1
--disconnect stop_purge
--connection default
SET DEBUG_SYNC='RESET';
DROP TABLE t1;
SET DEBUG_SYNC='RESET';
--source include/wait_until_count_sessions.inc
--echo #
--echo # BUG#21612714 ALTER TABLE SORTING SKIPPED WHEN CHANGE PK AND DROP
--echo # LAST COLUMN OF OLD PK
--echo #
SET DEBUG_DBUG = '+d,innodb_alter_table_pk_assert_no_sort';
--source suite/innodb/include/alter_table_pk_no_sort.inc
SET DEBUG_DBUG = @saved_debug_dbug;