mirror of
https://github.com/MariaDB/server.git
synced 2025-04-11 01:35:33 +02:00

Also fixes MDEV-23929: innodb_flush_neighbors is not being ignored for system tablespace on SSD When the maximum configured number of file is exceeded, InnoDB will close data files. We used to maintain a fil_system.LRU list and a counter fil_node_t::n_pending to achieve this, at the huge cost of multiple fil_system.mutex operations per I/O operation. fil_node_open_file_low(): Implement a FIFO replacement policy: The last opened file will be moved to the end of fil_system.space_list, and files will be closed from the start of the list. However, we will not move tablespaces in fil_system.space_list while i_s_tablespaces_encryption_fill_table() is executing (producing output for INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION) because it may cause information of some tablespaces to go missing. We also avoid this in mariabackup --backup because datafiles_iter_next() assumes that the ordering is not changed. IORequest: Fold more parameters to IORequest::type. fil_space_t::io(): Replaces fil_io(). fil_space_t::flush(): Replaces fil_flush(). OS_AIO_IBUF: Remove. We will always issue synchronous reads of the change buffer pages in buf_read_page_low(). We will always ignore some errors for background reads. This should reduce fil_system.mutex contention a little. fil_node_t::complete_write(): Replaces fil_node_t::complete_io(). On both read and write completion, fil_space_t::release_for_io() will have to be called. fil_space_t::io(): Do not acquire fil_system.mutex in the normal code path. xb_delta_open_matching_space(): Do not try to open the system tablespace which was already opened. This fixes a file sharing violation in mariabackup --prepare --incremental. Reviewed by: Vladislav Vaintroub
70 lines
1.7 KiB
Text
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
# This test is slow on buildbot.
--source include/big_test.inc

# The cache-pollution loop below deliberately exceeds innodb_open_files,
# so InnoDB is expected to log this warning; suppress it.
call mtr.add_suppression("InnoDB: innodb_open_files=.* is exceeded");

SET @save_tdc= @@GLOBAL.table_definition_cache;
SET @save_toc= @@GLOBAL.table_open_cache;

# InnoDB plugin essentially ignores table_definition_cache size
# and hard-wires it to 400, which also is the minimum allowed value.
SET GLOBAL table_definition_cache= 400;
SET GLOBAL table_open_cache= 1024;

CREATE TABLE to_be_evicted(a INT PRIMARY KEY, b INT NOT NULL) ENGINE=InnoDB;
INSERT INTO to_be_evicted VALUES(1,2),(2,1);

# On a second connection, start an online ADD UNIQUE INDEX and pause it
# at row_log_apply_before, after the clustered index has been scanned.
connect(ddl,localhost,root,,);
SET DEBUG_SYNC = 'row_log_apply_before SIGNAL scanned WAIT_FOR got_duplicate';
--send
ALTER TABLE to_be_evicted ADD UNIQUE INDEX(b);

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR scanned';

# During the ADD UNIQUE INDEX, start a transaction that inserts a duplicate
# and then hogs the table lock, so that the unique index cannot be dropped.
BEGIN;
INSERT INTO to_be_evicted VALUES(3, 2);
SET DEBUG_SYNC = 'now SIGNAL got_duplicate';

# The row-log apply must now detect the duplicate and fail the ALTER.
connection ddl;
--error ER_DUP_ENTRY
reap;

disconnect ddl;
connection default;
# Release the table lock.
COMMIT;
SET DEBUG_SYNC = RESET;

# Allow cache eviction.
FLUSH TABLES;
--disable_query_log

# Pollute the cache with many tables, so that our table will be evicted.
let $N=1000;
let $loop=$N;
while ($loop)
{
  eval CREATE TABLE t_$loop(id INT)ENGINE=InnoDB;
  dec $loop;
}

# Hopefully let InnoDB evict the tables.
sleep 10;

let $loop=$N;
while ($loop)
{
  eval DROP TABLE t_$loop;
  dec $loop;
}

# Re-enable logging so the cleanup statements appear in the result file;
# --disable_query_log above was previously left unbalanced.
--enable_query_log

SET GLOBAL table_definition_cache= @save_tdc;
SET GLOBAL table_open_cache= @save_toc;

DROP TABLE to_be_evicted;