--source include/innodb_page_size_small.inc
--source include/innodb_encrypt_log.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
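
# This query is evaluated repeatedly below to display the InnoDB DDL counters
# (INNODB_METRICS, subsystem 'ddl') as the online ALTER operations progress.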
let $innodb_metrics_select=
SELECT name, count FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE subsystem = 'ddl';

call mtr.add_suppression("InnoDB: Warning: Small buffer pool size");
# these will be triggered by DISCARD TABLESPACE
call mtr.add_suppression("InnoDB: Error: table 'test/t1'");
call mtr.add_suppression("MariaDB is trying to open a table handle but the .ibd file for");

# DISCARD TABLESPACE needs file-per-table
SET @global_innodb_file_per_table_orig = @@global.innodb_file_per_table;
SET GLOBAL innodb_file_per_table = on;

# Save the initial number of concurrent sessions.
--source include/count_sessions.inc

CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 INT NOT NULL, c3 CHAR(255) NOT NULL)
ENGINE = InnoDB;
INSERT INTO t1 VALUES (1,1,''), (2,2,''), (3,3,''), (4,4,''), (5,5,'');

SET GLOBAL innodb_monitor_enable = module_ddl;
eval $innodb_metrics_select;

SET DEBUG_SYNC = 'RESET';
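# Start an INSERT of a duplicate PRIMARY KEY value and pause it inside
# write_row, so that the ALTER TABLE attempted by con1 below has to wait
# for the metadata lock held by this statement.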
SET DEBUG_SYNC = 'write_row_noreplace SIGNAL have_handle WAIT_FOR go_ahead';
--send
INSERT INTO t1 VALUES(1,2,3);

connect (con1,localhost,root,,);

# This should block at the end, because the INSERT in the default connection
# is holding a metadata lock.
SET DEBUG_SYNC = 'now WAIT_FOR have_handle';
SET lock_wait_timeout = 1;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SET DEBUG_SYNC = 'now SIGNAL go_ahead';

connection default;
--error ER_DUP_ENTRY
reap;
eval $innodb_metrics_select;

connection con1;
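# Simulate out-of-memory failures in the prepare phase and in the in-place
# phase of ALTER TABLE using the innodb_OOM_* debug injection points. Both
# attempts must fail with ER_OUT_OF_RESOURCES; the rebuild succeeds once the
# injection is removed.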
SET @saved_debug_dbug = @@SESSION.debug_dbug;
SET DEBUG_DBUG = '+d,innodb_OOM_prepare_inplace_alter';
--error ER_OUT_OF_RESOURCES
ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ALGORITHM=INPLACE, LOCK=NONE;
SET SESSION DEBUG = @saved_debug_dbug;
SET SESSION DEBUG = '+d,innodb_OOM_inplace_alter';
--error ER_OUT_OF_RESOURCES
ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ALGORITHM=INPLACE, LOCK=NONE;
SET SESSION DEBUG = @saved_debug_dbug;
ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ALGORITHM=INPLACE, LOCK=NONE;

connection default;
SHOW CREATE TABLE t1;
# Insert a duplicate entry (4) for the upcoming UNIQUE INDEX(c2).
BEGIN;
INSERT INTO t1 VALUES(7,4,2);

connection con1;
# This DEBUG_SYNC should not kick in yet, because the duplicate key will be
# detected before we get a chance to apply the online log.
SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL scanned WAIT_FOR insert_done';
# This will be a lock wait timeout on the meta-data lock,
# because the transaction inserting (7,4,2) is still active.
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 DROP PRIMARY KEY, ADD UNIQUE INDEX(c2);

connection default;
COMMIT;

connection con1;
--error ER_DUP_ENTRY
ALTER TABLE t1 DROP PRIMARY KEY, ADD UNIQUE INDEX(c2);

connection default;
DELETE FROM t1 WHERE c1 = 7;

connection con1;
ALTER TABLE t1 DROP PRIMARY KEY, ADD UNIQUE INDEX(c2), ROW_FORMAT=COMPACT,
LOCK = SHARED, ALGORITHM = INPLACE;

# The previous DEBUG_SYNC should be ignored, because an exclusive lock
# has been requested and the online log is not being allocated.
ALTER TABLE t1 ADD UNIQUE INDEX(c2),
LOCK = EXCLUSIVE, ALGORITHM = INPLACE;

SHOW CREATE TABLE t1;
# Now the previous DEBUG_SYNC should kick in.
--send
ALTER TABLE t1 DROP INDEX c2, ADD PRIMARY KEY(c1);

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR scanned';
eval $innodb_metrics_select;

# Insert a duplicate entry (4) for the already started UNIQUE INDEX(c1).
INSERT INTO t1 VALUES(4,7,2);
SET DEBUG_SYNC = 'now SIGNAL insert_done';

connection con1;
# Because the modification log will be applied in order and we did
# not roll back before the log apply, there will be a duplicate key
# error on the (4,7,2).
--error ER_DUP_ENTRY
reap;
DELETE FROM t1 WHERE c1=4 and c2=7;
connection default;
ROLLBACK;

connection con1;
SHOW CREATE TABLE t1;
# Now, rebuild the table without any concurrent DML, while no duplicate exists.
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP PRIMARY KEY, ADD UNIQUE INDEX(c2), ALGORITHM = INPLACE;
ALTER TABLE t1 DROP INDEX c2, ADD PRIMARY KEY(c1), ALGORITHM = INPLACE;
eval $innodb_metrics_select;

connection default;
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES(6,3,1);
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES(7,4,2);
DROP INDEX c2_2 ON t1;
BEGIN;
INSERT INTO t1 VALUES(7,4,2);
ROLLBACK;

connection con1;
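# Save the connection ID of con1; the default connection uses it below to
# KILL the ALTER TABLE while the online log is being applied. Killing our
# own query here is expected to merely interrupt the KILL statement itself.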
let $ID= `SELECT @id := CONNECTION_ID()`;
--error ER_QUERY_INTERRUPTED
KILL QUERY @id;
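
# Rebuild the table, pausing before the first log apply and again at the
# second apply point, so that the default connection can run some DML and
# then KILL this ALTER TABLE.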
SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL rebuilt WAIT_FOR dml_done';
SET DEBUG_SYNC = 'row_log_table_apply2_before SIGNAL applied WAIT_FOR kill_done';
--send
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR rebuilt';
eval $innodb_metrics_select;
BEGIN;
INSERT INTO t1 VALUES(7,4,2);
ROLLBACK;
SET DEBUG_SYNC = 'now SIGNAL dml_done WAIT_FOR applied';
let $ignore= `SELECT @id := $ID`;
KILL QUERY @id;
SET DEBUG_SYNC = 'now SIGNAL kill_done';

connection con1;
--error ER_QUERY_INTERRUPTED
reap;
eval $innodb_metrics_select;

connection default;
CHECK TABLE t1;
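# Grow the table; a larger data set lets the UPDATE statements further below
# generate enough online modification log for the log-overflow test, and
# gives the EXPLAIN below a meaningful row estimate.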
INSERT INTO t1 SELECT 5 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 10 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 20 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 40 + c1, c2, c3 FROM t1;
# Purge may or may not have cleaned up the DELETE FROM t1 WHERE c1 = 7;
--replace_column 9 ROWS
EXPLAIN SELECT COUNT(*) FROM t1 WHERE c2 > 3;
ANALYZE TABLE t1;
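
# Snapshot the merge-sort and row-log encryption counters; their deltas are
# checked after the rebuilds below.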
SET @merge_encrypt_0=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_encrypted');
SET @merge_decrypt_0=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_decrypted');
SET @rowlog_encrypt_0=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_encrypted');

connection con1;
SHOW CREATE TABLE t1;
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;

# Exceed the configured innodb_online_alter_log_max_size.
# The actual limit is a multiple of innodb_sort_buf_size,
# because that is the size of the in-memory log buffers.
SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL rebuilt2 WAIT_FOR dml2_done';
# Ensure that the ALTER TABLE will be executed even with some concurrent DML.
SET lock_wait_timeout = 10;
--send
ALTER TABLE t1 ROW_FORMAT=COMPACT
PAGE_COMPRESSED = YES PAGE_COMPRESSION_LEVEL = 1, ALGORITHM = INPLACE;

# Generate some log (delete-mark, delete-unmark, insert etc.)
# while the index creation is blocked.
connection default;
SET DEBUG_SYNC = 'now WAIT_FOR rebuilt2';
INSERT INTO t1 SELECT 80 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 160 + c1, c2, c3 FROM t1;
UPDATE t1 SET c2 = c2 + 1;
# At this point, the clustered index scan must have completed,
# but the modification log keeps accumulating due to the DEBUG_SYNC.
eval $innodb_metrics_select;
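# Keep updating every row until the accumulated modification log exceeds
# innodb_online_alter_log_max_size, dooming the blocked ALTER TABLE to fail
# with ER_INNODB_ONLINE_LOG_TOO_BIG.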
let $c= 8;
while ($c)
{
  UPDATE t1 SET c2 = c2 + 1;
  UPDATE t1 SET c2 = c2 + 2;
  dec $c;
}
# Temporary table should exist until the DDL thread notices the overflow.
eval $innodb_metrics_select;

SET @merge_encrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_encrypted');
SET @merge_decrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_decrypted');
SET @rowlog_encrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_encrypted');

SELECT
(@merge_encrypt_1-@merge_encrypt_0)-
(@merge_decrypt_1-@merge_decrypt_0) as sort_balance,
@merge_encrypt_1>@merge_encrypt_0, @merge_decrypt_1>@merge_decrypt_0,
@rowlog_encrypt_1>@rowlog_encrypt_0;

# Release con1.
SET DEBUG_SYNC = 'now SIGNAL dml2_done';

connection con1;
# If the following fails with the wrong error, it probably means that
# you should rerun with a larger mtr --debug-sync-timeout.
--error ER_INNODB_ONLINE_LOG_TOO_BIG
reap;
# The table should have been dropped from the data dictionary
# when the above error was noticed.
eval $innodb_metrics_select;
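
# Take a fresh baseline of the encryption counters before the next rebuild.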
SET @merge_encrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_encrypted');
SET @merge_decrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_decrypted');
SET @rowlog_encrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_encrypted');
SET @rowlog_decrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_decrypted');

# Accumulate and apply some modification log.
SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL rebuilt3 WAIT_FOR dml3_done';
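# The first two attempts must fail: t1 already has a PRIMARY KEY, and the
# renamed c2 column contains duplicate values.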
--error ER_MULTIPLE_PRI_KEY
ALTER TABLE t1 ADD PRIMARY KEY(c22f), CHANGE c2 c22f INT;
--error ER_DUP_ENTRY
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(c22f), CHANGE c2 c22f INT;
--send
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(c22f,c1,c4(5)),
CHANGE c2 c22f INT, CHANGE c3 c3 CHAR(255) NULL, CHANGE c1 c1 INT AFTER c22f,
ADD COLUMN c4 VARCHAR(6) DEFAULT 'Online', LOCK=NONE;

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR rebuilt3';
# Generate some log (delete-mark, delete-unmark, insert etc.)
eval $innodb_metrics_select;
BEGIN;
INSERT INTO t1 SELECT 320 + c1, c2, c3 FROM t1 WHERE c1 > 240;
DELETE FROM t1 WHERE c1 > 320;
UPDATE t1 SET c2 = c2 + 1;
COMMIT;
eval $innodb_metrics_select;
# Release con1.
SET DEBUG_SYNC = 'now SIGNAL dml3_done';

connection con1;
reap;
eval $innodb_metrics_select;
SELECT COUNT(c22f) FROM t1;
CHECK TABLE t1;

SET @merge_encrypt_2=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_encrypted');
SET @merge_decrypt_2=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_decrypted');
SET @rowlog_encrypt_2=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_encrypted');
SET @rowlog_decrypt_2=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_decrypted');

SELECT
(@merge_encrypt_2-@merge_encrypt_1)-
(@merge_decrypt_2-@merge_decrypt_1) as sort_balance,
(@rowlog_encrypt_2-@rowlog_encrypt_1)-
(@rowlog_decrypt_2-@rowlog_decrypt_1) as log_balance;
SELECT
@merge_encrypt_2-@merge_encrypt_1>0 as sort_encrypted,
@merge_decrypt_2-@merge_decrypt_1>0 as sort_decrypted,
@rowlog_encrypt_2-@rowlog_encrypt_1>0 as log_encrypted,
@rowlog_decrypt_2-@rowlog_decrypt_1>0 as log_decrypted;

# Create a column prefix index.
--error ER_DUP_ENTRY
ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY c3p5(c3(5));
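# Set some c3 values to NULL so that the NULL -> NOT NULL conversions below
# are exercised.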
UPDATE t1 SET c3 = NULL WHERE c3 = '';
SET lock_wait_timeout = 1;
--error ER_KEY_COLUMN_DOES_NOT_EXITS
ALTER TABLE t1 DROP COLUMN c22f, ADD PRIMARY KEY c3p5(c3(5));
# NULL -> NOT NULL is only allowed INPLACE without IGNORE.
# Adding a PRIMARY KEY will add NOT NULL implicitly!
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER IGNORE TABLE t1 DROP COLUMN c22f, DROP PRIMARY KEY, ADD PRIMARY KEY c3p5(c3(5)),
ALGORITHM = INPLACE;

UPDATE t1 SET c3=LEFT(CONCAT(c1,REPEAT('foo',c1)),255) WHERE c3 IS NULL;

SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL c3p5_created0 WAIT_FOR ins_done0';
# NULL -> NOT NULL is allowed INPLACE.
--send
ALTER TABLE t1 MODIFY c3 CHAR(255) NOT NULL, DROP COLUMN c22f,
DROP PRIMARY KEY, ADD PRIMARY KEY(c1,c4(5)),
ADD COLUMN c5 CHAR(5) DEFAULT 'tired' FIRST;

connection default;

SET DEBUG_SYNC = 'now WAIT_FOR c3p5_created0';
BEGIN;
INSERT INTO t1 VALUES(347,33101,'Pikku kakkosen posti','YLETV2');
INSERT INTO t1 VALUES(33101,347,NULL,'');
COMMIT;
SET DEBUG_SYNC = 'now SIGNAL ins_done0';

connection con1;
--error ER_INVALID_USE_OF_NULL
reap;
DELETE FROM t1 WHERE c1= 347 and c22f = 33101;
ALTER TABLE t1 MODIFY c3 CHAR(255) NOT NULL;

SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL c3p5_created WAIT_FOR ins_done';
--send
ALTER TABLE t1 DROP PRIMARY KEY, DROP COLUMN c22f,
ADD COLUMN c6 VARCHAR(1000) DEFAULT
'I love tracking down hard-to-reproduce bugs.',
ADD PRIMARY KEY c3p5(c3(5), c6(2));

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR c3p5_created';
SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL ins_done WAIT_FOR ddl_timed_out';
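# The first INSERT violates the NOT NULL constraint on c3. The second INSERT
# wakes up the blocked ALTER TABLE but then stays open, so the ALTER cannot
# complete and times out (lock_wait_timeout = 1 is still in effect in con1).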
--error ER_BAD_NULL_ERROR
INSERT INTO t1 VALUES(347,33101,NULL,'');
--send
INSERT INTO t1 VALUES(347,33101,'Pikku kakkosen posti','');

connection con1;
--error ER_LOCK_WAIT_TIMEOUT
reap;
SET DEBUG_SYNC = 'now SIGNAL ddl_timed_out';
eval $innodb_metrics_select;

connection default;
reap;
SELECT COUNT(*) FROM t1;
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SELECT * FROM t1 LIMIT 10;

connection con1;
ALTER TABLE t1 DISCARD TABLESPACE;

connection default;
SHOW CREATE TABLE t1;
SET GLOBAL innodb_monitor_disable = module_ddl;
DROP TABLE t1;

CREATE TABLE t1 (a INT PRIMARY KEY, b blob) ENGINE=InnoDB;
INSERT INTO t1 VALUES(0,NULL);

connection con1;
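# Rebuild a table with a BLOB column while a concurrent transaction inserts
# a long value and rolls back; the rolled-back row must not appear in the
# rebuilt table.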
SET DEBUG_SYNC = 'row_log_table_apply1_before SIGNAL created WAIT_FOR ins';
send ALTER TABLE t1 FORCE;

connection default;
SET DEBUG_SYNC = 'now WAIT_FOR created';
BEGIN;
INSERT INTO t1 VALUES(1, repeat('a', 10000));
ROLLBACK;
SET DEBUG_SYNC = 'now SIGNAL ins';

connection con1;
reap;
disconnect con1;

connection default;
SELECT * FROM t1;
DROP TABLE t1;
SET DEBUG_SYNC = 'RESET';

# Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc

SET GLOBAL innodb_file_per_table = @global_innodb_file_per_table_orig;
--disable_warnings
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
--enable_warnings