This is an existing problem in the old binlog implementation as well, and the patch also applies to the old binlog. The problem is that RESET MASTER can run concurrently with binlog dump threads / connected slaves, removing the binlog from under the feet of the reader, which can cause all sorts of strange behaviour.

The patch fixes this by disallowing RESET MASTER while dump threads (or another RESET MASTER or SHOW BINARY LOGS) are running. An error is thrown in that case; the user must stop slaves and/or kill dump threads for the RESET MASTER to go through. A slave that connects in the middle of a RESET MASTER will wait for it to complete.

Many test cases are fixed to kill any lingering dump threads before doing RESET MASTER, mostly by sourcing include/kill_binlog_dump_threads.inc.

Signed-off-by: Kristian Nielsen <knielsen@knielsen-hq.org>
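In a test script the fix is simply to source that include right before the RESET MASTER (a sketch; this test applies the same pattern on node_3 in its cleanup section):

--connection node_3
--source include/kill_binlog_dump_threads.inc
RESET MASTER;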
#
# Test Galera as a replica to a MySQL async replication
#
# The galera/galera_2node_slave.cnf describes the setup of the nodes
#
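# Topology (summary; the full settings are in the .cnf file):
#   node_1, node_2 - a two-node Galera cluster
#   node_3         - a standalone MariaDB server with binary logging enabled,
#                    used as the asynchronous replication master for node_2
#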
--source include/force_restart.inc
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_sequence.inc

# As node #3 is not a Galera node, and galera_cluster.inc does not open a connection to it,
# we open the node_3 connection here
--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3

create user repl@'%' identified by 'repl';
grant all on *.* to repl@'%';
flush privileges;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;

--let $node_1 = node_1
--let $node_2 = node_2
--source include/auto_increment_offset_save.inc

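# Configure node_2 as an asynchronous replica of node_3, tracking the
# replication position via GTID (master_use_gtid=slave_pos).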
--connection node_2
--disable_query_log
--eval CHANGE MASTER TO master_host='127.0.0.1', master_user='repl', master_password='repl', master_ssl_verify_server_cert=0, master_port=$NODE_MYPORT_3, master_use_gtid=slave_pos;
--enable_query_log
START SLAVE;

--connection node_3

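# Create the test data on the async master: 10000 rows generated with the
# sequence engine.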
CREATE TABLE t1 (id bigint primary key, msg varchar(100)) engine=innodb;
--disable_query_log
INSERT INTO t1 SELECT seq, 'test' from seq_1_to_10000;
--enable_query_log
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc

--let $wait_condition = SELECT COUNT(*) = 10000 FROM t1;
--source include/wait_condition.inc

#
# Node_2 is the async slave, so the mysql.gtid_slave_pos table is also populated here
#
SELECT COUNT(*) > 0 AS EXPECT_1 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc

--let $wait_condition = SELECT COUNT(*) = 10000 FROM t1;
--source include/wait_condition.inc

#
# The mysql.gtid_slave_pos table should not be replicated by Galera
#
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

--connection node_2
--echo # Verify that graceful shutdown succeeds.
--source include/shutdown_mysqld.inc
--echo # Force SST
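# Removing grastate.dat discards node_2's saved Galera state (cluster UUID and
# seqno), so on restart it cannot rejoin via IST and must do a full SST.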
--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat

--connection node_1
--echo # Waiting until node_2 is no longer part of the cluster
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc

--connection node_2
--echo # Start node_2 again
--source include/start_mysqld.inc

--echo # Wait until node_2 is back in the cluster
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
--source include/wait_condition.inc

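# After the SST, node_2's data directory reflects the donor (node_1), whose
# mysql.gtid_slave_pos is empty, so the table is expected to be empty on
# node_2 as well.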
--connection node_2
call mtr.add_suppression("Slave: Operation CREATE USER failed for ");
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

--connection node_1
SELECT COUNT(*) AS EXPECT_0 FROM mysql.gtid_slave_pos;
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

--connection node_3
SELECT COUNT(*) AS EXPECT_10000 FROM t1;

#
# Cleanup
#
--connection node_2
STOP SLAVE;
RESET SLAVE ALL;

--connection node_3
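# RESET MASTER refuses to run while binlog dump threads are still connected
# (see the commit message above), so kill any lingering dump thread left over
# from node_2's replica connection first.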
--source include/kill_binlog_dump_threads.inc
RESET MASTER;
drop table t1;

--connection node_2
DROP TABLE t1;

--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc

--connection node_1
--disconnect node_3

--source include/auto_increment_offset_restore.inc
--source include/galera_end.inc
--echo # End of test