Mirror of https://github.com/MariaDB/server.git
Merge from 10.4 to 10.5
Signed-off-by: Kristian Nielsen <knielsen@knielsen-hq.org>
Commit 16aa4b5f59

58 changed files with 1009 additions and 238 deletions
@@ -1422,7 +1422,9 @@ static void usage(void)
refresh Flush all tables and close and open logfiles\n\
shutdown Take server down\n\
status Gives a short status message from the server\n\
start-all-slaves Start all slaves\n\
start-slave Start slave\n\
stop-all-slaves Stop all slaves\n\
stop-slave Stop slave\n\
variables Prints variables available\n\
version Get version info from server");
@@ -474,6 +474,7 @@ typedef struct st_net {
my_bool thread_specific_malloc;
unsigned char compress;
my_bool unused3; /* Please remove with the next incompatible ABI change. */
my_bool using_proxy_protocol;
/*
Pointer to query object in query cache, do not equal NULL (0) for
queries in cache that have not stored its results yet
@@ -1,5 +1,5 @@
if "%MTR_PARALLEL%"=="" set MTR_PARALLEL=%NUMBER_OF_PROCESSORS%
perl mysql-test-run.pl --verbose-restart --force --suite-timeout=120 --max-test-fail=10 --retry=3 --suite=^
perl mysql-test-run.pl --force --suite-timeout=120 --max-test-fail=10 --retry=3 --suite=^
vcol,gcol,perfschema,^
main,^
innodb,^
@@ -613,13 +613,17 @@ call p_verify_status_increment(2, 0, 2, 0);
drop table t2;
set sql_mode=no_engine_substitution;
create temporary table t2 (a int);
call p_verify_status_increment(1, 0, 0, 0);
# One commit for the create temporary table, and two for committing the
# read of the stored procedure from Aria table (creating temporary table
# clears the sp cache).
call p_verify_status_increment(3, 0, 2, 0);
set sql_mode=default;
--echo # 19. A function changes temp-trans-table.
--echo #
select f1();
--echo # Two commits because a binary log record is written
call p_verify_status_increment(2, 0, 1, 0);
--echo # Two commits because a binary log record is written, and another two
--echo # as the function f1() is reloaded after creating temporary table.
call p_verify_status_increment(4, 0, 3, 0);
commit;
call p_verify_status_increment(2, 0, 1, 0);

@@ -672,9 +676,11 @@ call p_verify_status_increment(2, 0, 1, 0);
--echo # 25. DDL: DROP TEMPORARY TABLE, does not start a transaction
--echo #
drop temporary table t2;
call p_verify_status_increment(1, 0, 1, 0);
# Dropping temporary table clears SP caches, so get another two commit
# increments from loading the p_verify_status_increment procedure.
call p_verify_status_increment(3, 0, 2, 0);
commit;
call p_verify_status_increment(1, 0, 1, 0);
call p_verify_status_increment(1, 0, 0, 0);

--echo # 26. Verify that SET AUTOCOMMIT issues an implicit commit
--echo #

@@ -721,7 +727,9 @@ call p_verify_status_increment(1, 0, 1, 0);
create table t2 (a int);
call p_verify_status_increment(0, 0, 0, 0);
do (select f1() from t1 where a=2);
call p_verify_status_increment(2, 2, 2, 2);
# Again extra 2 commit increments from re-loading function f1 after
# dropping temporary table.
call p_verify_status_increment(4, 2, 4, 2);
commit;
call p_verify_status_increment(2, 2, 2, 2);
@@ -87,12 +87,16 @@ sub flush_out {
$out_line = "";
}

use if $^O eq "MSWin32", "threads::shared";
my $flush_lock :shared;

# Print to stdout
sub print_out {
if(IS_WIN32PERL) {
$out_line .= $_[0];
# Flush buffered output on new lines.
if (rindex($_[0], "\n") != -1) {
lock($flush_lock);
flush_out();
}
} else {
@@ -634,7 +634,7 @@ SUCCESS
drop table t2;
set sql_mode=no_engine_substitution;
create temporary table t2 (a int);
call p_verify_status_increment(1, 0, 0, 0);
call p_verify_status_increment(3, 0, 2, 0);
SUCCESS

set sql_mode=default;

@@ -643,8 +643,9 @@ set sql_mode=default;
select f1();
f1()
2
# Two commits because a binary log record is written
call p_verify_status_increment(2, 0, 1, 0);
# Two commits because a binary log record is written, and another two
# as the function f1() is reloaded after creating temporary table.
call p_verify_status_increment(4, 0, 3, 0);
SUCCESS

commit;

@@ -715,11 +716,11 @@ SUCCESS
# 25. DDL: DROP TEMPORARY TABLE, does not start a transaction
#
drop temporary table t2;
call p_verify_status_increment(1, 0, 1, 0);
call p_verify_status_increment(3, 0, 2, 0);
SUCCESS

commit;
call p_verify_status_increment(1, 0, 1, 0);
call p_verify_status_increment(1, 0, 0, 0);
SUCCESS

# 26. Verify that SET AUTOCOMMIT issues an implicit commit

@@ -801,7 +802,7 @@ call p_verify_status_increment(0, 0, 0, 0);
SUCCESS

do (select f1() from t1 where a=2);
call p_verify_status_increment(2, 2, 2, 2);
call p_verify_status_increment(4, 2, 4, 2);
SUCCESS

commit;
@@ -1,4 +1,15 @@
SET DEBUG_SYNC='dispatch_command_end2 SIGNAL ready EXECUTE 3';
connect con1,localhost,root,,;
SET DEBUG_SYNC='now wait_for ready';
connection default;
SET DEBUG_SYNC='dispatch_command_end WAIT_FOR go EXECUTE 2';
select 1;
connection con1;
SET DEBUG_SYNC='now signal go';
SET DEBUG_SYNC='now wait_for ready';
SET DEBUG_SYNC='now signal go';
SET DEBUG_SYNC='now wait_for ready';
SET DEBUG_SYNC='RESET';
SHOW PROCESSLIST;
Id User Host db Command Time State Info Progress
# root # test Sleep # # NULL 0.000

@@ -6,6 +17,8 @@ Id User Host db Command Time State Info Progress
SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go';
SHOW PROCESSLIST;
connection default;
1
1
SET DEBUG_SYNC='now WAIT_FOR ready';
KILL QUERY con_id;
SET DEBUG_SYNC='now SIGNAL go';
@@ -4,21 +4,44 @@
--source include/not_embedded.inc
--source include/have_debug_sync.inc

let $wait_condition=select count(*) = 1 from information_schema.processlist;
source include/wait_condition.inc;
# Ensure no lingering connections from an earlier test run, which can very
# rarely still be visible in SHOW PROCESSLIST here.
--let $wait_condition= SELECT COUNT(*) = 1 from information_schema.processlist
--source include/wait_condition.inc

# This is to ensure that the following SHOW PROCESSLIST does not show the query
#
# The use of DEBUG_SYNC here is quite tricky, and there were several bugs in
# this test case before. The dispatch_command_end* sync points will trigger at
# the end of the statement that sets them, so we need to use EXECUTE 2/3 to
# make them trigger also during the "select 1" statement. And we need to use
# two separate sync points so that we can wait first and signal after;
# otherwise the last wait from dispatch_command_end may time out as its signal
# gets overridden from the later sync point "before_execute_sql_command".
#
SET DEBUG_SYNC='dispatch_command_end2 SIGNAL ready EXECUTE 3';
--connect (con1,localhost,root,,)
SET DEBUG_SYNC='now wait_for ready';
--connection default
SET DEBUG_SYNC='dispatch_command_end WAIT_FOR go EXECUTE 2';
--send select 1

--connection con1
SET DEBUG_SYNC='now signal go';
SET DEBUG_SYNC='now wait_for ready';
SET DEBUG_SYNC='now signal go';
SET DEBUG_SYNC='now wait_for ready';
SET DEBUG_SYNC='RESET';

--let $con_id = `SELECT CONNECTION_ID()`

let $wait_condition=select command = 'sleep' from information_schema.processlist where id != $con_id;
source include/wait_condition.inc;

--replace_result Execute Query
--replace_column 1 # 3 # 6 # 7 #
SHOW PROCESSLIST;
SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go';
send SHOW PROCESSLIST;
--connection default
--reap

# We must wait for the SHOW PROCESSLIST query to have started before sending
# the kill. Otherwise, the KILL may be lost since it is reset at the start of
# query execution.

@@ -32,7 +55,9 @@ reap;
SET DEBUG_SYNC='reset';

# Wait until default connection has reset query string
let $wait_condition=select command = 'sleep' from information_schema.processlist where id != $con_id;
let $wait_condition=
SELECT COUNT(*) = 1 from information_schema.processlist
WHERE Command = "Sleep" AND info is NULL;
--source include/wait_condition.inc

--replace_result Execute Query
@@ -261,3 +261,4 @@ SET @@global.character_set_server= @save_character_set_server;
SET @@global.collation_server= @save_collation_server;
SET @@global.character_set_client= @save_character_set_client;
SET @@global.collation_connection= @save_collation_connection;
FOUND 1 /Aborted connection.*'u' host: '192.0.2.1' real ip: '(localhost|::1)'/ in mysqld.1.err
@@ -59,3 +59,9 @@ SET @@global.character_set_server= @save_character_set_server;
SET @@global.collation_server= @save_collation_server;
SET @@global.character_set_client= @save_character_set_client;
SET @@global.collation_connection= @save_collation_connection;

# Search for "real ip" in Aborted message
# This is indicator for abort of the proxied connections.
let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err;
let SEARCH_PATTERN= Aborted connection.*'u' host: '192.0.2.1' real ip: '(localhost|::1)';
source include/search_pattern_in_file.inc;
@@ -5978,6 +5978,19 @@ EXECUTE stmt USING DEFAULT;
# Clean up
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;
# MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
CREATE TABLE t1 AS SELECT 1 f;
PREPARE stmt FROM 'SHOW CREATE TABLE t1';
DROP TABLE t1;
EXECUTE stmt;
ERROR 42S02: Table 'test.t1' doesn't exist
CREATE VIEW t1 AS SELECT 1;
EXECUTE stmt;
View Create View character_set_client collation_connection
t1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `t1` AS select 1 AS `1` latin1 latin1_swedish_ci
# Clean up
DEALLOCATE PREPARE stmt;
DROP VIEW t1;
#
# End of 10.4 tests
#
@@ -5416,6 +5416,18 @@ EXECUTE stmt USING DEFAULT;
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2;

--echo # MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
CREATE TABLE t1 AS SELECT 1 f;
PREPARE stmt FROM 'SHOW CREATE TABLE t1';
DROP TABLE t1;
--error ER_NO_SUCH_TABLE
EXECUTE stmt;
CREATE VIEW t1 AS SELECT 1;
EXECUTE stmt;
--echo # Clean up
DEALLOCATE PREPARE stmt;
DROP VIEW t1;

--echo #
--echo # End of 10.4 tests
--echo #
@@ -3847,4 +3847,103 @@ Warnings:
Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`))))
DROP TABLE t1,t2;
#
# MDEV-31154: Fatal InnoDB error or assertion `!is_v' failure upon multi-update with indexed virtual column
#
# Test with auto generated Primary Key
#
SET @save_optimizer_switch= @@optimizer_switch;
SET optimizer_switch='rowid_filter=on';
CREATE TABLE t0(a int);
INSERT INTO t0 SELECT seq FROM seq_1_to_20;
ANALYZE TABLE t0 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
test.t0 analyze status Engine-independent statistics collected
test.t0 analyze status OK
CREATE TABLE t1 (
a int,
b int as (a * 2) VIRTUAL,
f char(200), /* Filler */
key (b),
key (a)
) engine=innodb;
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
ANALYZE TABLE t1 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Test for type 'ref|filter'
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
count(*)
10
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
count(*)
10
# Test for type 'range|filter'
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
SELECT count(*) FROM t1 WHERE a<100 and b <100;
count(*)
49
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
count(*)
49
# Test with Primary Key
#
DROP TABLE t1;
CREATE TABLE t1 (
p int PRIMARY KEY AUTO_INCREMENT,
a int,
b int as (a * 2) VIRTUAL,
f char(200), /* Filler */
key (b),
key (a)
) engine=innodb;
INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
ANALYZE TABLE t1 PERSISTENT FOR ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Test for type 'ref|filter'
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
count(*)
10
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t1 ref|filter b,a b|a 5|5 test.t0.a 1 (2%) Using where; Using rowid filter
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
count(*)
10
# Test for type 'range|filter'
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
SELECT count(*) FROM t1 WHERE a<100 and b <100;
count(*)
49
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range|filter b,a b|a 5|5 NULL 49 (10%) Using where; Using rowid filter
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
count(*)
49
SET optimizer_switch=@save_optimizer_switch;
DROP TABLE t0, t1;
# End of 10.4 tests
@@ -1,6 +1,8 @@
--source include/no_valgrind_without_big.inc
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_sequence.inc
--source include/innodb_stable_estimates.inc

SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';

@@ -675,4 +677,77 @@ eval EXPLAIN EXTENDED $q;

DROP TABLE t1,t2;

--echo #
--echo # MDEV-31154: Fatal InnoDB error or assertion `!is_v' failure upon multi-update with indexed virtual column
--echo #

--echo # Test with auto generated Primary Key
--echo #

SET @save_optimizer_switch= @@optimizer_switch;
SET optimizer_switch='rowid_filter=on';

CREATE TABLE t0(a int);
INSERT INTO t0 SELECT seq FROM seq_1_to_20;
ANALYZE TABLE t0 PERSISTENT FOR ALL;

CREATE TABLE t1 (
a int,
b int as (a * 2) VIRTUAL,
f char(200), /* Filler */
key (b),
key (a)
) engine=innodb;

INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
ANALYZE TABLE t1 PERSISTENT FOR ALL;

--echo # Test for type 'ref|filter'
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;

EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;

--echo # Test for type 'range|filter'
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
SELECT count(*) FROM t1 WHERE a<100 and b <100;

EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;

--echo # Test with Primary Key
--echo #

DROP TABLE t1;
CREATE TABLE t1 (
p int PRIMARY KEY AUTO_INCREMENT,
a int,
b int as (a * 2) VIRTUAL,
f char(200), /* Filler */
key (b),
key (a)
) engine=innodb;

INSERT INTO t1 (a, f) SELECT seq, seq FROM seq_1_to_1000;
ANALYZE TABLE t1 PERSISTENT FOR ALL;

--echo # Test for type 'ref|filter'
EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20;

EXPLAIN SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;
SELECT count(*) from t0,t1 WHERE t0.a=t1.b AND t1.a<20 FOR UPDATE;

--echo # Test for type 'range|filter'
EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100;
SELECT count(*) FROM t1 WHERE a<100 and b <100;

EXPLAIN SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;
SELECT count(*) FROM t1 WHERE a<100 and b <100 FOR UPDATE;

SET optimizer_switch=@save_optimizer_switch;

DROP TABLE t0, t1;

--echo # End of 10.4 tests
@@ -7182,15 +7182,14 @@ CREATE VIEW t1 AS SELECT 10 AS f1;
CALL p1(1);
ERROR HY000: The target table t1 of the INSERT is not insertable-into
CREATE TEMPORARY TABLE t1 (f1 INT);
# t1 still refers to the view since it was inlined
CALL p1(2);
ERROR HY000: The target table t1 of the INSERT is not insertable-into
DROP VIEW t1;
# t1 now refers to the temporary table
CALL p1(3);
# Check which values were inserted into the temp table.
SELECT * FROM t1;
f1
2
3
DROP TEMPORARY TABLE t1;
DROP PROCEDURE p1;
@@ -8633,8 +8633,6 @@ CALL p1(1);

CREATE TEMPORARY TABLE t1 (f1 INT);

--echo # t1 still refers to the view since it was inlined
--error ER_NON_INSERTABLE_TABLE
CALL p1(2);

DROP VIEW t1;
@@ -3296,4 +3296,33 @@ a
2
DEALLOCATE PREPARE stmt;
DROP TABLE t1,t2,t3;
#
# MDEV-33747: Optimization of (SELECT) IN (SELECT ...) executes subquery at prepare stage
#
create table t1 (a int, b int);
insert into t1 select seq, seq from seq_1_to_200;
create table t2 as select * from t1;
create table t3 as select * from t1;
analyze table t1,t2,t3;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
select @@expensive_subquery_limit < 200 as DEFAULTS_ARE_SUITABLE;
DEFAULTS_ARE_SUITABLE
1
flush status;
explain select * from t1 where a<3 or (select max(a) from t2) in (select b from t3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 200 Using where
3 SUBQUERY t3 ALL NULL NULL NULL NULL 200 Using where
2 SUBQUERY t2 ALL NULL NULL NULL NULL 200
# Must show 0. If this shows 200, this means subquery was executed and you have a bug:
show status like 'Handler_read_rnd_next%';
Variable_name Value
Handler_read_rnd_next 0
drop table t1,t2,t3;
# End of 10.4 tests
@@ -2668,4 +2668,19 @@ DEALLOCATE PREPARE stmt;

DROP TABLE t1,t2,t3;

--echo #
--echo # MDEV-33747: Optimization of (SELECT) IN (SELECT ...) executes subquery at prepare stage
--echo #
create table t1 (a int, b int);
insert into t1 select seq, seq from seq_1_to_200;
create table t2 as select * from t1;
create table t3 as select * from t1;
analyze table t1,t2,t3;
select @@expensive_subquery_limit < 200 as DEFAULTS_ARE_SUITABLE;
flush status;
explain select * from t1 where a<3 or (select max(a) from t2) in (select b from t3);
--echo # Must show 0. If this shows 200, this means subquery was executed and you have a bug:
show status like 'Handler_read_rnd_next%';
drop table t1,t2,t3;

--echo # End of 10.4 tests
@@ -614,6 +614,55 @@ Tables_in_test
# in 11.2 and above here should be listed above used temporary tables
DROP TEMPORARY TABLE t1, t2;
#
# MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
#
CREATE VIEW v1 AS SELECT 5;
CREATE PROCEDURE sp() SELECT * FROM v1;
CREATE TEMPORARY TABLE v1 as SELECT 7;
# sp() accesses the temporary table v1 that hides the view with the same name
# Therefore expected output is the row (7)
CALL sp();
7
7
DROP TEMPORARY TABLE v1;
# After the temporary table v1 has been dropped the next invocation of sp()
# accesses the view v1. So, expected output is the row (5)
CALL sp();
5
5
# Clean up
DROP VIEW v1;
DROP PROCEDURE sp;
# Another use case is when a temporary table hides a view is dropped
# inside a stored routine being called.
CREATE VIEW t1 AS SELECT 1;
CREATE PROCEDURE p1()
BEGIN
DROP TEMPORARY TABLE t1;
END
|
CREATE FUNCTION f1() RETURNS INT
BEGIN
CALL p1();
RETURN 1;
END
|
CREATE TEMPORARY TABLE t1 AS SELECT 1 AS a;
PREPARE stmt FROM 'SELECT f1()';
EXECUTE stmt;
f1()
1
# The temporary table t1 has been dropped on first
# execution of the prepared statement 'stmt',
# next time this statement is run it results in issuing
# the error ER_BAD_TABLE_ERROR
EXECUTE stmt;
ERROR 42S02: Unknown table 'test.t1'
# Clean up
DROP VIEW t1;
DROP FUNCTION f1;
DROP PROCEDURE p1;
#
# End of 10.4 tests
#
create function f1() returns int
@@ -669,6 +669,60 @@ SHOW TABLES;

DROP TEMPORARY TABLE t1, t2;

--echo #
--echo # MDEV-33218: Assertion `active_arena->is_stmt_prepare_or_first_stmt_execute() || active_arena->state == Query_arena::STMT_SP_QUERY_ARGUMENTS' failed. in st_select_lex::fix_prepare_information
--echo #
CREATE VIEW v1 AS SELECT 5;
CREATE PROCEDURE sp() SELECT * FROM v1;
CREATE TEMPORARY TABLE v1 as SELECT 7;
--echo # sp() accesses the temporary table v1 that hides the view with the same name
--echo # Therefore expected output is the row (7)
CALL sp();
DROP TEMPORARY TABLE v1;
--echo # After the temporary table v1 has been dropped the next invocation of sp()
--echo # accesses the view v1. So, expected output is the row (5)
CALL sp();

--echo # Clean up
DROP VIEW v1;
DROP PROCEDURE sp;

--echo # Another use case is when a temporary table hides a view is dropped
--echo # inside a stored routine being called.

CREATE VIEW t1 AS SELECT 1;

--delimiter |
CREATE PROCEDURE p1()
BEGIN
DROP TEMPORARY TABLE t1;
END
|

CREATE FUNCTION f1() RETURNS INT
BEGIN
CALL p1();
RETURN 1;
END
|

--delimiter ;

CREATE TEMPORARY TABLE t1 AS SELECT 1 AS a;
PREPARE stmt FROM 'SELECT f1()';
EXECUTE stmt;
--echo # The temporary table t1 has been dropped on first
--echo # execution of the prepared statement 'stmt',
--echo # next time this statement is run it results in issuing
--echo # the error ER_BAD_TABLE_ERROR
--error ER_BAD_TABLE_ERROR
EXECUTE stmt;

--echo # Clean up
DROP VIEW t1;
DROP FUNCTION f1;
DROP PROCEDURE p1;

--echo #
--echo # End of 10.4 tests
--echo #
@@ -4511,8 +4511,6 @@ sub extract_warning_lines ($$) {
qr/WSREP: Guessing address for incoming client/,

qr/InnoDB: Difficult to find free blocks in the buffer pool*/,
# for UBSAN
qr/decimal\.c.*: runtime error: signed integer overflow/,
# Disable test for UBSAN on dynamically loaded objects
qr/runtime error: member call.*object.*'Handler_share'/,
qr/sql_type\.cc.* runtime error: member call.*object.* 'Type_collection'/,
mysql-test/suite/galera_sr/r/galera_sr_bf_abort_idle.result (new file, 33 lines)

@@ -0,0 +1,33 @@
connection node_2;
connection node_1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER);
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
SET SESSION wsrep_trx_fragment_size=10;
SET SESSION wsrep_trx_fragment_unit='rows';
START TRANSACTION;
UPDATE t1 SET f2 = f2 + 10;
connection node_2;
INSERT INTO t1 VALUES (10,2);
connection node_1a;
connection node_1;
INSERT INTO t1 VALUES (9,1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
DROP TABLE t1;
connection node_1;
CREATE TABLE t1(f1 INTEGER PRIMARY KEY, f2 INTEGER);
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);
SET SESSION wsrep_trx_fragment_size=5;
SET SESSION wsrep_trx_fragment_unit='rows';
START TRANSACTION;
UPDATE t1 SET f2 = f2 + 10;
connection node_2;
INSERT INTO t1 VALUES (10,2);
connection node_1a;
connection node_1;
INSERT INTO t1 VALUES (9,1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
DROP TABLE t1;
mysql-test/suite/galera_sr/t/galera_sr_bf_abort_idle.test (new file, 68 lines)

@@ -0,0 +1,68 @@
#
# Test BF abort for idle SR transactions
#

--source include/galera_cluster.inc

--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1

#
# Case 1: BF abort idle SR transaction that has not yet replicated any fragments
#
--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER);
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);

--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`

SET SESSION wsrep_trx_fragment_size=10;
SET SESSION wsrep_trx_fragment_unit='rows';
START TRANSACTION;
UPDATE t1 SET f2 = f2 + 10;

--connection node_2
INSERT INTO t1 VALUES (10,2);

# Wait for SR transaction to be BF aborted
--connection node_1a
--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
--source include/wait_condition.inc

--connection node_1
--error ER_LOCK_DEADLOCK
INSERT INTO t1 VALUES (9,1);
ROLLBACK;

DROP TABLE t1;

#
# Case 2: BF abort idle SR transaction that has already replicated a fragment
#
--connection node_1
CREATE TABLE t1(f1 INTEGER PRIMARY KEY, f2 INTEGER);
INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1);

--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'`

SET SESSION wsrep_trx_fragment_size=5;
SET SESSION wsrep_trx_fragment_unit='rows';
START TRANSACTION;
UPDATE t1 SET f2 = f2 + 10;

--connection node_2
INSERT INTO t1 VALUES (10,2);

# Wait for SR transaction to be BF aborted
--connection node_1a
--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'
--source include/wait_condition.inc

--connection node_1
--error ER_LOCK_DEADLOCK
INSERT INTO t1 VALUES (9,1);
ROLLBACK;

DROP TABLE t1;
Binary file not shown.
@@ -256,3 +256,16 @@ select * from t1;
check table t1;

drop database best;

--echo #
--echo # MDEV-33214 Table is getting rebuild with
--echo # ALTER TABLE ADD COLUMN
--echo #
use test;
CREATE TABLE t1(f1 INT, f2 VARCHAR(10)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;
INSERT INTO t1 VALUES(1,'abc'),(2,'def');
ALTER TABLE t1 ADD (f3 VARCHAR(5000), f4 VARCHAR(20)), ALGORITHM=instant;
ALTER TABLE t1 ADD f5 TEXT, ALGORITHM=INSTANT;
DROP TABLE t1;

--echo # End of 10.4 tests
@@ -174,6 +174,105 @@ a
10
11
12
*** MDEV-33475: --gtid-ignore-duplicate can double-apply event in case of parallel replication retry
connection server_2;
STOP SLAVE "c2b";
SET default_master_connection = "c2b";
include/wait_for_slave_to_stop.inc
STOP SLAVE "a2b";
SET default_master_connection = "a2b";
include/wait_for_slave_to_stop.inc
connection server_1;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
BEGIN;
INSERT INTO t2 VALUES (0, 0);
INSERT INTO t2 VALUES (1, 0);
INSERT INTO t2 VALUES (2, 0);
INSERT INTO t2 VALUES (3, 0);
INSERT INTO t2 VALUES (4, 0);
INSERT INTO t2 VALUES (5, 0);
INSERT INTO t2 VALUES (6, 0);
INSERT INTO t2 VALUES (7, 0);
INSERT INTO t2 VALUES (8, 0);
INSERT INTO t2 VALUES (9, 0);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (0+10, 100);
UPDATE t2 SET b=0 WHERE a<10;
INSERT INTO t2 VALUES (0+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (1+10, 100);
UPDATE t2 SET b=1 WHERE a<10;
INSERT INTO t2 VALUES (1+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (2+10, 100);
UPDATE t2 SET b=2 WHERE a<10;
INSERT INTO t2 VALUES (2+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (3+10, 100);
UPDATE t2 SET b=3 WHERE a<10;
INSERT INTO t2 VALUES (3+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (4+10, 100);
UPDATE t2 SET b=4 WHERE a<10;
INSERT INTO t2 VALUES (4+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (5+10, 100);
UPDATE t2 SET b=5 WHERE a<10;
INSERT INTO t2 VALUES (5+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (6+10, 100);
UPDATE t2 SET b=6 WHERE a<10;
INSERT INTO t2 VALUES (6+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (7+10, 100);
UPDATE t2 SET b=7 WHERE a<10;
INSERT INTO t2 VALUES (7+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (8+10, 100);
UPDATE t2 SET b=8 WHERE a<10;
INSERT INTO t2 VALUES (8+20, 200);
COMMIT;
BEGIN;
INSERT INTO t2 VALUES (9+10, 100);
UPDATE t2 SET b=9 WHERE a<10;
INSERT INTO t2 VALUES (9+20, 200);
COMMIT;
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
COUNT(*) SUM(a) SUM(b)
30 435 3090
include/save_master_gtid.inc
connection server_2;
SET @old_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode=aggressive;
SET default_master_connection = "a2b";
START SLAVE;
include/wait_for_slave_to_start.inc
SET default_master_connection = "c2b";
START SLAVE;
include/wait_for_slave_to_start.inc
include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
COUNT(*) SUM(a) SUM(b)
30 435 3090
connection server_3;
include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
COUNT(*) SUM(a) SUM(b)
30 435 3090
connection server_4;
include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
COUNT(*) SUM(a) SUM(b)
30 435 3090
*** Test also with not using parallel replication.
connection server_1;
SET default_master_connection = "b2a";

@@ -474,6 +573,7 @@ Warnings:
Note 1938 SLAVE 'a2b' stopped
Note 1938 SLAVE 'c2b' stopped
SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL slave_parallel_mode= @old_mode;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
connection server_3;
SET GLOBAL gtid_domain_id=0;

@@ -491,22 +591,22 @@ Note 1938 SLAVE 'a2d' stopped
SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
connection server_1;
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_1;
connection server_2;
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_2;
connection server_3;
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_3;
connection server_4;
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_4;
@@ -173,6 +173,65 @@ SET default_master_connection = "a2b";
SELECT * FROM t1 WHERE a >= 10 ORDER BY a;

--echo *** MDEV-33475: --gtid-ignore-duplicate can double-apply event in case of parallel replication retry

# Create a bunch of transactions that will cause conflicts and retries.
# The bug was that the retry code was not handling the --gtid-ignore-duplicates
# option, so events could be doubly-applied.

--connection server_2
STOP SLAVE "c2b";
SET default_master_connection = "c2b";
--source include/wait_for_slave_to_stop.inc
STOP SLAVE "a2b";
SET default_master_connection = "a2b";
--source include/wait_for_slave_to_stop.inc

--connection server_1
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
BEGIN;
--let $i= 0
while ($i < 10) {
eval INSERT INTO t2 VALUES ($i, 0);
inc $i;
}
COMMIT;

--let $i= 0
while ($i < 10) {
BEGIN;
eval INSERT INTO t2 VALUES ($i+10, 100);
eval UPDATE t2 SET b=$i WHERE a<10;
eval INSERT INTO t2 VALUES ($i+20, 200);
COMMIT;
inc $i;
}

SELECT COUNT(*), SUM(a), SUM(b) FROM t2;
--source include/save_master_gtid.inc

--connection server_2
SET @old_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode=aggressive;
SET default_master_connection = "a2b";
START SLAVE;
--source include/wait_for_slave_to_start.inc
SET default_master_connection = "c2b";
START SLAVE;
--source include/wait_for_slave_to_start.inc

--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--connection server_3
--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--connection server_4
--source include/sync_with_master_gtid.inc
SELECT COUNT(*), SUM(a), SUM(b) FROM t2;

--echo *** Test also with not using parallel replication.

--connection server_1

@@ -414,6 +473,7 @@ SET GLOBAL gtid_domain_id=0;
--sorted_result
STOP ALL SLAVES;
SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL slave_parallel_mode= @old_mode;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;

--connection server_3

@@ -431,25 +491,25 @@ SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;

--connection server_1
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_1

--connection server_2
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_2

--connection server_3
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_3

--connection server_4
DROP TABLE t1;
DROP TABLE t1, t2;
ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_4
@@ -36,8 +36,8 @@ public:
static Create_func_trt<TRT_FIELD> s_singleton;

protected:
Create_func_trt<TRT_FIELD>() = default;
virtual ~Create_func_trt<TRT_FIELD>() = default;
Create_func_trt() = default;
virtual ~Create_func_trt() = default;
};

template<TR_table::field_id_t TRT_FIELD>

@@ -132,8 +132,8 @@ public:
static Create_func_trt_trx_sees<Item_func_trt_trx_seesX> s_singleton;

protected:
Create_func_trt_trx_sees<Item_func_trt_trx_seesX>() = default;
virtual ~Create_func_trt_trx_sees<Item_func_trt_trx_seesX>() = default;
Create_func_trt_trx_sees() = default;
virtual ~Create_func_trt_trx_sees() = default;
};

template<class X>
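The two hunks above drop the template-id from the in-class constructor and destructor declarations. A minimal sketch of the pattern, using a hypothetical Holder class rather than anything from the patch (the likely motivation, stated here as an assumption, is that compilers in C++20 mode reject the template-id form of these names inside the class template, while the plain injected-class-name form is accepted everywhere):

// Minimal sketch, not part of the patch: name the constructor/destructor of a
// class template with the injected-class-name, not the template-id.
template <int N>
class Holder
{
public:
  // Holder<N>() = default;   // the template-id form; newer compilers reject it in C++20 mode
  Holder() = default;         // injected-class-name form, accepted everywhere
  virtual ~Holder() = default;
};

int main()
{
  Holder<3> h;
  (void) h;
  return 0;
}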
@@ -1365,7 +1365,13 @@ bool Item_in_optimizer::fix_left(THD *thd)
copy_with_sum_func(args[0]);
with_param= args[0]->with_param || args[1]->with_param;
with_field= args[0]->with_field;
if ((const_item_cache= args[0]->const_item()))

/*
If left expression is a constant, cache its value.
But don't do that if that involves computing a subquery, as we are in a
prepare-phase rewrite.
*/
if ((const_item_cache= args[0]->const_item()) && !args[0]->with_subquery())
{
cache->store(args[0]);
cache->cache_value();
@@ -3344,13 +3344,11 @@ template <template<class> class LI, typename T> class Item_equal_iterator
{
protected:
Item_equal *item_equal;
Item *curr_item;
Item *curr_item= nullptr;
public:
Item_equal_iterator<LI,T>(Item_equal &item_eq)
:LI<T> (item_eq.equal_items)
Item_equal_iterator(Item_equal &item_eq)
:LI<T> (item_eq.equal_items), item_equal(&item_eq)
{
curr_item= NULL;
item_equal= &item_eq;
if (item_eq.with_const)
{
LI<T> *list_it= this;
@@ -1375,15 +1375,16 @@ String *Item_func_regexp_replace::val_str_internal(String *str,
LEX_CSTRING src, rpl;
size_t startoffset= 0;

if ((null_value=
(!(source= args[0]->val_str(&tmp0)) ||
!(replace= args[2]->val_str_null_to_empty(&tmp2, null_to_empty)) ||
re.recompile(args[1]))))
return (String *) 0;

source= args[0]->val_str(&tmp0);
if (!source)
goto err;
replace= args[2]->val_str_null_to_empty(&tmp2, null_to_empty);
if (!replace || re.recompile(args[1]))
goto err;
if (!(source= re.convert_if_needed(source, &re.subject_converter)) ||
!(replace= re.convert_if_needed(replace, &re.replace_converter)))
goto err;
null_value= false;

source->get_value(&src);
replace->get_value(&rpl);

@@ -1429,7 +1430,7 @@ String *Item_func_regexp_replace::val_str_internal(String *str,

err:
null_value= true;
return (String *) 0;
return nullptr;
}

@@ -1565,13 +1566,21 @@ bool Item_func_insert::fix_length_and_dec()
String *Item_str_conv::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
String *res;
size_t alloced_length, len;
String *res= args[0]->val_str(&tmp_value);

if ((null_value= (!(res= args[0]->val_str(&tmp_value)) ||
str->alloc((alloced_length= res->length() * multiply)))))
return 0;
if (!res)
{
err:
null_value= true;
return nullptr;
}

size_t alloced_length= res->length() * multiply, len;

if (str->alloc((alloced_length)))
goto err;

null_value= false;
len= converter(collation.collation, (char*) res->ptr(), res->length(),
(char*) str->ptr(), alloced_length);
DBUG_ASSERT(len <= alloced_length);
@@ -156,6 +156,7 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags)
net->where_b = net->remain_in_buf=0;
net->net_skip_rest_factor= 0;
net->last_errno=0;
net->using_proxy_protocol= 0;
net->thread_specific_malloc= MY_TEST(my_flags & MY_THREAD_SPECIFIC);
net->thd= 0;
#ifdef MYSQL_SERVER

@@ -210,6 +211,7 @@ void net_end(NET *net)
DBUG_ENTER("net_end");
my_free(net->buff);
net->buff=0;
net->using_proxy_protocol= 0;
DBUG_VOID_RETURN;
}

@@ -924,6 +926,7 @@ static handle_proxy_header_result handle_proxy_header(NET *net)
return RETRY;
/* Change peer address in THD and ACL structures.*/
uint host_errors;
net->using_proxy_protocol= 1;
return (handle_proxy_header_result)thd_set_peer_addr(thd,
&(peer_info.peer_addr), NULL, peer_info.port,
false, &host_errors);
@@ -211,6 +211,13 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
signal_error_to_sql_driver_thread(thd, rgi, err);
thd->wait_for_commit_ptr= NULL;

/*
Calls to check_duplicate_gtid() must match up with
record_and_update_gtid() (or release_domain_owner() in error case). This
assertion tries to catch any missing release of the domain.
*/
DBUG_ASSERT(rgi->gtid_ignore_duplicate_state != rpl_group_info::GTID_DUPLICATE_OWNER);

mysql_mutex_lock(&entry->LOCK_parallel_entry);
/*
We need to mark that this event group started its commit phase, in case we

@@ -874,7 +881,13 @@ do_retry:
});
#endif

rgi->cleanup_context(thd, 1);
/*
We are still applying the event group, even though we will roll it back
and retry it. So for --gtid-ignore-duplicates, keep ownership of the
domain during the retry so another master connection will not try to take
over and duplicate apply the same event group (MDEV-33475).
*/
rgi->cleanup_context(thd, 1, 1 /* keep_domain_owner */);
wait_for_pending_deadlock_kill(thd, rgi);
thd->reset_killed();
thd->clear_error();
@@ -2251,7 +2251,7 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi,
}

void rpl_group_info::cleanup_context(THD *thd, bool error)
void rpl_group_info::cleanup_context(THD *thd, bool error, bool keep_domain_owner)
{
DBUG_ENTER("rpl_group_info::cleanup_context");
DBUG_PRINT("enter", ("error: %d", (int) error));

@@ -2306,7 +2306,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
Ensure we always release the domain for others to process, when using
--gtid-ignore-duplicates.
*/
if (gtid_ignore_duplicate_state != GTID_DUPLICATE_NULL)
if (gtid_ignore_duplicate_state != GTID_DUPLICATE_NULL && !keep_domain_owner)
rpl_global_gtid_slave_state->release_domain_owner(this);
}

@@ -917,7 +917,7 @@ struct rpl_group_info
}

void clear_tables_to_lock();
void cleanup_context(THD *, bool);
void cleanup_context(THD *, bool, bool keep_domain_owner= false);
void slave_close_thread_tables(THD *);
void mark_start_commit_no_lock();
void mark_start_commit();
@@ -4252,22 +4252,22 @@ ER_ERROR_DURING_CHECKPOINT
swe "Fick fel %M vid CHECKPOINT"
ukr "Отримано помилку %M під час CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
chi "终止的连接 %lld 到数据库: '%-.192s' 用户: '%-.48s' 主机: '%-.64s' (%-.64s)"
cze "Spojení %lld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
dan "Afbrød forbindelsen %lld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
eng "Aborted connection %lld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)"
est "Ühendus katkestatud %lld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)"
fre "Connection %lld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)"
ger "Abbruch der Verbindung %lld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)"
ita "Interrotta la connessione %lld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)"
jpn "接続 %lld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)"
nla "Afgebroken verbinding %lld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)"
por "Conexão %lld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')"
rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)"
serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)"
spa "Abortada conexión %lld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
chi "终止的连接 %lld 到数据库: '%-.192s' 用户: '%-.48s' 主机: '%-.64s'%-.64s (%-.64s)"
cze "Spojení %lld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s'%-.64s (%-.64s) bylo přerušeno"
dan "Afbrød forbindelsen %lld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s'%-.64s (%-.64s)"
eng "Aborted connection %lld to db: '%-.192s' user: '%-.48s' host: '%-.64s'%-.64s (%-.64s)"
est "Ühendus katkestatud %lld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s'%-.64s (%-.64s)"
fre "Connection %lld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s'%-.64s (%-.64s)"
ger "Abbruch der Verbindung %lld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s'%-.64s (%-.64s)"
ita "Interrotta la connessione %lld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s'%-.64s (%-.64s)"
jpn "接続 %lld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s'%-.64s (%-.64s)"
nla "Afgebroken verbinding %lld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s'%-.64s (%-.64s)"
por "Conexão %lld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s'%-.64s ('%-.64s')"
rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s'%-.64s (%-.64s)"
serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s'%-.64s (%-.64s)"
spa "Abortada conexión %lld a la base de datos: '%-.192s' usuario: '%-.48s' equipo: '%-.64s'%-.64s (%-.64s)"
swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s'%-.64s (%-.64s)"
ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s'%-.64s (%-.64s)"
ER_UNUSED_10
eng "You should never see it"
ER_FLUSH_MASTER_BINLOG_CLOSED
sql/sp.cc

@@ -1947,7 +1947,7 @@ Sp_handler::sp_show_create_routine(THD *thd,

DBUG_EXECUTE_IF("cache_sp_in_show_create",
/* Some tests need just need a way to cache SP without other side-effects.*/
sp_cache_routine(thd, name, false, &sp);
sp_cache_routine(thd, name, &sp);
sp->show_create_routine(thd, this);
DBUG_RETURN(false);
);

@@ -2371,7 +2371,7 @@ Sp_handler::sp_cache_routine_reentrant(THD *thd,
int ret;
Parser_state *oldps= thd->m_parser_state;
thd->m_parser_state= NULL;
ret= sp_cache_routine(thd, name, false, sp);
ret= sp_cache_routine(thd, name, sp);
thd->m_parser_state= oldps;
return ret;
}

@@ -2778,7 +2778,6 @@ void sp_update_stmt_used_routines(THD *thd, Query_tables_list *prelocking_ctx,
*/

int Sroutine_hash_entry::sp_cache_routine(THD *thd,
bool lookup_only,
sp_head **sp) const
{
char qname_buff[NAME_LEN*2+1+1];

@@ -2791,7 +2790,7 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,
*/
DBUG_ASSERT(mdl_request.ticket || this == thd->lex->sroutines_list.first);

return m_handler->sp_cache_routine(thd, &name, lookup_only, sp);
return m_handler->sp_cache_routine(thd, &name, sp);
}

@@ -2803,9 +2802,6 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,

@param[in] thd Thread context.
@param[in] name Name of routine.
@param[in] lookup_only Only check that the routine is in the cache.
If it's not, don't try to load. If it is present,
but old, don't try to reload.
@param[out] sp Pointer to sp_head object for routine, NULL if routine was
not found.

@@ -2816,7 +2812,6 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,

int Sp_handler::sp_cache_routine(THD *thd,
const Database_qualified_name *name,
bool lookup_only,
sp_head **sp) const
{
int ret= 0;

@@ -2828,9 +2823,6 @@ int Sp_handler::sp_cache_routine(THD *thd,

*sp= sp_cache_lookup(spc, name);

if (lookup_only)
DBUG_RETURN(SP_OK);

if (*sp)
{
sp_cache_flush_obsolete(spc, sp);

@@ -2882,7 +2874,6 @@ int Sp_handler::sp_cache_routine(THD *thd,
* name->m_db is a database name, e.g. "dbname"
* name->m_name is a package-qualified name,
e.g. "pkgname.spname"
@param lookup_only - don't load mysql.proc if not cached
@param [OUT] sp - the result is returned here.
@retval false - loaded or does not exists
@retval true - error while loading mysql.proc

@@ -2892,14 +2883,13 @@ int
Sp_handler::sp_cache_package_routine(THD *thd,
const LEX_CSTRING &pkgname_cstr,
const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const
sp_head **sp) const
{
DBUG_ENTER("sp_cache_package_routine");
DBUG_ASSERT(type() == SP_TYPE_FUNCTION || type() == SP_TYPE_PROCEDURE);
sp_name pkgname(&name->m_db, &pkgname_cstr, false);
sp_head *ph= NULL;
int ret= sp_handler_package_body.sp_cache_routine(thd, &pkgname,
lookup_only,
&ph);
if (!ret)
{

@@ -2934,12 +2924,12 @@ Sp_handler::sp_cache_package_routine(THD *thd,

int Sp_handler::sp_cache_package_routine(THD *thd,
const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const
sp_head **sp) const
{
DBUG_ENTER("Sp_handler::sp_cache_package_routine");
Prefix_name_buf pkgname(thd, name->m_name);
DBUG_ASSERT(pkgname.length);
DBUG_RETURN(sp_cache_package_routine(thd, pkgname, name, lookup_only, sp));
DBUG_RETURN(sp_cache_package_routine(thd, pkgname, name, sp));
}
sql/sp.h

@@ -102,10 +102,10 @@ protected:
int sp_cache_package_routine(THD *thd,
const LEX_CSTRING &pkgname_cstr,
const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const;
sp_head **sp) const;
int sp_cache_package_routine(THD *thd,
const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const;
sp_head **sp) const;
sp_head *sp_find_package_routine(THD *thd,
const LEX_CSTRING pkgname_str,
const Database_qualified_name *name,

@@ -202,7 +202,7 @@ public:
const Database_qualified_name *name,
bool cache_only) const;
virtual int sp_cache_routine(THD *thd, const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const;
sp_head **sp) const;

int sp_cache_routine_reentrant(THD *thd,
const Database_qualified_name *nm,

@@ -283,9 +283,9 @@ class Sp_handler_package_procedure: public Sp_handler_procedure
{
public:
int sp_cache_routine(THD *thd, const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const
sp_head **sp) const
{
return sp_cache_package_routine(thd, name, lookup_only, sp);
return sp_cache_package_routine(thd, name, sp);
}
sp_head *sp_find_routine(THD *thd,
const Database_qualified_name *name,

@@ -332,9 +332,9 @@ class Sp_handler_package_function: public Sp_handler_function
{
public:
int sp_cache_routine(THD *thd, const Database_qualified_name *name,
bool lookup_only, sp_head **sp) const
sp_head **sp) const
{
return sp_cache_package_routine(thd, name, lookup_only, sp);
return sp_cache_package_routine(thd, name, sp);
}
sp_head *sp_find_routine(THD *thd,
const Database_qualified_name *name,

@@ -632,7 +632,7 @@ public:

const Sp_handler *m_handler;

int sp_cache_routine(THD *thd, bool lookup_only, sp_head **sp) const;
int sp_cache_routine(THD *thd, sp_head **sp) const;
};
@ -78,6 +78,8 @@ private:

  /* All routines in this cache */
  HASH m_hashtable;
public:
  void clear();
}; // class sp_cache

#ifdef HAVE_PSI_INTERFACE

@ -313,6 +315,10 @@ sp_cache::cleanup()
  my_hash_free(&m_hashtable);
}

void sp_cache::clear()
{
  my_hash_reset(&m_hashtable);
}

void Sp_caches::sp_caches_clear()
{

@ -321,3 +327,15 @@ void Sp_caches::sp_caches_clear()
  sp_cache_clear(&sp_package_spec_cache);
  sp_cache_clear(&sp_package_body_cache);
}

void Sp_caches::sp_caches_empty()
{
  if (sp_proc_cache)
    sp_proc_cache->clear();
  if (sp_func_cache)
    sp_func_cache->clear();
  if (sp_package_spec_cache)
    sp_package_spec_cache->clear();
  if (sp_package_body_cache)
    sp_package_body_cache->clear();
}
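The clear()/sp_caches_empty() additions above give the server a way to empty every stored-routine cache without deleting the cache objects themselves, and the flag added to THD (reset_sp_cache) defers that flush to the end of the current statement. The stand-alone sketch below models that flag-and-flush pattern; Cache, Session and run_statement are illustrative names, not server symbols, and the sketch is a simplified assumption of how the pieces fit together rather than the server code itself.

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-in for sp_cache: entries stay until clear() is called.
    struct Cache {
      std::unordered_map<std::string, int> routines;
      void clear() { routines.clear(); }        // analogous to my_hash_reset()
    };

    // Illustrative stand-in for the per-connection state (THD).
    struct Session {
      Cache proc_cache, func_cache;
      bool reset_sp_cache= false;               // set by temporary-table DDL

      void caches_empty() {                     // analogous to sp_caches_empty()
        proc_cache.clear();
        func_cache.clear();
      }
    };

    // Statement dispatch: DDL only raises the flag; the caches are emptied once
    // the statement has finished, so routines in use are not invalidated mid-run.
    static void run_statement(Session &s, bool temp_table_ddl) {
      if (temp_table_ddl)
        s.reset_sp_cache= true;
      /* ... execute the statement using whatever is cached ... */
      if (s.reset_sp_cache) {
        s.caches_empty();
        s.reset_sp_cache= false;
      }
    }

    int main() {
      Session s;
      s.proc_cache.routines["p1"]= 1;
      run_statement(s, /*temp_table_ddl=*/true);
      std::printf("cached procs after flush: %zu\n", s.proc_cache.routines.size());
      return 0;
    }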
@ -3459,7 +3459,7 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
      DBUG_RETURN(TRUE);

    /* Ensures the routine is up-to-date and cached, if exists. */
    if (rt->sp_cache_routine(thd, has_prelocking_list, &sp))
    if (rt->sp_cache_routine(thd, &sp))
      DBUG_RETURN(TRUE);

    /* Remember the version of the routine in the parse tree. */

@ -3500,7 +3500,7 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
        Validating routine version is unnecessary, since CALL
        does not affect the prepared statement prelocked list.
      */
      if (rt->sp_cache_routine(thd, false, &sp))
      if (rt->sp_cache_routine(thd, &sp))
        DBUG_RETURN(TRUE);
    }
  }

@ -5492,13 +5492,23 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags)
    }
  }

  DEBUG_SYNC(thd, "before_lock_tables_takes_lock");
#ifdef ENABLED_DEBUG_SYNC
  if (!tables ||
      !(strcmp(tables->db.str, "mysql") == 0 &&
        strcmp(tables->table_name.str, "proc") == 0))
    DEBUG_SYNC(thd, "before_lock_tables_takes_lock");
#endif

  if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start),
                                      flags)))
    DBUG_RETURN(TRUE);

  DEBUG_SYNC(thd, "after_lock_tables_takes_lock");
#ifdef ENABLED_DEBUG_SYNC
  if (!tables ||
      !(strcmp(tables->db.str, "mysql") == 0 &&
        strcmp(tables->table_name.str, "proc") == 0))
    DEBUG_SYNC(thd, "after_lock_tables_takes_lock");
#endif

  if (thd->lex->requires_prelocking() &&
      thd->lex->sql_command != SQLCOM_LOCK_TABLES)
@ -892,6 +892,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
  prepare_derived_at_open= FALSE;
  create_tmp_table_for_derived= FALSE;
  save_prep_leaf_list= FALSE;
  reset_sp_cache= false;
  org_charset= 0;
  /* Restore THR_THD */
  set_current_thd(old_THR_THD);
@ -1,5 +1,4 @@
/*
   Copyright (c) 2000, 2016, Oracle and/or its affiliates.
   Copyright (c) 2009, 2022, MariaDB Corporation.

   This program is free software; you can redistribute it and/or modify

@ -2367,6 +2366,11 @@ public:
    swap_variables(sp_cache*, sp_package_body_cache, rhs.sp_package_body_cache);
  }
  void sp_caches_clear();
  /**
    Clear content of sp related caches.
    Don't delete cache objects itself.
  */
  void sp_caches_empty();
};

@ -2698,6 +2702,12 @@ public:

  bool save_prep_leaf_list;

  /**
    The data member reset_sp_cache is to signal that content of sp_cache
    must be reset (all items be removed from it).
  */
  bool reset_sp_cache;

  /* container for handler's private per-connection data */
  Ha_data ha_data[MAX_HA];

@ -4936,11 +4946,29 @@ public:
  {
    if (global_system_variables.log_warnings > threshold)
    {
      char real_ip_str[64];
      real_ip_str[0]= 0;

      /* For proxied connections, add the real IP to the warning message */
      if (net.using_proxy_protocol && net.vio)
      {
        if(net.vio->localhost)
          snprintf(real_ip_str, sizeof(real_ip_str), " real ip: 'localhost'");
        else
        {
          char buf[INET6_ADDRSTRLEN];
          if (!vio_getnameinfo((sockaddr *)&(net.vio->remote), buf,
                               sizeof(buf),NULL, 0, NI_NUMERICHOST))
          {
            snprintf(real_ip_str, sizeof(real_ip_str), " real ip: '%s'",buf);
          }
        }
      }
      Security_context *sctx= &main_security_ctx;
      sql_print_warning(ER_THD(this, ER_NEW_ABORTING_CONNECTION),
                        thread_id, (db.str ? db.str : "unconnected"),
                        sctx->user ? sctx->user : "unauthenticated",
                        sctx->host_or_ip, reason);
                        sctx->host_or_ip, real_ip_str, reason);
    }
  }
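For readers without the server sources at hand: the vio_getnameinfo() call used in the warning above is a wrapper around the ordinary getnameinfo() address-to-string lookup. A minimal, self-contained equivalent of the " real ip: '...'" formatting, written against plain POSIX getnameinfo() (the function name format_real_ip is an illustrative assumption, not a server symbol), could look roughly like this:

    #include <netdb.h>
    #include <sys/socket.h>
    #include <cstdio>

    // Format " real ip: '<numeric address>'" into buf, or leave buf empty if the
    // peer address cannot be converted. Sketch only; not the server code.
    static void format_real_ip(const sockaddr_storage *peer, socklen_t peer_len,
                               char *buf, size_t buflen)
    {
      char host[NI_MAXHOST];
      buf[0]= '\0';
      if (getnameinfo(reinterpret_cast<const sockaddr*>(peer), peer_len,
                      host, sizeof(host), nullptr, 0, NI_NUMERICHOST) == 0)
        std::snprintf(buf, buflen, " real ip: '%s'", host);
    }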
@ -1274,7 +1274,7 @@ void prepare_new_connection_state(THD* thd)
                      thd->thread_id,
                      thd->db.str ? thd->db.str : "unconnected",
                      sctx->user ? sctx->user : "unauthenticated",
                      sctx->host_or_ip, "init_connect command failed");
                      sctx->host_or_ip, "", "init_connect command failed");
    thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
    thd->protocol->end_statement();
    thd->killed = KILL_CONNECTION;
@ -2463,6 +2463,11 @@ dispatch_end:
  }
#endif /* WITH_WSREP */

  if (thd->reset_sp_cache)
  {
    thd->sp_caches_empty();
    thd->reset_sp_cache= false;
  }

  if (do_end_of_statement)
  {

@ -2532,6 +2537,7 @@ dispatch_end:
    MYSQL_COMMAND_DONE(res);
  }
  DEBUG_SYNC(thd,"dispatch_command_end");
  DEBUG_SYNC(thd,"dispatch_command_end2");

  /* Check that some variables are reset properly */
  DBUG_ASSERT(thd->abort_on_warning == 0);

@ -5925,7 +5931,7 @@ mysql_execute_command(THD *thd)
      if (sph->sp_resolve_package_routine(thd, thd->lex->sphead,
                                          lex->spname, &sph, &pkgname))
        return true;
      if (sph->sp_cache_routine(thd, lex->spname, false, &sp))
      if (sph->sp_cache_routine(thd, lex->spname, &sp))
        goto error;
      if (!sp || sp->show_routine_code(thd))
      {
@ -221,7 +221,7 @@ static int fake_rotate_event(binlog_send_info *info, ulonglong position,
  char* p = info->log_file_name+dirname_length(info->log_file_name);
  uint ident_len = (uint) strlen(p);
  String *packet= info->packet;
  ha_checksum crc;
  ha_checksum crc= 0;

  /* reset transmit packet for the fake rotate event below */
  if (reset_transmit_packet(info, info->flags, &ev_offset, &info->errmsg))

@ -262,7 +262,7 @@ static int fake_gtid_list_event(binlog_send_info *info,
{
  my_bool do_checksum;
  int err;
  ha_checksum crc;
  ha_checksum crc= 0;
  char buf[128];
  String str(buf, sizeof(buf), system_charset_info);
  String* packet= info->packet;
@ -2393,6 +2393,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
        table->table= 0;
        temporary_table_was_dropped= 1;
      }
      thd->reset_sp_cache= true;
    }

    if ((drop_temporary && if_exists) || temporary_table_was_dropped)

@ -2701,8 +2702,11 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
    }
    DBUG_PRINT("table", ("table: %p s: %p", table->table,
                         table->table ? table->table->s : NULL));
    if (is_temporary_table(table))
      thd->reset_sp_cache= true;
  }
  DEBUG_SYNC(thd, "rm_table_no_locks_before_binlog");

  thd->thread_specific_used= TRUE;
  error= 0;

@ -5424,6 +5428,7 @@ int create_table_impl(THD *thd, const LEX_CSTRING &orig_db,
      if (is_trans != NULL)
        *is_trans= table->file->has_transactions();

      thd->reset_sp_cache= true;
      thd->thread_specific_used= TRUE;
      create_info->table= table; // Store pointer to table
    }
@ -1142,7 +1142,10 @@ static int remove_fragment(THD* thd,
                seqno.get(),
                error);
    }
    ret= error;
    else
    {
      ret= error;
    }
  }
  else if (Wsrep_schema_impl::delete_row(frag_table))
  {
@ -2471,7 +2471,7 @@ fil_space_crypt_close_tablespace(
        << " seconds to drop space: "
        << space->name << " ("
        << space->id << ") active threads "
        << cnt << "flushing="
        << cnt << " flushing="
        << flushing << ".";
      last = now;
    }
@ -7259,26 +7259,55 @@ ha_innobase::build_template(

  ulint num_v = 0;

  if (active_index != MAX_KEY
      && active_index == pushed_idx_cond_keyno) {
    m_prebuilt->idx_cond = this;
    goto icp;
  } else if (pushed_rowid_filter && rowid_filter_is_active) {
icp:
    /* Push down an index condition or an end_range check. */
  /* MDEV-31154: For pushed down index condition we don't support virtual
  column and idx_cond_push() does check for it. For row ID filtering we
  don't need such restrictions but we get into trouble trying to use the
  ICP path.

  1. It should be fine to follow no_icp path if primary key is generated.
  However, with user specified primary key(PK), the row is identified by
  the PK and those columns need to be converted to mysql format in
  row_search_idx_cond_check before doing the comparison. Since secondary
  indexes always have PK appended in innodb, it works with current ICP
  handling code when fetch_primary_key_cols is set to TRUE.

  2. Although ICP comparison and Row ID comparison works on different
  columns the current ICP code can be shared by both.

  3. In most cases, it works today by jumping to goto no_icp when we
  encounter a virtual column. This is hackish and already have some
  issues as it cannot handle PK and all states are not reset properly,
  for example, idx_cond_n_cols is not reset.

  4. We already encountered MDEV-28747 m_prebuilt->idx_cond was being set.

  Neither ICP nor row ID comparison needs virtual columns and the code is
  simplified to handle both. It should handle the issues. */

  const bool pushed_down = active_index != MAX_KEY
    && active_index == pushed_idx_cond_keyno;

  m_prebuilt->idx_cond = pushed_down ? this : nullptr;

  if (m_prebuilt->idx_cond || m_prebuilt->pk_filter) {
    /* Push down an index condition, end_range check or row ID
    filter */
    for (ulint i = 0; i < n_fields; i++) {
      const Field* field = table->field[i];
      const bool is_v = !field->stored_in_db();
      if (is_v && skip_virtual) {
        num_v++;
        continue;
      }

      bool index_contains = index->contains_col_or_prefix(
        is_v ? num_v : i - num_v, is_v);
      if (is_v && index_contains) {
        m_prebuilt->n_template = 0;
        num_v = 0;
        goto no_icp;

      if (is_v) {
        if (index_contains) {
          /* We want to ensure that ICP is not
          used with virtual columns. */
          ut_ad(!pushed_down);
          m_prebuilt->idx_cond = nullptr;
        }
        num_v++;
        continue;
      }

      /* Test if an end_range or an index condition

@ -7298,7 +7327,7 @@ icp:
      which would be acceptable if end_range==NULL. */
      if (build_template_needs_field_in_icp(
            index, m_prebuilt, index_contains,
            is_v ? num_v : i - num_v, is_v)) {
            i - num_v, false)) {
        if (!whole_row) {
          field = build_template_needs_field(
            index_contains,

@ -7307,15 +7336,10 @@ icp:
            fetch_primary_key_cols,
            index, table, i, num_v);
          if (!field) {
            if (is_v) {
              num_v++;
            }
            continue;
          }
        }

        ut_ad(!is_v);

        mysql_row_templ_t* templ= build_template_field(
          m_prebuilt, clust_index, index,
          table, field, i - num_v, 0);

@ -7392,15 +7416,16 @@ icp:
        */
      }

      if (is_v) {
        num_v++;
      }
    }

    ut_ad(m_prebuilt->idx_cond_n_cols > 0);
    ut_ad(m_prebuilt->idx_cond_n_cols == m_prebuilt->n_template);

    num_v = 0;
    ut_ad(m_prebuilt->idx_cond_n_cols == m_prebuilt->n_template);
    if (m_prebuilt->idx_cond_n_cols == 0) {
      /* No columns to push down. It is safe to jump to np ICP
      path. */
      m_prebuilt->idx_cond = nullptr;
      goto no_icp;
    }

    /* Include the fields that are not needed in index condition
    pushdown. */

@ -7415,7 +7440,7 @@ icp:
      bool index_contains = index->contains_col_or_prefix(
        is_v ? num_v : i - num_v, is_v);

      if (!build_template_needs_field_in_icp(
      if (is_v || !build_template_needs_field_in_icp(
            index, m_prebuilt, index_contains,
            is_v ? num_v : i - num_v, is_v)) {
        /* Not needed in ICP */

@ -7448,7 +7473,7 @@ icp:
  } else {
no_icp:
    /* No index condition pushdown */
    m_prebuilt->idx_cond = NULL;
    ut_ad(!m_prebuilt->idx_cond);
    ut_ad(num_v == 0);

    for (ulint i = 0; i < n_fields; i++) {
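The restructured build_template() hunks above enforce a single rule in one place: a column may only take part in the pushed-down check (index condition pushdown or the row ID filter) when it is a real stored column, while virtual columns are always fetched from the full row. The toy program below models only that classification step; Column, RowTemplate and the sample data are illustrative assumptions, not InnoDB structures.

    #include <cstdio>
    #include <vector>

    struct Column { const char *name; bool is_virtual; bool in_index; };
    struct RowTemplate { std::vector<const Column*> pushed, fetched; };

    // Classify columns: stored columns covered by the index may be evaluated by
    // the pushed-down check; virtual columns are never pushed down.
    static RowTemplate build_template(const std::vector<Column> &cols,
                                      bool push_down)
    {
      RowTemplate t;
      for (const Column &c : cols)
      {
        if (push_down && !c.is_virtual && c.in_index)
          t.pushed.push_back(&c);   // candidate for ICP / row ID filtering
        else
          t.fetched.push_back(&c);  // read from the full row afterwards
      }
      return t;
    }

    int main()
    {
      std::vector<Column> cols= {{"pk", false, true},
                                 {"generated_col", true, true},
                                 {"payload", false, false}};
      RowTemplate t= build_template(cols, true);
      std::printf("pushed=%zu fetched=%zu\n", t.pushed.size(), t.fetched.size());
      return 0;
    }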
@ -1536,11 +1536,9 @@ instant_alter_column_possible(
      ut_ad(!is_null || nullable);
      n_nullable += nullable;
      n_add++;
      uint l;
      uint l = (*af)->pack_length();
      switch ((*af)->type()) {
      case MYSQL_TYPE_VARCHAR:
        l = reinterpret_cast<const Field_varstring*>
          (*af)->get_length();
variable_length:
        if (l >= min_local_len) {
          max_size += blob_prefix

@ -1554,7 +1552,6 @@ instant_alter_column_possible(
          if (!is_null) {
            min_size += l;
          }
          l = (*af)->pack_length();
          max_size += l;
          lenlen += l > 255 ? 2 : 1;
        }

@ -1568,7 +1565,6 @@ instant_alter_column_possible(
          ((*af))->get_length();
        goto variable_length;
      default:
        l = (*af)->pack_length();
        if (l > 255 && ib_table.not_redundant()) {
          goto variable_length;
        }
@ -60,7 +60,7 @@ public:
  ulint used() const
    MY_ATTRIBUTE((warn_unused_result))
  {
    return(static_cast<ulint>(m_used & ~DYN_BLOCK_FULL_FLAG));
    return m_used;
  }

  /**

@ -153,8 +153,7 @@ public:
  /** Storage */
  byte m_data[MAX_DATA_SIZE];

  /** number of data bytes used in this block;
  DYN_BLOCK_FULL_FLAG is set when the block becomes full */
  /** number of data bytes used in this block */
  uint32_t m_used;

  friend class mtr_buf_t;
@ -33,7 +33,4 @@ Created 2013-03-16 Sunny Bains
/** This is the initial 'payload' size of a dynamic array */
#define DYN_ARRAY_DATA_SIZE 512

/** Flag for dyn_block_t::used that indicates a full block */
#define DYN_BLOCK_FULL_FLAG 0x1000000UL

#endif /* dyn0types_h */
@ -480,12 +480,23 @@ int ha_spider::open(
  result_list.last = NULL;
  result_list.current = NULL;
  result_list.record_num = 0;
  if (
    !(result_list.sqls = new spider_string[share->link_count]) ||
    !(result_list.insert_sqls = new spider_string[share->link_count]) ||
    !(result_list.update_sqls = new spider_string[share->link_count]) ||
    !(result_list.tmp_sqls = new spider_string[share->link_count])
  ) {
  if (!(result_list.sqls = new spider_string[share->link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error_init_result_list;
  }
  if (!(result_list.insert_sqls = new spider_string[share->link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error_init_result_list;
  }
  if (!(result_list.update_sqls = new spider_string[share->link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error_init_result_list;
  }
  if (!(result_list.tmp_sqls = new spider_string[share->link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error_init_result_list;
  }
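The Spider hunks above (and the similar ones further down) replace one combined allocation test with a separate test per allocation, which keeps the error path easy to follow. The stand-alone sketch below shows the shape of that per-allocation style; it uses new (std::nothrow) so that a failed allocation is reported as a null pointer, and the names Buf and open_lists are illustrative, not Spider symbols.

    #include <cstddef>
    #include <cstdio>
    #include <new>

    struct Buf { char data[64]; };

    // Allocate three arrays, checking each allocation on its own; on any failure
    // free whatever was already allocated (delete[] on nullptr is a no-op).
    static int open_lists(std::size_t n, Buf *&a, Buf *&b, Buf *&c)
    {
      a= b= c= nullptr;
      if (!(a= new (std::nothrow) Buf[n]))
        goto err;
      if (!(b= new (std::nothrow) Buf[n]))
        goto err;
      if (!(c= new (std::nothrow) Buf[n]))
        goto err;
      return 0;
    err:
      delete[] a; delete[] b; delete[] c;
      return 1;
    }

    int main()
    {
      Buf *a, *b, *c;
      int rc= open_lists(4, a, b, c);
      std::printf("rc=%d\n", rc);
      if (rc == 0) { delete[] a; delete[] b; delete[] c; }
      return rc;
    }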
@ -228,23 +228,22 @@ config::operator =(const config& x)
  conf_param *param, *new_param;
  for(ulong i = 0; i < x.conf_hash.records; i++)
  {
    if (
      (param = (conf_param *) my_hash_element((HASH *) &x.conf_hash, i)) &&
      (new_param = new conf_param())
    ) {
      if (
        !new_param->key.copy(param->key) &&
        !new_param->val.copy(param->val)
      ) {
        new_param->key.c_ptr_safe();
        new_param->val.c_ptr_safe();
        DENA_VERBOSE(10, fprintf(stderr, "CONFIG: %s=%s\n",
          new_param->key.ptr(), new_param->val.ptr()));
        if (my_hash_insert(&conf_hash, (uchar*) new_param))
    if ((param = (conf_param *) my_hash_element((HASH *) &x.conf_hash, i)))
      if ((new_param = new conf_param()))
      {
        if (
          !new_param->key.copy(param->key) &&
          !new_param->val.copy(param->val)
        ) {
          new_param->key.c_ptr_safe();
          new_param->val.c_ptr_safe();
          DENA_VERBOSE(10, fprintf(stderr, "CONFIG: %s=%s\n",
            new_param->key.ptr(), new_param->val.ptr()));
          if (my_hash_insert(&conf_hash, (uchar*) new_param))
            delete new_param;
        } else
          delete new_param;
      } else
        delete new_param;
      }
    }
  }
}
  DENA_VERBOSE(10, fprintf(stderr, "config operator = end %p", this));
@ -1003,7 +1003,12 @@ long long spider_copy_tables_body(
  all_link_cnt =
    copy_tables->link_idx_count[0] + copy_tables->link_idx_count[1];
  if (
    !(tmp_sql = new spider_string[all_link_cnt]) ||
    !(tmp_sql = new spider_string[all_link_cnt])
  ) {
    my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM);
    goto error;
  }
  if (
    !(spider = new ha_spider[all_link_cnt])
  ) {
    my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM);

@ -1032,13 +1037,6 @@ long long spider_copy_tables_body(
    tmp_spider->share = table_conn->share;
    tmp_spider->wide_handler = wide_handler;
    wide_handler->trx = copy_tables->trx;
/*
    if (spider_db_append_set_names(table_conn->share))
    {
      my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM);
      goto error_append_set_names;
    }
*/
    tmp_spider->conns = &table_conn->conn;
    tmp_sql[roop_count].init_calc_mem(SPD_MID_COPY_TABLES_BODY_3);
    tmp_sql[roop_count].set_charset(copy_tables->access_charset);

@ -1078,13 +1076,6 @@ long long spider_copy_tables_body(
    tmp_spider->share = table_conn->share;
    tmp_spider->wide_handler = wide_handler;
    wide_handler->trx = copy_tables->trx;
/*
    if (spider_db_append_set_names(table_conn->share))
    {
      my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM);
      goto error_append_set_names;
    }
*/
    tmp_spider->conns = &table_conn->conn;
    tmp_sql[roop_count].init_calc_mem(SPD_MID_COPY_TABLES_BODY_5);
    tmp_sql[roop_count].set_charset(copy_tables->access_charset);

@ -1111,14 +1102,6 @@ long long spider_copy_tables_body(
        bulk_insert_rows)))
    goto error_db_udf_copy_tables;

/*
  for (table_conn = copy_tables->table_conn[0];
    table_conn; table_conn = table_conn->next)
    spider_db_free_set_names(table_conn->share);
  for (table_conn = copy_tables->table_conn[1];
    table_conn; table_conn = table_conn->next)
    spider_db_free_set_names(table_conn->share);
*/
  if (table_list->table)
  {
#if MYSQL_VERSION_ID < 50500

@ -1143,8 +1126,7 @@ long long spider_copy_tables_body(
    }
    delete [] spider;
  }
  if (tmp_sql)
    delete [] tmp_sql;
  delete [] tmp_sql;
  spider_udf_free_copy_tables_alloc(copy_tables);

  DBUG_RETURN(1);

@ -1152,17 +1134,6 @@ long long spider_copy_tables_body(
error_db_udf_copy_tables:
error_create_dbton_handler:
error_init_dbton_handler:
/*
error_append_set_names:
*/
/*
  for (table_conn = copy_tables->table_conn[0];
    table_conn; table_conn = table_conn->next)
    spider_db_free_set_names(table_conn->share);
  for (table_conn = copy_tables->table_conn[1];
    table_conn; table_conn = table_conn->next)
    spider_db_free_set_names(table_conn->share);
*/
error:
  if (spider)
  {
@ -7301,11 +7301,9 @@ int spider_mbase_share::init()
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  }

  if (keys > 0 &&
    !(key_hint = new spider_string[keys])
  ) {
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  }
  if (keys > 0)
    if (!(key_hint = new spider_string[keys]))
      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  for (roop_count = 0; roop_count < keys; roop_count++)
  {
    key_hint[roop_count].init_calc_mem(SPD_MID_MBASE_SHARE_INIT_2);

@ -7313,12 +7311,12 @@ int spider_mbase_share::init()
  }
  DBUG_PRINT("info",("spider key_hint=%p", key_hint));

  if (
    !(table_select = new spider_string[1]) ||
    (keys > 0 &&
      !(key_select = new spider_string[keys])
    ) ||
    (error_num = create_table_names_str()) ||
  if (!(table_select = new spider_string[1]))
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  if (keys > 0)
    if (!(key_select = new spider_string[keys]))
      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  if ((error_num = create_table_names_str()) ||
      (table_share &&
       (
         (error_num = create_column_name_str()) ||

@ -7469,11 +7467,18 @@ int spider_mbase_share::create_table_names_str()
  table_names_str = NULL;
  db_names_str = NULL;
  db_table_str = NULL;
  if (
    !(table_names_str = new spider_string[spider_share->all_link_count]) ||
    !(db_names_str = new spider_string[spider_share->all_link_count]) ||
    !(db_table_str = new spider_string[spider_share->all_link_count])
  ) {
  if (!(table_names_str = new spider_string[spider_share->all_link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error;
  }
  if (!(db_names_str = new spider_string[spider_share->all_link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error;
  }
  if (!(db_table_str = new spider_string[spider_share->all_link_count]))
  {
    error_num = HA_ERR_OUT_OF_MEM;
    goto error;
  }

@ -7624,11 +7629,9 @@ int spider_mbase_share::create_column_name_str()
  Field **field;
  TABLE_SHARE *table_share = spider_share->table_share;
  DBUG_ENTER("spider_mbase_share::create_column_name_str");
  if (
    table_share->fields &&
    !(column_name_str = new spider_string[table_share->fields])
  )
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  if (table_share->fields)
    if (!(column_name_str = new spider_string[table_share->fields]))
      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  for (field = table_share->field, str = column_name_str;
       *field; field++, str++)
  {
@ -4513,12 +4513,12 @@ SPIDER_SHARE *spider_create_share(
  share->table.read_set = &table_share->all_set;
#endif

  if (table_share->keys > 0 &&
    !(share->key_hint = new spider_string[table_share->keys])
  ) {
    *error_num = HA_ERR_OUT_OF_MEM;
    goto error_init_hint_string;
  }
  if (table_share->keys > 0)
    if (!(share->key_hint = new spider_string[table_share->keys]))
    {
      *error_num = HA_ERR_OUT_OF_MEM;
      goto error_init_hint_string;
    }
  for (roop_count = 0; roop_count < (int) table_share->keys; roop_count++)
    share->key_hint[roop_count].init_calc_mem(SPD_MID_CREATE_SHARE_2);
  DBUG_PRINT("info",("spider share->key_hint=%p", share->key_hint));
@ -20561,7 +20561,6 @@ typedef struct {
#ifndef EMBEDDED_LIBRARY
static void test_proxy_header_tcp(const char *ipaddr, int port)
{

  int rc;
  MYSQL_RES *result;
  int family = (strchr(ipaddr,':') == NULL)?AF_INET:AF_INET6;

@ -20636,6 +20635,11 @@ static void test_proxy_header_tcp(const char *ipaddr, int port)
    DIE_UNLESS(strncmp(row[0], normalized_addr, addrlen) == 0);
    DIE_UNLESS(atoi(row[0] + addrlen+1) == port);
    mysql_free_result(result);
    if (i == 0 && !strcmp(ipaddr,"192.0.2.1"))
    {
      /* do "dirty" close, to get aborted message in error log.*/
      mariadb_cancel(m);
    }
    mysql_close(m);
  }
  sprintf(query,"DROP USER 'u'@'%s'",normalized_addr);
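The new lines in the proxy-header test abandon one connection on purpose: mariadb_cancel() drops the connection without a clean shutdown, so the server writes the aborted-connection warning whose text now carries the real client IP. Stripped of the test harness, the calling pattern is roughly the sketch below; the connection parameters are placeholders and error handling is reduced to the bare minimum.

    #include <mysql.h>

    int main()
    {
      MYSQL *m= mysql_init(NULL);
      /* host/user/password/port are placeholders, not values from the test */
      if (!mysql_real_connect(m, "127.0.0.1", "user", "password",
                              NULL, 3306, NULL, 0))
        return 1;
      mariadb_cancel(m);  /* "dirty" close: abort the connection mid-session */
      mysql_close(m);     /* release the handle after the aborted connection */
      return 0;
    }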