Merge 10.2 into 10.3

Marko Mäkelä 2021-03-27 16:11:26 +02:00
commit 3157fa182a
47 changed files with 349 additions and 608 deletions

View file

@ -190,21 +190,17 @@ OPTION(WITH_ASAN "Enable address sanitizer" OFF)
IF (WITH_ASAN AND NOT MSVC)
# this flag might be set by default on some OS
MY_CHECK_AND_SET_COMPILER_FLAG("-U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO)
# gcc 4.8.1 and new versions of clang
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -fPIC"
DEBUG RELWITHDEBINFO)
SET(HAVE_C_FSANITIZE ${have_C__fsanitize_address__fPIC})
SET(HAVE_CXX_FSANITIZE ${have_CXX__fsanitize_address__fPIC})
IF(HAVE_C_FSANITIZE AND HAVE_CXX_FSANITIZE)
OPTION(WITH_ASAN_SCOPE "Enable -fsanitize-address-use-after-scope" OFF)
SET(WITH_ASAN_OK 1)
ELSE()
# older versions of clang
MY_CHECK_AND_SET_COMPILER_FLAG("-faddress-sanitizer -fPIC"
DEBUG RELWITHDEBINFO)
SET(HAVE_C_FADDRESS ${have_C__faddress_sanitizer__fPIC})
SET(HAVE_CXX_FADDRESS ${have_CXX__faddress_sanitizer__fPIC})
IF(HAVE_C_FADDRESS AND HAVE_CXX_FADDRESS)
SET(WITH_ASAN_OK 1)
IF(WITH_ASAN_SCOPE)
MY_CHECK_AND_SET_COMPILER_FLAG(
"-fsanitize=address -fsanitize-address-use-after-scope"
DEBUG RELWITHDEBINFO)
ENDIF()
ENDIF()
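The WITH_ASAN_SCOPE option above turns on -fsanitize-address-use-after-scope. As a minimal illustration (not part of this commit), the following C++ program shows the defect class that check reports: keeping a pointer to a variable after its enclosing scope has ended.

// Minimal illustration of the bug class detected by
// -fsanitize-address-use-after-scope; build with
//   g++ -fsanitize=address -fsanitize-address-use-after-scope example.cc
#include <cstdio>

int main()
{
  int *p = nullptr;
  {
    int local = 42;
    p = &local;            // pointer escapes the inner scope
  }                        // 'local' is dead here
  std::printf("%d\n", *p); // use-after-scope: ASan aborts with a report
  return 0;
}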

View file

@ -4704,7 +4704,11 @@ sql_real_connect(char *host,char *database,char *user,char *password,
return -1; // Retryable
}
charset_info= get_charset_by_name(mysql.charset->name, MYF(0));
if (!(charset_info= get_charset_by_name(mysql.charset->name, MYF(0))))
{
put_info("Unknown default character set", INFO_ERROR);
return 1;
}
connected=1;
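The hunk above makes the client check the result of get_charset_by_name() before using it, so an unknown default character set produces an error instead of a null-pointer dereference. A reduced, hypothetical C++ sketch of the same guard (the lookup function and data here are stand-ins, not the real charset API):

#include <cstdio>
#include <cstring>

struct Charset { const char *name; };

static const Charset known[] = { {"latin1"}, {"utf8mb4"} };

// Stand-in for get_charset_by_name(): returns nullptr for unknown names.
static const Charset *get_by_name(const char *name)
{
  for (const Charset &cs : known)
    if (std::strcmp(cs.name, name) == 0)
      return &cs;
  return nullptr;
}

int main()
{
  const Charset *cs = get_by_name("no-such-charset");
  if (!cs)
  {
    std::fprintf(stderr, "Unknown default character set\n");
    return 1;                  // mirrors the early-return in the patch
  }
  std::printf("using %s\n", cs->name);
  return 0;
}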

View file

@ -174,6 +174,7 @@
#cmakedefine HAVE_DECL_MADVISE 1
#cmakedefine HAVE_DECL_MHA_MAPSIZE_VA 1
#cmakedefine HAVE_MALLINFO 1
#cmakedefine HAVE_MALLINFO2 1
#cmakedefine HAVE_MEMCPY 1
#cmakedefine HAVE_MEMMOVE 1
#cmakedefine HAVE_MKSTEMP 1

View file

@ -366,6 +366,7 @@ CHECK_FUNCTION_EXISTS (localtime_r HAVE_LOCALTIME_R)
CHECK_FUNCTION_EXISTS (lstat HAVE_LSTAT)
CHECK_FUNCTION_EXISTS (madvise HAVE_MADVISE)
CHECK_FUNCTION_EXISTS (mallinfo HAVE_MALLINFO)
CHECK_FUNCTION_EXISTS (mallinfo2 HAVE_MALLINFO2)
CHECK_FUNCTION_EXISTS (memcpy HAVE_MEMCPY)
CHECK_FUNCTION_EXISTS (memmove HAVE_MEMMOVE)
CHECK_FUNCTION_EXISTS (mkstemp HAVE_MKSTEMP)

View file

@ -61,7 +61,7 @@ my %debuggers = (
lldb => {
term => 1,
options => '-s {script} {exe}',
script => 'process launch --stop-at-entry {args}',
script => 'process launch --stop-at-entry -- {args}',
},
valgrind => {
options => '--tool=memcheck --show-reachable=yes --leak-check=yes --num-callers=16 --quiet --suppressions='.cwd().'/valgrind.supp {exe} {args} --loose-wait-for-pos-timeout=1500',

View file

@ -6110,6 +6110,36 @@ a b c d e
DROP TABLE t1,t2,t3,t4;
set join_cache_level=@save_join_cache_level;
#
# MDEV-24767: forced BNLH used for equi-join supported by compound index
#
create table t1 (a int, b int, c int ) engine=myisam ;
create table t2 (a int, b int, c int, primary key (c,a,b)) engine=myisam ;
insert into t1 values (3,4,2), (5,6,4);
insert into t2 values (3,4,2), (5,6,4);
select t1.a, t1.b, t1.c from t1,t2
where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
a b c
3 4 2
5 6 4
explain select t1.a, t1.b, t1.c from t1,t2
where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 12 test.t1.c,test.t1.a,test.t1.b 1 Using index
set join_cache_level=3;
select t1.a, t1.b, t1.c from t1,t2
where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
a b c
3 4 2
5 6 4
explain select t1.a, t1.b, t1.c from t1,t2
where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t2 hash_index PRIMARY #hash#PRIMARY:PRIMARY 12:12 test.t1.c,test.t1.a,test.t1.b 2 Using index; Using join buffer (flat, BNLH join)
drop table t1,t2;
set join_cache_level=@save_join_cache_level;
#
# MDEV-21243: Join buffer: condition is checked in wrong place for range access
#
create table t1(a int primary key);

View file

@ -4050,6 +4050,30 @@ DROP TABLE t1,t2,t3,t4;
set join_cache_level=@save_join_cache_level;
--echo #
--echo # MDEV-24767: forced BNLH used for equi-join supported by compound index
--echo #
create table t1 (a int, b int, c int ) engine=myisam ;
create table t2 (a int, b int, c int, primary key (c,a,b)) engine=myisam ;
insert into t1 values (3,4,2), (5,6,4);
insert into t2 values (3,4,2), (5,6,4);
let $q=
select t1.a, t1.b, t1.c from t1,t2
where t2.a = t1.a and t2.b = t1.b and t2.c=t1.c;
eval $q;
eval explain $q;
set join_cache_level=3;
eval $q;
eval explain $q;
drop table t1,t2;
set join_cache_level=@save_join_cache_level;
--echo #
--echo # MDEV-21243: Join buffer: condition is checked in wrong place for range access
--echo #
@ -4090,5 +4114,6 @@ where
drop table t1,t2,t3;
--echo # End of 10.3 tests
# The following command must be the last one in the file
set @@optimizer_switch=@save_optimizer_switch;

View file

@ -1,4 +1,3 @@
DROP TABLE IF EXISTS t1;
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
show columns from information_schema.client_statistics;
Field Type Null Key Default Extra
@ -235,3 +234,15 @@ select @@in_transaction;
0
drop table t1;
set @@global.general_log=@save_general_log;
#
# MDEV-25242 Server crashes in check_grant upon invoking function with userstat enabled
#
create function f() returns int return (select 1 from performance_schema.threads);
set global userstat= 1;
select f() from information_schema.table_statistics;
ERROR 21000: Subquery returns more than 1 row
set global userstat= 0;
drop function f;
#
# End of 10.2 tests
#

View file

@ -6,10 +6,6 @@
-- source include/have_innodb.inc
-- source include/have_log_bin.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
show columns from information_schema.client_statistics;
show columns from information_schema.user_statistics;
@ -115,5 +111,18 @@ set @@autocommit=1;
select @@in_transaction;
drop table t1;
# Cleanup
set @@global.general_log=@save_general_log;
--echo #
--echo # MDEV-25242 Server crashes in check_grant upon invoking function with userstat enabled
--echo #
create function f() returns int return (select 1 from performance_schema.threads);
set global userstat= 1;
--error ER_SUBQUERY_NO_1_ROW
select f() from information_schema.table_statistics;
set global userstat= 0;
drop function f;
--echo #
--echo # End of 10.2 tests
--echo #

View file

@ -21,7 +21,7 @@ INSERT INTO t1 VALUES ('node2_committed_before');
INSERT INTO t1 VALUES ('node2_committed_before');
COMMIT;
--source suite/galera/include/galera_unload_provider.inc
--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
@ -53,7 +53,7 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
--connection node_2
--source suite/galera/include/galera_load_provider.inc
--source suite/galera/include/galera_start_replication.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc

View file

@ -3,8 +3,9 @@ connection node_2;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a');
connection node_2;
SET SESSION wsrep_sync_wait=0;
Unloading wsrep provider ...
SET GLOBAL wsrep_provider = 'none';
SET GLOBAL wsrep_cluster_address = '';
connection node_1;
UPDATE t1 SET f2 = 'b' WHERE f1 > 1;
UPDATE t1 SET f2 = 'c' WHERE f1 > 2;

View file

@ -0,0 +1,73 @@
connection node_1;
reset master;
connection node_2;
reset master;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (id INT) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (1);
connection node_2;
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
SELECT COUNT(*) = 2 FROM t2;
COUNT(*) = 2
1
connection node_1;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t1)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t2 (id INT) ENGINE=InnoDB
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER
connection node_2;
SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
COUNT(*) = 2
1
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t1)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; CREATE TABLE t2 (id INT) ENGINE=InnoDB
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-#
mysqld-bin.000001 # Annotate_rows # # INSERT INTO t2 VALUES (1)
mysqld-bin.000001 # Table_map # # table_id: # (test.t2)
mysqld-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid # # COMMIT /* XID */
mysqld-bin.000001 # Gtid # # GTID #-#-#
mysqld-bin.000001 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER
DROP TABLE t1;
DROP TABLE t2;
#cleanup
connection node_1;
RESET MASTER;

View file

@ -4,4 +4,4 @@
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.recover=yes;pc.ignore_sb=true'
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.recover=yes;pc.ignore_sb=true'
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.recover=yes'

View file

@ -21,7 +21,8 @@ INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a');
# Disconnect node #2
--connection node_2
--source suite/galera/include/galera_unload_provider.inc
SET SESSION wsrep_sync_wait=0;
--source suite/galera/include/galera_stop_replication.inc
--connection node_1
UPDATE t1 SET f2 = 'b' WHERE f1 > 1;
@ -45,7 +46,6 @@ UPDATE t1 SET f2 = 'c' WHERE f1 > 2;
--disable_query_log
# base_port setting is lost for some reason when unloading provider, so we need to restore it
--eval SET GLOBAL wsrep_provider_options= 'base_port=$NODE_GALERAPORT_2';
--eval SET GLOBAL wsrep_provider = '$wsrep_provider_orig';
# Make sure IST will block ...
--let $galera_sync_point = recv_IST_after_apply_trx
--source include/galera_set_sync_point.inc

View file

@ -0,0 +1,42 @@
--source include/galera_cluster.inc
--source include/force_restart.inc
--connection node_1
reset master;
--connection node_2
reset master;
#
# Test Galera with --log-bin --log-slave-updates .
# This way the actual MySQL binary log is used,
# rather than Galera's own implementation
#
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (id INT) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (1);
--connection node_2
SELECT COUNT(*) = 1 FROM t1;
SELECT COUNT(*) = 2 FROM t2;
--connection node_1
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
--let $MASTER_MYPORT=$NODE_MYPORT_1
--source include/show_binlog_events.inc
--connection node_2
SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
--let $MASTER_MYPORT=$NODE_MYPORT_2
--source include/show_binlog_events.inc
DROP TABLE t1;
DROP TABLE t2;
--echo #cleanup
--connection node_1
RESET MASTER;

View file

@ -1,42 +1 @@
--source include/galera_cluster.inc
--source include/force_restart.inc
--connection node_1
reset master;
--connection node_2
reset master;
#
# Test Galera with --log-bin --log-slave-updates .
# This way the actual MySQL binary log is used,
# rather than Galera's own implementation
#
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (id INT) ENGINE=InnoDB;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (1);
--connection node_2
SELECT COUNT(*) = 1 FROM t1;
SELECT COUNT(*) = 2 FROM t2;
--connection node_1
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
--let $MASTER_MYPORT=$NODE_MYPORT_1
--source include/show_binlog_events.inc
--connection node_2
SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1';
--let $MASTER_MYPORT=$NODE_MYPORT_2
--source include/show_binlog_events.inc
DROP TABLE t1;
DROP TABLE t2;
--echo #cleanup
--connection node_1
RESET MASTER;
--source galera_log_bin.inc

View file

@ -0,0 +1 @@
--log-slave-updates --log-bin

View file

@ -0,0 +1,15 @@
!include ../galera_2nodes.cnf
[mysqld]
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
[sst]
transferfmt=@ENV.MTR_GALERA_TFMT
streamfmt=xbstream

View file

@ -0,0 +1,2 @@
--source include/have_mariabackup.inc
--source galera_log_bin.inc

View file

@ -10,7 +10,7 @@
--connection node_2
#--connection node_2
#--source suite/galera/include/galera_unload_provider.inc
#--source suite/galera/include/galera_stop_replication.inc
--echo Shutting down server ...
--source include/shutdown_mysqld.inc

View file

@ -37,7 +37,7 @@ SET GLOBAL wsrep_sst_method = 'mysqldump';
#
#--connection node_2
#--source suite/galera/include/galera_unload_provider.inc
#--source suite/galera/include/galera_stop_replication.inc
--echo Shutting down server ...
--source include/shutdown_mysqld.inc
@ -62,7 +62,7 @@ INSERT INTO t1 VALUES (1);
--let $start_mysqld_params = --wsrep_sst_auth=sst: --wsrep_sst_method=mysqldump --wsrep-sst-receive-address=[::1].1:$NODE_MYPORT_2
--source include/start_mysqld.inc
#--source suite/galera/include/galera_load_provider.inc
#--source suite/galera/include/galera_start_replication.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc

View file

@ -29,10 +29,10 @@ INSERT INTO t1 VALUES (01), (02), (03), (04), (05);
# Disconnect nodes #2 and #3
--connection node_2
--source suite/galera/include/galera_unload_provider.inc
--source suite/galera/include/galera_stop_replication.inc
--connection node_3
--source suite/galera/include/galera_unload_provider.inc
--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--source include/wait_until_connected_again.inc

View file

@ -1,152 +0,0 @@
--echo #
--echo # Testing robustness against random compression failures
--echo #
--source include/not_embedded.inc
--source include/have_innodb.inc
--let $simulate_comp_failures_save = `SELECT @@innodb_simulate_comp_failures`
--disable_query_log
call mtr.add_suppression("InnoDB: Simulating a compression failure for table `test`\\.`t1`");
--enable_query_log
# create the table with compressed pages of size 8K.
CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
SHOW CREATE TABLE t1;
# percentage of compressions that will be forced to fail
SET GLOBAL innodb_simulate_comp_failures = 25;
--disable_query_log
--disable_result_log
let $num_inserts_ind = $num_inserts;
let $commit_iterations=50;
while ($num_inserts_ind)
{
let $repeat = `select floor(rand() * 10)`;
eval INSERT INTO t1(id, msg)
VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat));
dec $num_inserts_ind;
}
--enable_query_log
--enable_result_log
COMMIT;
SELECT COUNT(id) FROM t1;
--disable_query_log
--disable_result_log
# do random ops, making sure that some pages will get fragmented and reorganized.
let $num_ops_ind = $num_ops;
let $commit_count= $commit_iterations;
BEGIN;
while($num_ops_ind)
{
let $idx = `select floor(rand()*$num_inserts)`;
let $insert_or_update = `select floor(rand()*3)`;
let $repeat = `select floor(rand() * 9) + 1`;
let $msg = query_get_value(`select repeat('abcdefghijklmnopqrstuvwxyz', $repeat) as x`, x, 1);
let $single_or_multi = `select floor(rand()*10)`;
if ($insert_or_update)
{
let $cnt = query_get_value(SELECT COUNT(*) cnt FROM t1 WHERE id=$idx, cnt, 1);
if ($cnt)
{
let $update = `select floor(rand()*2)`;
if ($update)
{
if ($single_or_multi)
{
eval UPDATE t1 SET msg=\"$msg\" WHERE id=$idx;
}
if (!$single_or_multi)
{
eval UPDATE t1 SET msg=\"$msg\" WHERE id >= $idx - 100 AND id <= $idx + 100;
}
}
if (!$update)
{
if ($single_or_multi)
{
eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id = VALUES(id);
}
if (!$single_or_multi)
{
let $diff = 200;
while ($diff)
{
eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx + 100 - $diff) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id=VALUES(id);
dec $diff;
}
}
}
}
if (!$cnt)
{
let $null_msg = `select floor(rand()*2)`;
if ($null_msg)
{
eval INSERT INTO t1(id,msg) VALUES ($idx, NULL);
}
if (!$null_msg)
{
eval INSERT INTO t1(id, msg) VALUES ($idx, \"$msg\");
}
}
}
if (!$insert_or_update)
{
if ($single_or_multi)
{
eval DELETE from t1 WHERE id=$idx;
}
if (!$single_or_multi)
{
eval DELETE from t1 WHERE id >= $idx - 100 AND id <= $idx + 100;
}
}
dec $commit_count;
if (!$commit_count)
{
let $commit_count= $commit_iterations;
COMMIT;
BEGIN;
}
dec $num_ops_ind;
}
COMMIT;
# final cleanup
DROP TABLE t1;
eval SET GLOBAL innodb_simulate_comp_failures = $simulate_comp_failures_save;
--enable_query_log

View file

@ -1,17 +0,0 @@
#
# Testing robustness against random compression failures
#
CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`msg` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `msg_i` (`msg`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
SET GLOBAL innodb_simulate_comp_failures = 25;
COMMIT;
SELECT COUNT(id) FROM t1;
COUNT(id)
1500

View file

@ -1,17 +0,0 @@
#
# Testing robustness against random compression failures
#
CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`msg` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `msg_i` (`msg`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
SET GLOBAL innodb_simulate_comp_failures = 25;
COMMIT;
SELECT COUNT(id) FROM t1;
COUNT(id)
1000

View file

@ -1,2 +0,0 @@
--innodb-file-per-table
--skip-innodb-doublewrite

View file

@ -1,9 +0,0 @@
--source include/big_test.inc
# test takes too long with valgrind
--source include/not_valgrind.inc
--source include/have_debug.inc
--let $num_inserts = 1500
--let $num_ops = 3500
--source suite/innodb/include/innodb_simulate_comp_failures.inc
# clean exit
--exit

View file

@ -1,2 +0,0 @@
--innodb-file-per-table

View file

@ -1,8 +0,0 @@
--source include/have_debug.inc
--source include/not_valgrind.inc
--let $num_inserts = 1000
--let $num_ops = 30
--source suite/innodb/include/innodb_simulate_comp_failures.inc
# clean exit
--exit

View file

@ -1,77 +0,0 @@
SET @start_global_value = @@global.innodb_simulate_comp_failures;
SELECT @start_global_value;
@start_global_value
0
Valid values are between 0 and 99
select @@global.innodb_simulate_comp_failures between 0 and 99;
@@global.innodb_simulate_comp_failures between 0 and 99
1
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
0
select @@session.innodb_simulate_comp_failures;
ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable
show global variables like 'innodb_simulate_comp_failures';
Variable_name Value
innodb_simulate_comp_failures 0
show session variables like 'innodb_simulate_comp_failures';
Variable_name Value
innodb_simulate_comp_failures 0
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 0
select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 0
set global innodb_simulate_comp_failures=10;
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
10
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 10
select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 10
set session innodb_simulate_comp_failures=1;
ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable and should be set with SET GLOBAL
set global innodb_simulate_comp_failures=1.1;
ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
set global innodb_simulate_comp_failures=1e1;
ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
set global innodb_simulate_comp_failures="foo";
ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures'
set global innodb_simulate_comp_failures=-7;
Warnings:
Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '-7'
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
0
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 0
set global innodb_simulate_comp_failures=106;
Warnings:
Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '106'
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
99
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
VARIABLE_NAME VARIABLE_VALUE
INNODB_SIMULATE_COMP_FAILURES 99
set global innodb_simulate_comp_failures=0;
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
0
set global innodb_simulate_comp_failures=99;
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
99
set global innodb_simulate_comp_failures=DEFAULT;
select @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
0
SET @@global.innodb_simulate_comp_failures = @start_global_value;
SELECT @@global.innodb_simulate_comp_failures;
@@global.innodb_simulate_comp_failures
0

View file

@ -1773,18 +1773,6 @@ NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_SIMULATE_COMP_FAILURES
SESSION_VALUE NULL
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Simulate compression failures.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 99
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NONE
VARIABLE_NAME INNODB_SORT_BUFFER_SIZE
SESSION_VALUE NULL
DEFAULT_VALUE 1048576
@ -2138,7 +2126,7 @@ SESSION_VALUE NULL
DEFAULT_VALUE ON
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Enable atomic writes, instead of using the doublewrite buffer, for files on devices that supports atomic writes. To use this option one must use innodb_file_per_table=1, innodb_flush_method=O_DIRECT. This option only works on Linux with either FusionIO cards using the directFS filesystem or with Shannon cards using any file system.
VARIABLE_COMMENT Enable atomic writes, instead of using the doublewrite buffer, for files on devices that supports atomic writes. This option only works on Linux with either FusionIO cards using the directFS filesystem or with Shannon cards using any file system.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL

View file

@ -1,65 +0,0 @@
--source include/have_innodb.inc
--source include/have_debug.inc
SET @start_global_value = @@global.innodb_simulate_comp_failures;
SELECT @start_global_value;
#
# exists as global only
#
--echo Valid values are between 0 and 99
select @@global.innodb_simulate_comp_failures between 0 and 99;
select @@global.innodb_simulate_comp_failures;
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.innodb_simulate_comp_failures;
show global variables like 'innodb_simulate_comp_failures';
show session variables like 'innodb_simulate_comp_failures';
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
#
# show that it's writable
#
set global innodb_simulate_comp_failures=10;
select @@global.innodb_simulate_comp_failures;
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures';
--error ER_GLOBAL_VARIABLE
set session innodb_simulate_comp_failures=1;
#
# incorrect types
#
--error ER_WRONG_TYPE_FOR_VAR
set global innodb_simulate_comp_failures=1.1;
--error ER_WRONG_TYPE_FOR_VAR
set global innodb_simulate_comp_failures=1e1;
--error ER_WRONG_TYPE_FOR_VAR
set global innodb_simulate_comp_failures="foo";
set global innodb_simulate_comp_failures=-7;
select @@global.innodb_simulate_comp_failures;
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
set global innodb_simulate_comp_failures=106;
select @@global.innodb_simulate_comp_failures;
select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures';
#
# min/max/DEFAULT values
#
set global innodb_simulate_comp_failures=0;
select @@global.innodb_simulate_comp_failures;
set global innodb_simulate_comp_failures=99;
select @@global.innodb_simulate_comp_failures;
set global innodb_simulate_comp_failures=DEFAULT;
select @@global.innodb_simulate_comp_failures;
SET @@global.innodb_simulate_comp_failures = @start_global_value;
SELECT @@global.innodb_simulate_comp_failures;

View file

@ -4,10 +4,11 @@ INCLUDE (CheckFunctionExists)
CHECK_INCLUDE_FILES (security/pam_ext.h HAVE_PAM_EXT_H)
CHECK_INCLUDE_FILES (security/pam_appl.h HAVE_PAM_APPL_H)
CHECK_FUNCTION_EXISTS (strndup HAVE_STRNDUP)
CHECK_FUNCTION_EXISTS (getgrouplist HAVE_GETGROUPLIST)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
# Check whether getgrouplist uses git_t for second and third arguments.
# Check whether getgrouplist uses gid_t for second and third arguments.
SET(CMAKE_REQUIRED_FLAGS -Werror)
CHECK_C_SOURCE_COMPILES(
"
@ -29,7 +30,7 @@ SET(CMAKE_REQUIRED_LIBRARIES pam)
CHECK_FUNCTION_EXISTS(pam_syslog HAVE_PAM_SYSLOG)
SET(CMAKE_REQUIRED_LIBRARIES)
IF(HAVE_PAM_APPL_H)
IF(HAVE_PAM_APPL_H AND HAVE_GETGROUPLIST)
FIND_LIBRARY(PAM_LIBRARY pam) # for srpm build-depends detection
MYSQL_ADD_PLUGIN(auth_pam auth_pam.c LINK_LIBRARIES pam MODULE_ONLY)
@ -43,7 +44,7 @@ IF(HAVE_PAM_APPL_H)
SET(CPACK_RPM_server_USER_FILELIST ${CPACK_RPM_server_USER_FILELIST} "%config(noreplace) ${INSTALL_PAMDATADIR}/*" PARENT_SCOPE)
ENDIF()
ENDIF()
ENDIF(HAVE_PAM_APPL_H)
ENDIF()
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
${CMAKE_CURRENT_BINARY_DIR}/config_auth_pam.h)
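The CMake changes above make the PAM plugin depend on getgrouplist() and probe how it is declared. A stand-alone C++ sketch (Linux/glibc assumed; the user name is arbitrary) of the call whose argument types the CHECK_C_SOURCE_COMPILES test is meant to verify:

#include <grp.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
  gid_t groups[128];
  int ngroups = 128;
  /* On glibc the second and third arguments are gid_t and gid_t*;
     some platforms declare them as int and int*, which is what the
     compile test above detects. */
  if (getgrouplist("root", 0, groups, &ngroups) < 0)
  {
    fprintf(stderr, "group buffer too small (%d needed)\n", ngroups);
    return 1;
  }
  printf("root is a member of %d groups\n", ngroups);
  return 0;
}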

View file

@ -33,8 +33,7 @@ static int table_stats_fill(THD *thd, TABLE_LIST *tables, COND *cond)
tmp_table.grant.privilege= 0;
if (check_access(thd, SELECT_ACL, tmp_table.db.str,
&tmp_table.grant.privilege, NULL, 0, 1) ||
check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX,
1))
check_grant(thd, SELECT_ACL, &tmp_table, 1, 1, 1))
continue;
table->field[0]->store(table_stats->table, schema_length,

View file

@ -212,6 +212,9 @@ case "$1" in
"$option" != "--port" && \
"$option" != "--socket" ]]; then
value=${1#*=}
if [ "$value" == "$1" ]; then
value=""
fi
case "$option" in
'--innodb-data-home-dir')
if [ -z "$INNODB_DATA_HOME_DIR_ARG" ]; then

View file

@ -1200,7 +1200,7 @@ bool JOIN_CACHE::check_emb_key_usage()
Item *item= ref->items[i]->real_item();
Field *fld= ((Item_field *) item)->field;
CACHE_FIELD *init_copy= field_descr+flag_fields+i;
for (j= i, copy= init_copy; i < local_key_arg_fields; i++, copy++)
for (j= i, copy= init_copy; j < local_key_arg_fields; j++, copy++)
{
if (fld->eq(copy->field))
{

View file

@ -617,8 +617,12 @@ Next alarm time: %lu\n",
(ulong)alarm_info.next_alarm_time);
#endif
display_table_locks();
#ifdef HAVE_MALLINFO
struct mallinfo info= mallinfo();
#if defined(HAVE_MALLINFO2)
struct mallinfo2 info = mallinfo2();
#elif defined(HAVE_MALLINFO)
struct mallinfo info= mallinfo();
#endif
#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
char llbuff[10][22];
printf("\nMemory status:\n\
Non-mmapped space allocated from system: %s\n\
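The #if chain above prefers mallinfo2(), available since glibc 2.33, whose counters are size_t, and keeps the deprecated mallinfo(), whose int fields can wrap once a process allocates more than 2 GiB. A stand-alone C++ sketch of the same fallback; here the HAVE_* macros stand in for the CMake detection shown earlier (e.g. compile with -DHAVE_MALLINFO2 on a recent glibc):

#include <malloc.h>
#include <stdio.h>

int main(void)
{
#if defined(HAVE_MALLINFO2)
  struct mallinfo2 info = mallinfo2();  /* size_t fields, no overflow */
  printf("non-mmapped bytes obtained from system: %zu\n", info.arena);
#elif defined(HAVE_MALLINFO)
  struct mallinfo info = mallinfo();    /* int fields, deprecated in glibc 2.33+ */
  printf("non-mmapped bytes obtained from system: %d\n", info.arena);
#else
  printf("no mallinfo interface detected\n");
#endif
  return 0;
}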

View file

@ -212,7 +212,6 @@ static char* innodb_large_prefix;
stopword table to be used */
static char* innobase_server_stopword_table;
static my_bool innobase_use_atomic_writes;
static my_bool innobase_use_checksums;
static my_bool innobase_locks_unsafe_for_binlog;
static my_bool innobase_rollback_on_timeout;
@ -4110,21 +4109,16 @@ static int innodb_init_params()
innobase_commit_concurrency_init_default();
srv_use_atomic_writes
= innobase_use_atomic_writes && my_may_have_atomic_write;
if (srv_use_atomic_writes && !srv_file_per_table)
{
fprintf(stderr, "InnoDB: Disabling atomic_writes as file_per_table is not used.\n");
srv_use_atomic_writes= 0;
}
if (innodb_idle_flush_pct != 100) {
ib::warn() << deprecated_idle_flush_pct;
}
if (srv_use_atomic_writes) {
fprintf(stderr, "InnoDB: using atomic writes.\n");
#ifndef _WIN32
if (srv_use_atomic_writes && my_may_have_atomic_write) {
/*
Force O_DIRECT on Unixes (on Windows writes are always
unbuffered)
*/
#ifndef _WIN32
switch (innodb_flush_method) {
case SRV_O_DIRECT:
case SRV_O_DIRECT_NO_FSYNC:
@ -4133,8 +4127,8 @@ static int innodb_init_params()
innodb_flush_method = SRV_O_DIRECT;
fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n");
}
#endif
}
#endif
if (srv_read_only_mode) {
ib::info() << "Started in read only mode";
@ -19244,12 +19238,10 @@ static MYSQL_SYSVAR_BOOL(doublewrite, srv_use_doublewrite_buf,
" Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
static MYSQL_SYSVAR_BOOL(use_atomic_writes, srv_use_atomic_writes,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
"Enable atomic writes, instead of using the doublewrite buffer, for files "
"on devices that supports atomic writes. "
"To use this option one must use "
"innodb_file_per_table=1, innodb_flush_method=O_DIRECT. "
"This option only works on Linux with either FusionIO cards using "
"the directFS filesystem or with Shannon cards using any file system.",
NULL, NULL, TRUE);
@ -20259,11 +20251,6 @@ static MYSQL_SYSVAR_BOOL(master_thread_disabled_debug,
PLUGIN_VAR_OPCMDARG,
"Disable master thread",
NULL, srv_master_thread_disabled_debug_update, FALSE);
static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures,
PLUGIN_VAR_NOCMDARG,
"Simulate compression failures.",
NULL, NULL, 0, 0, 99, 0);
#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_BOOL(force_primary_key,
@ -20580,7 +20567,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(compression_pad_pct_max),
MYSQL_SYSVAR(default_row_format),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(simulate_comp_failures),
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
MYSQL_SYSVAR(limit_optimistic_insert_debug),
MYSQL_SYSVAR(trx_purge_view_update_only_debug),

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2020, MariaDB Corporation.
Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -46,7 +46,7 @@ struct ha_table_option_struct
uint atomic_writes; /*!< Use atomic writes for this
table if this options is ON or
in DEFAULT if
srv_use_atomic_writes=1.
innodb_use_atomic_writes.
Atomic writes are not used if
value OFF.*/
uint encryption; /*!< DEFAULT, ON, OFF */

View file

@ -596,9 +596,6 @@ extern struct export_var_t export_vars;
/** Global counters */
extern srv_stats_t srv_stats;
/** Simulate compression failures. */
extern uint srv_simulate_comp_failures;
/** Fatal semaphore wait threshold = maximum number of seconds
that semaphore times out in InnoDB */
#define DEFAULT_SRV_FATAL_SEMAPHORE_TIMEOUT 600

View file

@ -1370,33 +1370,6 @@ page_zip_compress(
MONITOR_INC(MONITOR_PAGE_COMPRESS);
/* Simulate a compression failure with a probability determined by
innodb_simulate_comp_failures, only if the page has 2 or more
records. */
if (srv_simulate_comp_failures
&& !dict_index_is_ibuf(index)
&& page_get_n_recs(page) >= 2
&& ((ulint)(rand() % 100) < srv_simulate_comp_failures)
&& strcmp(index->table->name.m_name, "IBUF_DUMMY")) {
#ifdef UNIV_DEBUG
ib::error()
<< "Simulating a compression failure"
<< " for table " << index->table->name
<< " index "
<< index->name()
<< " page "
<< page_get_page_no(page)
<< "("
<< (page_is_leaf(page) ? "leaf" : "non-leaf")
<< ")";
#endif
goto err_exit;
}
heap = mem_heap_create(page_zip_get_size(page_zip)
+ n_fields * (2 + sizeof(ulint))
+ REC_OFFS_HEADER_SIZE

View file

@ -4862,10 +4862,6 @@ wait_again:
buf, i + 1, n_indexes);
}
DBUG_EXECUTE_IF(
"ib_merge_wait_after_sort",
os_thread_sleep(20000000);); /* 20 sec */
if (error == DB_SUCCESS) {
BtrBulk btr_bulk(sort_idx, trx,
trx->get_flush_observer());

View file

@ -126,25 +126,23 @@ NOTE that since we do not hold dict_operation_lock when leaving the
function, it may be that the referencing table has been dropped when
we leave this function: this function is only for heuristic use!
@return TRUE if referenced */
@return true if referenced */
static
ibool
bool
row_upd_index_is_referenced(
/*========================*/
dict_index_t* index, /*!< in: index */
trx_t* trx) /*!< in: transaction */
{
dict_table_t* table = index->table;
ibool froze_data_dict = FALSE;
ibool is_referenced = FALSE;
if (table->referenced_set.empty()) {
return(FALSE);
return false;
}
if (trx->dict_operation_lock_mode == 0) {
const bool froze_data_dict = !trx->dict_operation_lock_mode;
if (froze_data_dict) {
row_mysql_freeze_data_dictionary(trx);
froze_data_dict = TRUE;
}
dict_foreign_set::iterator it
@ -152,13 +150,13 @@ row_upd_index_is_referenced(
table->referenced_set.end(),
dict_foreign_with_index(index));
is_referenced = (it != table->referenced_set.end());
const bool is_referenced = (it != table->referenced_set.end());
if (froze_data_dict) {
row_mysql_unfreeze_data_dictionary(trx);
}
return(is_referenced);
return is_referenced;
}
#ifdef WITH_WSREP
@ -2281,7 +2279,6 @@ row_upd_sec_index_entry(
dtuple_t* entry;
dict_index_t* index;
btr_cur_t* btr_cur;
ibool referenced;
dberr_t err = DB_SUCCESS;
trx_t* trx = thr_get_trx(thr);
ulint mode;
@ -2292,7 +2289,7 @@ row_upd_sec_index_entry(
index = node->index;
referenced = row_upd_index_is_referenced(index, trx);
const bool referenced = row_upd_index_is_referenced(index, trx);
#ifdef WITH_WSREP
bool foreign = wsrep_row_upd_index_is_foreign(index, trx);
#endif /* WITH_WSREP */
@ -2693,12 +2690,13 @@ row_upd_clust_rec_by_insert(
upd_node_t* node, /*!< in/out: row update node */
dict_index_t* index, /*!< in: clustered index of the record */
que_thr_t* thr, /*!< in: query thread */
ibool referenced,/*!< in: TRUE if index may be referenced in
bool referenced,/*!< in: whether index may be referenced in
a foreign key constraint */
#ifdef WITH_WSREP
bool foreign,/*!< in: whether this is a foreign key */
#endif
mtr_t* mtr) /*!< in/out: mtr; gets committed here */
mtr_t* mtr) /*!< in/out: mini-transaction,
may be committed and restarted */
{
mem_heap_t* heap;
btr_pcur_t* pcur;
@ -2764,10 +2762,7 @@ row_upd_clust_rec_by_insert(
btr_cur_get_block(btr_cur), rec, index, offsets,
thr, node->row, mtr);
if (err != DB_SUCCESS) {
err_exit:
mtr_commit(mtr);
mem_heap_free(heap);
return(err);
goto err_exit;
}
/* If the the new row inherits externally stored
@ -2826,14 +2821,14 @@ check_fk:
}
}
mtr_commit(mtr);
mtr->commit();
mtr->start();
node->state = UPD_NODE_INSERT_CLUSTERED;
err = row_ins_clust_index_entry(index, entry, thr,
dtuple_get_n_ext(entry));
node->state = UPD_NODE_INSERT_CLUSTERED;
err_exit:
mem_heap_free(heap);
return(err);
}
@ -2853,7 +2848,8 @@ row_upd_clust_rec(
mem_heap_t** offsets_heap,
/*!< in/out: memory heap, can be emptied */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr; gets committed here */
mtr_t* mtr) /*!< in,out: mini-transaction; may be
committed and restarted here */
{
mem_heap_t* heap = NULL;
big_rec_t* big_rec = NULL;
@ -2899,16 +2895,15 @@ row_upd_clust_rec(
goto success;
}
mtr_commit(mtr);
if (buf_LRU_buf_pool_running_out()) {
err = DB_LOCK_TABLE_FULL;
goto func_exit;
}
/* We may have to modify the tree structure: do a pessimistic descent
down the index tree */
mtr->commit();
mtr->start();
if (index->table->is_temporary()) {
@ -2958,7 +2953,6 @@ success:
}
}
mtr_commit(mtr);
func_exit:
if (heap) {
mem_heap_free(heap);
@ -2983,17 +2977,17 @@ row_upd_del_mark_clust_rec(
rec_offs* offsets,/*!< in/out: rec_get_offsets() for the
record under the cursor */
que_thr_t* thr, /*!< in: query thread */
ibool referenced,
/*!< in: TRUE if index may be referenced in
bool referenced,
/*!< in: whether index may be referenced in
a foreign key constraint */
#ifdef WITH_WSREP
bool foreign,/*!< in: whether this is a foreign key */
#endif
mtr_t* mtr) /*!< in: mtr; gets committed here */
mtr_t* mtr) /*!< in,out: mini-transaction;
will be committed and restarted */
{
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
dberr_t err;
rec_t* rec;
trx_t* trx = thr_get_trx(thr);
@ -3009,8 +3003,7 @@ row_upd_del_mark_clust_rec(
if (!row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt && thr->prebuilt->table == node->table
? thr->prebuilt->m_mysql_table : NULL)) {
err = DB_COMPUTE_VALUE_FAILED;
return err;
return DB_COMPUTE_VALUE_FAILED;
}
/* Mark the clustered index record deleted; we do not have to check
@ -3018,7 +3011,7 @@ row_upd_del_mark_clust_rec(
rec = btr_cur_get_rec(btr_cur);
err = btr_cur_del_mark_set_clust_rec(
dberr_t err = btr_cur_del_mark_set_clust_rec(
btr_cur_get_block(btr_cur), rec,
index, offsets, thr, node->row, mtr);
@ -3055,8 +3048,6 @@ row_upd_del_mark_clust_rec(
#endif /* WITH_WSREP */
}
mtr_commit(mtr);
return(err);
}
@ -3073,14 +3064,12 @@ row_upd_clust_step(
{
dict_index_t* index;
btr_pcur_t* pcur;
ibool success;
dberr_t err;
mtr_t mtr;
rec_t* rec;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets;
ibool referenced;
ulint flags;
trx_t* trx = thr_get_trx(thr);
@ -3088,8 +3077,7 @@ row_upd_clust_step(
index = dict_table_get_first_index(node->table);
referenced = row_upd_index_is_referenced(index, trx);
const bool referenced = row_upd_index_is_referenced(index, trx);
#ifdef WITH_WSREP
const bool foreign = wsrep_row_upd_index_is_foreign(index, trx);
#endif
@ -3135,14 +3123,9 @@ row_upd_clust_step(
mode = BTR_MODIFY_LEAF;
}
success = btr_pcur_restore_position(mode, pcur, &mtr);
if (!success) {
if (!btr_pcur_restore_position(mode, pcur, &mtr)) {
err = DB_RECORD_NOT_FOUND;
mtr_commit(&mtr);
return(err);
goto exit_func;
}
/* If this is a row in SYS_INDEXES table of the data dictionary,
@ -3162,14 +3145,9 @@ row_upd_clust_step(
mtr.start();
index->set_modified(mtr);
success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur,
&mtr);
if (!success) {
if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, &mtr)) {
err = DB_ERROR;
mtr.commit();
return(err);
goto exit_func;
}
}
@ -3182,7 +3160,6 @@ row_upd_clust_step(
0, btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
mtr.commit();
goto exit_func;
}
}
@ -3193,8 +3170,6 @@ row_upd_clust_step(
btr_pcur_get_block(pcur),
page_rec_get_heap_no(rec)));
/* NOTE: the following function calls will also commit mtr */
if (node->is_delete == PLAIN_DELETE) {
err = row_upd_del_mark_clust_rec(
node, index, offsets, thr, referenced,
@ -3202,13 +3177,7 @@ row_upd_clust_step(
foreign,
#endif
&mtr);
if (err == DB_SUCCESS) {
node->state = UPD_NODE_UPDATE_ALL_SEC;
node->index = dict_table_get_next_index(index);
}
goto exit_func;
goto all_done;
}
/* If the update is made for MySQL, we already have the update vector
@ -3223,14 +3192,13 @@ row_upd_clust_step(
}
if (!node->is_delete && node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
err = row_upd_clust_rec(
flags, node, index, offsets, &heap, thr, &mtr);
goto exit_func;
}
if(!row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL)) {
if (!row_upd_store_row(node, trx->mysql_thd, thr->prebuilt
? thr->prebuilt->m_mysql_table : NULL)) {
err = DB_COMPUTE_VALUE_FAILED;
goto exit_func;
}
@ -3255,34 +3223,31 @@ row_upd_clust_step(
foreign,
#endif
&mtr);
if (err != DB_SUCCESS) {
goto exit_func;
all_done:
if (err == DB_SUCCESS) {
node->state = UPD_NODE_UPDATE_ALL_SEC;
success:
node->index = dict_table_get_next_index(index);
}
node->state = UPD_NODE_UPDATE_ALL_SEC;
} else {
err = row_upd_clust_rec(
flags, node, index, offsets, &heap, thr, &mtr);
if (err != DB_SUCCESS) {
goto exit_func;
if (err == DB_SUCCESS) {
ut_ad(node->is_delete != PLAIN_DELETE);
node->state = node->is_delete
? UPD_NODE_UPDATE_ALL_SEC
: UPD_NODE_UPDATE_SOME_SEC;
goto success;
}
ut_ad(node->is_delete != PLAIN_DELETE);
node->state = node->is_delete ?
UPD_NODE_UPDATE_ALL_SEC :
UPD_NODE_UPDATE_SOME_SEC;
}
node->index = dict_table_get_next_index(index);
exit_func:
if (heap) {
mtr.commit();
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(err);
return err;
}
/***********************************************************//**

View file

@ -489,9 +489,6 @@ current_time % 5 != 0. */
#endif /* MEM_PERIODIC_CHECK */
# define SRV_MASTER_DICT_LRU_INTERVAL (47)
/** Simulate compression failures. */
UNIV_INTERN uint srv_simulate_comp_failures;
/** Buffer pool dump status frequence in percentages */
UNIV_INTERN ulong srv_buf_dump_status_frequency;

View file

@ -97,13 +97,14 @@ static void ssl_set_sys_error(int ssl_error)
@param vio VIO object representing a SSL connection.
@param ret Value returned by a SSL I/O function.
@param event[out] The type of I/O event to wait/retry.
@param should_wait[out] whether to wait for 'event'
@return Whether a SSL I/O operation should be deferred.
@retval TRUE Temporary failure, retry operation.
@retval FALSE Indeterminate failure.
*/
static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event)
static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event, my_bool *should_wait)
{
int ssl_error;
SSL *ssl= vio->ssl_arg;
@ -120,6 +121,7 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
ERR_GET_REASON(err) == X509_R_CERT_ALREADY_IN_HASH_TABLE)
{
ERR_clear_error();
*should_wait= FALSE;
return TRUE;
}
#endif
@ -132,12 +134,15 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
{
case SSL_ERROR_WANT_READ:
*event= VIO_IO_EVENT_READ;
*should_wait= TRUE;
break;
case SSL_ERROR_WANT_WRITE:
*event= VIO_IO_EVENT_WRITE;
*should_wait= TRUE;
break;
default:
should_retry= FALSE;
*should_wait= FALSE;
ssl_set_sys_error(ssl_error);
#ifndef HAVE_YASSL
ERR_clear_error();
@ -149,6 +154,32 @@ static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event
}
/**
Handle SSL io error.
@param[in] vio Vio
@param[in] ret return from the failed IO operation
@return 0 - should retry last read/write operation
1 - some error has occured
*/
static int handle_ssl_io_error(Vio *vio, int ret)
{
enum enum_vio_io_event event;
my_bool should_wait;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event, &should_wait))
return 1;
if (!should_wait)
return 1;
/* Attempt to wait for an I/O event. */
return vio_socket_io_wait(vio, event);
}
size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
{
int ret;
@ -164,13 +195,7 @@ size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
{
while ((ret= SSL_read(ssl, buf, (int)size)) < 0)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Attempt to wait for an I/O event. */
if (vio_socket_io_wait(vio, event))
if (handle_ssl_io_error(vio,ret))
break;
}
}
@ -197,14 +222,7 @@ size_t vio_ssl_write(Vio *vio, const uchar *buf, size_t size)
{
while ((ret= SSL_write(ssl, buf, (int)size)) < 0)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Attempt to wait for an I/O event. */
if (vio_socket_io_wait(vio, event))
if (handle_ssl_io_error(vio,ret))
break;
}
}
@ -311,14 +329,7 @@ static int ssl_handshake_loop(Vio *vio, SSL *ssl, ssl_handshake_func_t func)
/* Initiate the SSL handshake. */
while ((ret= func(ssl)) < 1)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Wait for I/O so that the handshake can proceed. */
if (vio_socket_io_wait(vio, event))
if (handle_ssl_io_error(vio,ret))
break;
}
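The refactoring above moves the retry decision for SSL_read()/SSL_write()/handshake failures into one helper, handle_ssl_io_error(), and adds a should_wait flag so that soft errors which need no waiting are not treated as I/O events. A generic C++ sketch of the underlying OpenSSL retry policy (plain OpenSSL, not the MariaDB vio layer; a real caller would poll for the requested event on a non-blocking socket instead of spinning):

#include <openssl/ssl.h>

// Retry SSL_read() only while OpenSSL reports that it merely needs more I/O.
static int read_with_retry(SSL *ssl, void *buf, int len)
{
  for (;;)
  {
    int ret = SSL_read(ssl, buf, len);
    if (ret > 0)
      return ret;                     // got data
    switch (SSL_get_error(ssl, ret))
    {
    case SSL_ERROR_WANT_READ:
    case SSL_ERROR_WANT_WRITE:
      continue;                       // transient: wait/poll, then retry
    default:
      return -1;                      // shutdown or unrecoverable error
    }
  }
}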