Merge branch '10.4' into 10.5

This commit is contained in:
Oleksandr Byelkin 2023-07-26 13:54:59 +02:00
commit 7564be1352
111 changed files with 3375 additions and 1511 deletions

View file

@ -1193,11 +1193,15 @@ print_summary(
fprintf(fil_out, "index_id\t#pages\t\t#leaf_pages\t#recs_per_page"
"\t#bytes_per_page\n");
for (std::map<unsigned long long, per_index_stats>::const_iterator it = index_ids.begin();
it != index_ids.end(); it++) {
const per_index_stats& index = it->second;
for (const auto &ids : index_ids) {
const per_index_stats& index = ids.second;
if (!index.pages) {
DBUG_ASSERT(index.free_pages);
continue;
}
fprintf(fil_out, "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
it->first, index.pages, index.leaf_pages,
ids.first, index.pages, index.leaf_pages,
index.total_n_recs / index.pages,
index.total_data_bytes / index.pages);
}

View file

@ -147,7 +147,9 @@ IF(WOLFSSL_X86_64_BUILD)
LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/cpuid.c)
IF(MSVC)
SET(WOLFSSL_AESNI 1)
LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/aes_asm.asm)
LIST(APPEND WOLFCRYPT_SOURCES
${WOLFCRYPT_SRCDIR}/aes_asm.asm
${WOLFCRYPT_SRCDIR}/aes_gcm_asm.asm)
IF(CMAKE_C_COMPILER_ID MATCHES Clang)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes -msse4.2 -mpclmul -mrdrnd -mrdseed")
ENDIF()

@ -1 +1 @@
Subproject commit 4fbd4fd36a21efd9d1a7e17aba390e91c78693b1
Subproject commit 3b3c175af0e993ffaae251871421e206cc41963f

View file

@ -250,11 +250,30 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str,
static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
{
DBUG_ASSERT(dst_size > 0);
/* Note, strncpy will zerofill end of dst if src shorter than dst_size */
/* 1) IF there is a 0 byte in the first dst_size bytes of src, strncpy will
* 0-terminate dst, and pad dst with additional 0 bytes out to dst_size.
*
* 2) IF there is no 0 byte in the first dst_size bytes of src, strncpy will
* copy dst_size bytes, and the final byte won't be 0.
*
* In GCC 8+, the `-Wstringop-truncation` warning will object to strncpy()
* being used in this way, so we need to disable this warning for this
* single statement.
*/
#if defined(__GNUC__) && __GNUC__ >= 8
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-truncation"
#endif
strncpy(dst, src, dst_size);
#if defined(__GNUC__) && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
if (dst[dst_size-1])
{
/* Ensure string is zero terminated */
/* Only possible in case (2), meaning src was truncated. */
dst[dst_size-1]= 0;
return 1;
}

View file

@ -446,6 +446,7 @@ enum ha_base_keytype {
#define HA_ERR_CRASHED 126 /* Indexfile is crashed */
#define HA_ERR_WRONG_IN_RECORD 127 /* Record-file is crashed */
#define HA_ERR_OUT_OF_MEM 128 /* Out of memory */
#define HA_ERR_RETRY_INIT 129 /* Initialization failed and should be retried */
#define HA_ERR_NOT_A_TABLE 130 /* not a MYI file - no signature */
#define HA_ERR_WRONG_COMMAND 131 /* Command not supported */
#define HA_ERR_OLD_FILE 132 /* old databasfile */

View file

@ -281,7 +281,7 @@ enum enum_indicator_type
#define CLIENT_DEPRECATE_EOF (1ULL << 24)
#define CLIENT_PROGRESS_OBSOLETE (1ULL << 29)
#define CLIENT_SSL_VERIFY_SERVER_CERT (1ULL << 30)
#define CLIENT_SSL_VERIFY_SERVER_CERT_OBSOLETE (1ULL << 30)
/*
It used to be that if mysql_real_connect() failed, it would delete any
options set by the client, unless the CLIENT_REMEMBER_OPTIONS flag was
@ -334,7 +334,6 @@ enum enum_indicator_type
CLIENT_MULTI_STATEMENTS | \
CLIENT_MULTI_RESULTS | \
CLIENT_PS_MULTI_RESULTS | \
CLIENT_SSL_VERIFY_SERVER_CERT | \
CLIENT_REMEMBER_OPTIONS | \
MARIADB_CLIENT_PROGRESS | \
CLIENT_PLUGIN_AUTH | \
@ -352,9 +351,8 @@ enum enum_indicator_type
If any of the optional flags is supported by the build it will be switched
on before sending to the client during the connection handshake.
*/
#define CLIENT_BASIC_FLAGS (((CLIENT_ALL_FLAGS & ~CLIENT_SSL) \
& ~CLIENT_COMPRESS) \
& ~CLIENT_SSL_VERIFY_SERVER_CERT)
#define CLIENT_BASIC_FLAGS ((CLIENT_ALL_FLAGS & ~CLIENT_SSL) \
& ~CLIENT_COMPRESS)
enum mariadb_field_attr_t
{

View file

@ -44,6 +44,7 @@ struct st_mysql_options_extention {
struct mysql_async_context *async_context;
HASH connection_attributes;
size_t connection_attributes_length;
my_bool tls_verify_server_cert;
};
typedef struct st_mysql_methods

@ -1 +1 @@
Subproject commit d543bed61ba9a117e95764dd1429b21c3e0579d1
Subproject commit 3393fe35d378744e12636766931cf5109cc6c2e5

View file

@ -5799,4 +5799,52 @@ a a
9 9
10 10
drop table t1;
#
# MDEV-20010 Equal on two RANK window functions create wrong result
#
create table t1 (a int, b int) engine= innodb;
insert into t1 values (4, -2), (3, -1);
SELECT RANK() OVER (ORDER BY D.C) = RANK() OVER (ORDER BY B.a) FROM
(SELECT 5 AS C FROM t1) as D, (SELECT t1.b AS A FROM t1) AS B;
RANK() OVER (ORDER BY D.C) = RANK() OVER (ORDER BY B.a)
1
1
0
0
select b, rank() over (order by c) , rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
b rank() over (order by c) rank() over (order by dt1.b)
-2 1 1
-2 1 1
-1 1 3
-1 1 3
select b, rank() over (order by c) , rank() over (order by dt1.b),
rank() over (order by c) = rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
b rank() over (order by c) rank() over (order by dt1.b) rank() over (order by c) = rank() over (order by dt1.b)
-2 1 1 1
-2 1 1 1
-1 1 3 0
-1 1 3 0
alter table t1 engine=myisam;
select b, rank() over (order by c) , rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
b rank() over (order by c) rank() over (order by dt1.b)
-2 1 1
-2 1 1
-1 1 3
-1 1 3
create view v1 as select b,5 as c from t1;
select b, rank() over (order by c) from v1 order by b;
b rank() over (order by c)
-2 1
-1 1
drop view v1;
drop table t1;
# End of 10.4 tests

View file

@ -2,6 +2,7 @@
# This is too slow on MSAN
--source include/not_msan.inc
--source include/not_valgrind.inc
--source include/have_innodb.inc
create table t1 (a int, b varchar(32));
insert into t1 values
@ -4016,4 +4017,37 @@ with cte_e as (
drop table t1;
--echo #
--echo # MDEV-20010 Equal on two RANK window functions create wrong result
--echo #
create table t1 (a int, b int) engine= innodb;
insert into t1 values (4, -2), (3, -1);
SELECT RANK() OVER (ORDER BY D.C) = RANK() OVER (ORDER BY B.a) FROM
(SELECT 5 AS C FROM t1) as D, (SELECT t1.b AS A FROM t1) AS B;
select b, rank() over (order by c) , rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
select b, rank() over (order by c) , rank() over (order by dt1.b),
rank() over (order by c) = rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
alter table t1 engine=myisam;
select b, rank() over (order by c) , rank() over (order by dt1.b)
from
(select 5 as c from t1) as dt,
(select b from t1) as dt1;
create view v1 as select b,5 as c from t1;
select b, rank() over (order by c) from v1 order by b;
drop view v1;
drop table t1;
--echo # End of 10.4 tests

View file

@ -3351,6 +3351,40 @@ DROP FUNCTION f1;
# End of 10.3 tests
#
#
# Start of 10.4 tests
#
#
# MDEV-28384 UBSAN: null pointer passed as argument 1, which is declared to never be null in my_strnncoll_binary on SELECT ... COUNT or GROUP_CONCAT
#
CREATE TABLE t (c BLOB NOT NULL);
INSERT IGNORE INTO t VALUES (0);
SELECT COUNT(*) FROM t WHERE EXTRACTVALUE(c,'a')='a';
COUNT(*)
0
DROP TABLE t;
SET sql_mode='';
CREATE TABLE t (c TEXT NOT NULL);
INSERT INTO t VALUES();
Warnings:
Warning 1364 Field 'c' doesn't have a default value
INSERT IGNORE INTO t VALUES (NULL);
Warnings:
Warning 1048 Column 'c' cannot be null
SELECT GROUP_CONCAT(c ORDER BY BINARY c) FROM t GROUP BY c;
GROUP_CONCAT(c ORDER BY BINARY c)
,
DROP TABLE t;
#
# MDEV-30982 UBSAN: runtime error: null pointer passed as argument 2, which is declared to never be null in my_strnncoll_binary on DELETE
#
CREATE TABLE t (c1 SET('1','2','3'),c2 BINARY);
INSERT INTO t VALUES (0,0);
DELETE FROM t WHERE c2<c1;
DROP TABLE t;
#
# End of 10.4 tests
#
#
# Start of 10.5 tests
#
#

View file

@ -196,6 +196,39 @@ DROP FUNCTION f1;
--echo # End of 10.3 tests
--echo #
--echo #
--echo # Start of 10.4 tests
--echo #
--echo #
--echo # MDEV-28384 UBSAN: null pointer passed as argument 1, which is declared to never be null in my_strnncoll_binary on SELECT ... COUNT or GROUP_CONCAT
--echo #
CREATE TABLE t (c BLOB NOT NULL);
INSERT IGNORE INTO t VALUES (0);
SELECT COUNT(*) FROM t WHERE EXTRACTVALUE(c,'a')='a';
DROP TABLE t;
SET sql_mode='';
CREATE TABLE t (c TEXT NOT NULL);
INSERT INTO t VALUES();
INSERT IGNORE INTO t VALUES (NULL);
SELECT GROUP_CONCAT(c ORDER BY BINARY c) FROM t GROUP BY c;
DROP TABLE t;
--echo #
--echo # MDEV-30982 UBSAN: runtime error: null pointer passed as argument 2, which is declared to never be null in my_strnncoll_binary on DELETE
--echo #
CREATE TABLE t (c1 SET('1','2','3'),c2 BINARY);
INSERT INTO t VALUES (0,0);
DELETE FROM t WHERE c2<c1;
DROP TABLE t;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # Start of 10.5 tests
--echo #

View file

@ -8892,6 +8892,33 @@ DROP TABLE t1;
# End of 10.2 tests
#
#
# Start of 10.4 tests
#
#
# MDEV-28384 UBSAN: null pointer passed as argument 1, which is declared to never be null in my_strnncoll_binary on SELECT ... COUNT or GROUP_CONCAT
#
CREATE TABLE t (c TEXT CHARACTER SET latin1 COLLATE latin1_bin NOT NULL);
INSERT IGNORE INTO t VALUES (0);
SELECT COUNT(*) FROM t WHERE EXTRACTVALUE(c,'a')='a';
COUNT(*)
0
DROP TABLE t;
SET sql_mode='';
CREATE TABLE t (c TEXT CHARACTER SET latin1 COLLATE latin1_bin NOT NULL);
INSERT INTO t VALUES();
Warnings:
Warning 1364 Field 'c' doesn't have a default value
INSERT IGNORE INTO t VALUES (NULL);
Warnings:
Warning 1048 Column 'c' cannot be null
SELECT GROUP_CONCAT(c ORDER BY BINARY c) FROM t GROUP BY c;
GROUP_CONCAT(c ORDER BY BINARY c)
,
DROP TABLE t;
#
# End of 10.4 tests
#
#
# Start of 10.5 tests
#
#
@ -8903,9 +8930,9 @@ CREATE OR REPLACE TABLE t1 AS SELECT CAST(1 AS BINARY), CAST(@a AS BINARY), CAST
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`CAST(1 AS BINARY)` varbinary(1) DEFAULT NULL,
`CAST(1 AS BINARY)` varbinary(1) NOT NULL,
`CAST(@a AS BINARY)` varbinary(20) DEFAULT NULL,
`CAST(@b:=3 AS BINARY)` varbinary(1) DEFAULT NULL
`CAST(@b:=3 AS BINARY)` varbinary(1) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
DROP TABLE t1;
#

View file

@ -442,6 +442,30 @@ SET NAMES latin1;
--echo # End of 10.2 tests
--echo #
--echo #
--echo # Start of 10.4 tests
--echo #
--echo #
--echo # MDEV-28384 UBSAN: null pointer passed as argument 1, which is declared to never be null in my_strnncoll_binary on SELECT ... COUNT or GROUP_CONCAT
--echo #
CREATE TABLE t (c TEXT CHARACTER SET latin1 COLLATE latin1_bin NOT NULL);
INSERT IGNORE INTO t VALUES (0);
SELECT COUNT(*) FROM t WHERE EXTRACTVALUE(c,'a')='a';
DROP TABLE t;
SET sql_mode='';
CREATE TABLE t (c TEXT CHARACTER SET latin1 COLLATE latin1_bin NOT NULL);
INSERT INTO t VALUES();
INSERT IGNORE INTO t VALUES (NULL);
SELECT GROUP_CONCAT(c ORDER BY BINARY c) FROM t GROUP BY c;
DROP TABLE t;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # Start of 10.5 tests
--echo #

View file

@ -178,8 +178,7 @@ drop table t1;
#
# Check more automatic conversion
#
# Enable view protocol after fix MDEV-28017
--disable_view_protocol
--disable_service_connection
set names koi8r;
create table t1 (c1 char(10) character set cp1251);
insert into t1 values ('ß');
@ -204,7 +203,7 @@ select rpad(c1,3,'
#select case c1 when 'ß' then 'ß' when 'ö' then 'ö' else 'c' end from t1;
#select export_set(5,c1,'ö'), export_set(5,'ö',c1) from t1;
drop table t1;
--enable_view_protocol
--enable_service_connection
#
# Bug 20695: problem with field default value's character set

View file

@ -2842,6 +2842,62 @@ VALUES ('') UNION VALUES ( _utf16 0x0020 COLLATE utf16_bin);
# End of 10.3 tests
#
#
# Start of 10.4 tests
#
#
# MDEV-22856 Assertion `!str || str != Ptr' and Assertion `!str || str != Ptr || !is_alloced()' failed in String::copy
#
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
SET sql_buffer_result=1;
CREATE TABLE t(c INT);
INSERT INTO t VALUES(NULL);
SELECT PASSWORD(c) FROM t;
PASSWORD(c)
DROP TABLE t;
SET sql_buffer_result=DEFAULT;
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
CREATE TABLE t1(c INT);
INSERT INTO t1 VALUES(NULL);
CREATE TABLE t2 AS SELECT PASSWORD(c) FROM t1;
DROP TABLE t2, t1;
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
CREATE TABLE t1 AS SELECT PASSWORD(CAST(NULL AS SIGNED));
DROP TABLE t1;
SET NAMES utf8mb3, collation_connection='utf16_bin';
SET @@sql_buffer_result=ON;
CREATE TABLE t (c CHAR(1));
INSERT INTO t VALUES (1),(1),(1),(NULL);
INSERT INTO t SELECT * FROM t;
SELECT PASSWORD(c) FROM t;
PASSWORD(c)
*E6CC90B878B948C35E92B003C792C46C58C4AF40
*E6CC90B878B948C35E92B003C792C46C58C4AF40
*E6CC90B878B948C35E92B003C792C46C58C4AF40
*E6CC90B878B948C35E92B003C792C46C58C4AF40
*E6CC90B878B948C35E92B003C792C46C58C4AF40
*E6CC90B878B948C35E92B003C792C46C58C4AF40
DROP TABLE t;
SET @@sql_buffer_result=DEFAULT;
SET sql_mode='';
SET SESSION sql_buffer_result=1;
CREATE TABLE t1 (c1 INT);
INSERT INTO t1 VALUES ();
INSERT IGNORE INTO t1 VALUES (NULL);
SET NAMES utf8mb3, collation_connection='utf16_bin';
SELECT PASSWORD(c1) FROM t1;
PASSWORD(c1)
DROP TABLE t1;
SET SESSION sql_buffer_result=DEFAULT;
SET sql_mode=DEFAULT;
#
# End of 10.4 tests
#
#
# Start of 10.5 tests
#
#

View file

@ -961,6 +961,56 @@ VALUES ('') UNION VALUES ( _utf16 0x0020 COLLATE utf16_bin);
--echo # End of 10.3 tests
--echo #
--echo #
--echo # Start of 10.4 tests
--echo #
--echo #
--echo # MDEV-22856 Assertion `!str || str != Ptr' and Assertion `!str || str != Ptr || !is_alloced()' failed in String::copy
--echo #
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
SET sql_buffer_result=1;
CREATE TABLE t(c INT);
INSERT INTO t VALUES(NULL);
SELECT PASSWORD(c) FROM t;
DROP TABLE t;
SET sql_buffer_result=DEFAULT;
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
CREATE TABLE t1(c INT);
INSERT INTO t1 VALUES(NULL);
CREATE TABLE t2 AS SELECT PASSWORD(c) FROM t1;
DROP TABLE t2, t1;
SET NAMES utf8mb3, collation_connection='utf16_general_ci';
CREATE TABLE t1 AS SELECT PASSWORD(CAST(NULL AS SIGNED));
DROP TABLE t1;
SET NAMES utf8mb3, collation_connection='utf16_bin';
SET @@sql_buffer_result=ON;
CREATE TABLE t (c CHAR(1));
INSERT INTO t VALUES (1),(1),(1),(NULL);
INSERT INTO t SELECT * FROM t;
SELECT PASSWORD(c) FROM t;
DROP TABLE t;
SET @@sql_buffer_result=DEFAULT;
SET sql_mode='';
SET SESSION sql_buffer_result=1;
CREATE TABLE t1 (c1 INT);
INSERT INTO t1 VALUES ();
INSERT IGNORE INTO t1 VALUES (NULL);
SET NAMES utf8mb3, collation_connection='utf16_bin';
SELECT PASSWORD(c1) FROM t1;
DROP TABLE t1;
SET SESSION sql_buffer_result=DEFAULT;
SET sql_mode=DEFAULT;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # Start of 10.5 tests

View file

@ -2940,3 +2940,23 @@ DROP TABLE t1;
#
# End of 10.2 tests
#
#
# Start of 10.4 tests
#
#
# MDEV-29019 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on SELECT
#
CREATE TABLE t (a INT);
SET collation_connection=utf32_unicode_ci;
INSERT INTO t VALUES (0);
SELECT * FROM t ORDER BY (OCT(a));
a
0
SELECT HEX(OCT(a)) FROM t;
HEX(OCT(a))
00000030
DROP TABLE t;
SET NAMES utf8;
#
# End of 10.4 tests
#

View file

@ -1099,7 +1099,30 @@ CREATE TABLE t1 (
SHOW CREATE TABLE t1;
DROP TABLE t1;
--enable_service_connection
--echo #
--echo # End of 10.2 tests
--echo #
--echo #
--echo # Start of 10.4 tests
--echo #
--echo #
--echo # MDEV-29019 Assertion `(length % 4) == 0' failed in my_lengthsp_utf32 on SELECT
--echo #
CREATE TABLE t (a INT);
SET collation_connection=utf32_unicode_ci;
INSERT INTO t VALUES (0);
SELECT * FROM t ORDER BY (OCT(a));
SELECT HEX(OCT(a)) FROM t;
DROP TABLE t;
SET NAMES utf8;
--echo #
--echo # End of 10.4 tests
--echo #
--enable_service_connection

View file

@ -3566,11 +3566,11 @@ DROP TABLE t1;
SELECT TRUNCATE(0, -9223372036854775808);
TRUNCATE(0, -9223372036854775808)
0
SELECT GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5)));
GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5)))
SELECT GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))) AS col1;
col1
NULL
SELECT (GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))/ROUND(-1)))DIV(-1-LOG2(1))-(-1*POWER(-1,0));
(GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))/ROUND(-1)))DIV(-1-LOG2(1))-(-1*POWER(-1,0))
SELECT (GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))/ROUND(-1)))DIV(-1-LOG2(1))-(-1*POWER(-1,0)) AS col1;
col1
NULL
#
# End of 10.4 tests

View file

@ -1888,8 +1888,8 @@ DROP TABLE t1;
SELECT TRUNCATE(0, -9223372036854775808);
--disable_warnings
SELECT GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5)));
SELECT (GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))/ROUND(-1)))DIV(-1-LOG2(1))-(-1*POWER(-1,0));
SELECT GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))) AS col1;
SELECT (GET_FORMAT(TIME,'JIS') DIV ATAN (TRUNCATE (0,'2000000000000000' DIV SIN(1500)*NOW(5))/ROUND(-1)))DIV(-1-LOG2(1))-(-1*POWER(-1,0)) AS col1;
--enable_warnings
--echo #

View file

@ -2402,6 +2402,28 @@ progress
# End of 10.3 tests
#
#
# MDEV-31064 Changes of the procedure are not immediately seen in queries to I_S.parameter from other connections
#
CREATE PROCEDURE sp1(IN p1 INT, IN p2 INT)
BEGIN
END;
connect con2, localhost, root,,;
CALL sp1(10, 20);
connection default;
CREATE OR REPLACE PROCEDURE sp1(p1 INT)
BEGIN
END;
connection con2;
SELECT COUNT(*) FROM information_schema.parameters WHERE SPECIFIC_NAME = 'sp1';
COUNT(*)
1
disconnect con2;
connection default;
DROP PROCEDURE sp1;
#
# End of 10.4 tests
#
#
# Start of 10.5 tests
#
#

View file

@ -2112,6 +2112,28 @@ select progress from information_schema.processlist limit 1;
--echo # End of 10.3 tests
--echo #
--echo #
--echo # MDEV-31064 Changes of the procedure are not immediately seen in queries to I_S.parameter from other connections
--echo #
CREATE PROCEDURE sp1(IN p1 INT, IN p2 INT)
BEGIN
END;
--connect(con2, localhost, root,,)
CALL sp1(10, 20);
--connection default
CREATE OR REPLACE PROCEDURE sp1(p1 INT)
BEGIN
END;
--connection con2
SELECT COUNT(*) FROM information_schema.parameters WHERE SPECIFIC_NAME = 'sp1';
--disconnect con2
--connection default
DROP PROCEDURE sp1;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # Start of 10.5 tests
--echo #

View file

@ -58,6 +58,36 @@ SET SESSION session_track_system_variables=NULL;
ERROR 42000: Variable 'session_track_system_variables' can't be set to the value of 'NULL'
# End of 10.3 tests
#
# MDEV-25237: crash after setting global session_track_system_variables
# to an invalid value
#
SET GLOBAL session_track_system_variables='a';
ERROR HY000: Unknown system variable 'a'
SET GLOBAL event_scheduler=1;
# check that value really returns as it was
set GLOBAL session_track_system_variables='character_set_connection';
SET GLOBAL session_track_system_variables='a';
ERROR HY000: Unknown system variable 'a'
connect con,localhost,root,,test;
SET NAMES 'utf8';
-- Tracker : SESSION_TRACK_SYSTEM_VARIABLES
-- character_set_connection
-- utf8
SET NAMES 'big5';
-- Tracker : SESSION_TRACK_SYSTEM_VARIABLES
-- character_set_connection
-- big5
select @@session_track_system_variables;
@@session_track_system_variables
character_set_connection
connection default;
disconnect con;
SET GLOBAL session_track_system_variables=default;
SET GLOBAL event_scheduler=default;
# End of 10.4 test
#
# MDEV-16470 - Session user variables tracker
#
# End of 10.5 tests

View file

@ -61,6 +61,38 @@ SET SESSION session_track_system_variables=NULL;
--echo # End of 10.3 tests
--echo #
--echo # MDEV-25237: crash after setting global session_track_system_variables
--echo # to an invalid value
--echo #
--error ER_UNKNOWN_SYSTEM_VARIABLE
SET GLOBAL session_track_system_variables='a';
SET GLOBAL event_scheduler=1;
--echo # check that value really returns as it was
set GLOBAL session_track_system_variables='character_set_connection';
--error ER_UNKNOWN_SYSTEM_VARIABLE
SET GLOBAL session_track_system_variables='a';
connect (con,localhost,root,,test);
--enable_session_track_info
SET NAMES 'utf8';
SET NAMES 'big5';
--disable_session_track_info
select @@session_track_system_variables;
connection default;
disconnect con;
SET GLOBAL session_track_system_variables=default;
SET GLOBAL event_scheduler=default;
--echo # End of 10.4 test
--echo #
--echo # MDEV-16470 - Session user variables tracker
--echo #

View file

@ -0,0 +1,21 @@
set @save_session_track_system_variables=@@session_track_system_variables;
#
# MDEV-25237: Assertion `global_system_variables.
# session_track_system_variables' failed in
# Session_sysvars_tracker::init | SIGSEGV's in __strlen_avx2 |
# UBSAN: runtime error: null pointer passed as argument 1, which
# is declared to never be null in my_strdup
#
# check that parser problems do not lead to crash
SET @old_debug= @@session.debug;
set debug_dbug="+d,dbug_session_tracker_parse_error";
SET GLOBAL session_track_system_variables='query_cache_size';
ERROR HY001: Out of memory; restart server and try again (needed 1 bytes)
set debug_dbug=@old_debug;
SELECT @@global.session_track_system_variables;
@@global.session_track_system_variables
NULL
SET GLOBAL event_scheduler=1;
SET GLOBAL session_track_system_variables=default;
SET GLOBAL event_scheduler=default;
# End of 10.4 test

View file

@ -0,0 +1,30 @@
--source include/have_debug.inc
--source include/no_protocol.inc
--source include/not_embedded.inc
set @save_session_track_system_variables=@@session_track_system_variables;
--echo #
--echo # MDEV-25237: Assertion `global_system_variables.
--echo # session_track_system_variables' failed in
--echo # Session_sysvars_tracker::init | SIGSEGV's in __strlen_avx2 |
--echo # UBSAN: runtime error: null pointer passed as argument 1, which
--echo # is declared to never be null in my_strdup
--echo #
--echo # check that parser problems do not lead to crash
SET @old_debug= @@session.debug;
set debug_dbug="+d,dbug_session_tracker_parse_error";
--error ER_OUTOFMEMORY
SET GLOBAL session_track_system_variables='query_cache_size';
set debug_dbug=@old_debug;
SELECT @@global.session_track_system_variables;
SET GLOBAL event_scheduler=1;
SET GLOBAL session_track_system_variables=default;
SET GLOBAL event_scheduler=default;
--echo # End of 10.4 test

View file

@ -2446,5 +2446,49 @@ DROP TABLE t1;
SET @@time_zone=DEFAULT;
SET TIMESTAMP=DEFAULT;
#
# MDEV-23838 Possibly wrong result or Assertion `0' failed in Item_func_round::native_op
#
CREATE TABLE t1 (a TIME, b INT);
INSERT INTO t1 VALUES ('07:26:24',NULL),('23:55:04',NULL);
SELECT MAX(ROUND(a, 0)) FROM t1 GROUP BY 1 << b;
MAX(ROUND(a, 0))
23:55:04
DROP TABLE t1;
CREATE TABLE t1 (a TIME(6), b INT);
INSERT INTO t1 VALUES ('07:26:24.12',NULL),('23:55:04.34',NULL);
SELECT MAX(ROUND(a, 2)) FROM t1 GROUP BY 1 << b;
MAX(ROUND(a, 2))
23:55:04.34
DROP TABLE t1;
SET sql_mode='';
CREATE TABLE t1 (a TIME);
INSERT INTO t1 VALUES (0),(0);
SELECT MAX(ROUND (a,a)) FROM t1 GROUP BY a;
MAX(ROUND (a,a))
00:00:00
DROP TABLE t1;
SET sql_mode=DEFAULT;
CREATE TABLE t1 (a TIME, b INT);
INSERT INTO t1 VALUES ('07:26:24',1),('23:55:04',1);
SELECT MIN(CEILING(a)), MAX(CEILING(a)) FROM t1 GROUP BY b;
MIN(CEILING(a)) MAX(CEILING(a))
07:26:24 23:55:04
SELECT MIN(FLOOR(a)), MAX(FLOOR(a)) FROM t1 GROUP BY b;
MIN(FLOOR(a)) MAX(FLOOR(a))
07:26:24 23:55:04
DROP TABLE t1;
CREATE TABLE t1 (a TIME(6), b INT);
INSERT INTO t1 VALUES ('00:00:00.5',1),('00:01:00.5',1);
INSERT INTO t1 VALUES ('-00:00:00.5',2),('-00:01:00.5',2);
SELECT MIN(CEILING(a)), MAX(CEILING(a)) FROM t1 GROUP BY b;
MIN(CEILING(a)) MAX(CEILING(a))
00:00:01 00:01:01
-00:01:00 00:00:00
SELECT MIN(FLOOR(a)), MAX(FLOOR(a)) FROM t1 GROUP BY b;
MIN(FLOOR(a)) MAX(FLOOR(a))
00:00:00 00:01:00
-00:01:01 -00:00:01
DROP TABLE t1;
#
# End of 10.4 tests
#

View file

@ -1585,6 +1585,41 @@ SET @@time_zone=DEFAULT;
SET TIMESTAMP=DEFAULT;
--echo #
--echo # MDEV-23838 Possibly wrong result or Assertion `0' failed in Item_func_round::native_op
--echo #
CREATE TABLE t1 (a TIME, b INT);
INSERT INTO t1 VALUES ('07:26:24',NULL),('23:55:04',NULL);
SELECT MAX(ROUND(a, 0)) FROM t1 GROUP BY 1 << b;
DROP TABLE t1;
CREATE TABLE t1 (a TIME(6), b INT);
INSERT INTO t1 VALUES ('07:26:24.12',NULL),('23:55:04.34',NULL);
SELECT MAX(ROUND(a, 2)) FROM t1 GROUP BY 1 << b;
DROP TABLE t1;
SET sql_mode='';
CREATE TABLE t1 (a TIME);
INSERT INTO t1 VALUES (0),(0);
SELECT MAX(ROUND (a,a)) FROM t1 GROUP BY a;
DROP TABLE t1;
SET sql_mode=DEFAULT;
CREATE TABLE t1 (a TIME, b INT);
INSERT INTO t1 VALUES ('07:26:24',1),('23:55:04',1);
SELECT MIN(CEILING(a)), MAX(CEILING(a)) FROM t1 GROUP BY b;
SELECT MIN(FLOOR(a)), MAX(FLOOR(a)) FROM t1 GROUP BY b;
DROP TABLE t1;
CREATE TABLE t1 (a TIME(6), b INT);
INSERT INTO t1 VALUES ('00:00:00.5',1),('00:01:00.5',1);
INSERT INTO t1 VALUES ('-00:00:00.5',2),('-00:01:00.5',2);
SELECT MIN(CEILING(a)), MAX(CEILING(a)) FROM t1 GROUP BY b;
SELECT MIN(FLOOR(a)), MAX(FLOOR(a)) FROM t1 GROUP BY b;
DROP TABLE t1;
--echo #
--echo # End of 10.4 tests
--echo #

View file

@ -190,7 +190,7 @@ FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
WHERE federated.t3.name=t.name;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 7 7.00 100.00 100.00
1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2 0.00 100.00 100.00
1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2 1.00 100.00 100.00
2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
SELECT *
FROM federated.t3, (SELECT t1.name FROM federated.t1
@ -242,7 +242,7 @@ ANALYZE
"ref": ["federated.t3.name"],
"r_loops": 7,
"rows": 2,
"r_rows": 0,
"r_rows": 0.142857143,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,

View file

@ -0,0 +1,78 @@
#
# Bug #23755664 DEADLOCK WITH 3 CONCURRENT DELETES BY UNIQUE KEY
#
connection default;
CREATE TABLE `t`(
`id` INT,
`a` INT DEFAULT NULL,
PRIMARY KEY(`id`),
UNIQUE KEY `u`(`a`)
) ENGINE=InnoDB;
INSERT INTO t (`id`,`a`) VALUES
(1,1),
(2,9999),
(3,10000);
connect deleter,localhost,root,,;
connect holder,localhost,root,,;
connect waiter,localhost,root,,;
connection deleter;
SET DEBUG_SYNC =
'lock_sec_rec_read_check_and_lock_has_locked
SIGNAL deleter_has_locked
WAIT_FOR waiter_has_locked';
DELETE FROM t WHERE a = 9999;
connection holder;
SET DEBUG_SYNC=
'now WAIT_FOR deleter_has_locked';
SET DEBUG_SYNC=
'lock_sec_rec_read_check_and_lock_has_locked SIGNAL holder_has_locked';
DELETE FROM t WHERE a = 9999;
connection waiter;
SET DEBUG_SYNC=
'now WAIT_FOR holder_has_locked';
SET DEBUG_SYNC=
'lock_sec_rec_read_check_and_lock_has_locked SIGNAL waiter_has_locked';
DELETE FROM t WHERE a = 9999;
connection deleter;
connection holder;
connection waiter;
connection default;
disconnect deleter;
disconnect holder;
disconnect waiter;
DROP TABLE `t`;
SET DEBUG_SYNC='reset';
CREATE TABLE `t`(
`id` INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
INSERT INTO t (`id`) VALUES (1), (2);
connect holder,localhost,root,,;
connect waiter,localhost,root,,;
connection holder;
BEGIN;
SELECT id FROM t WHERE id=1 FOR UPDATE;
id
1
SELECT id FROM t WHERE id=2 FOR UPDATE;
id
2
connection waiter;
SET DEBUG_SYNC=
'lock_wait_suspend_thread_enter SIGNAL waiter_will_wait';
SELECT id FROM t WHERE id = 1 FOR UPDATE;
connection holder;
SET DEBUG_SYNC=
'now WAIT_FOR waiter_will_wait';
SELECT * FROM t FOR UPDATE;
id
1
2
COMMIT;
connection waiter;
id
1
connection default;
disconnect holder;
disconnect waiter;
DROP TABLE `t`;
SET DEBUG_SYNC='reset';

View file

@ -0,0 +1,143 @@
--echo #
--echo # Bug #23755664 DEADLOCK WITH 3 CONCURRENT DELETES BY UNIQUE KEY
--echo #
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/count_sessions.inc
--connection default
# There are various scenarios in which a transaction already holds "half"
# of a record lock (for example, a lock on the record but not on the gap)
# and wishes to "upgrade it" to a full lock (i.e. on both gap and record).
# This is often a cause for a deadlock, if there is another transaction
# which is already waiting for the lock being blocked by us:
# 1. our granted lock for one half
# 2. her waiting lock for the same half
# 3. our waiting lock for the whole
#
# SCENARIO 1
#
# In this scenario, three different threads try to delete the same row,
# identified by a secondary index key.
# This kind of operation (besides LOCK_IX on a table) requires
# a LOCK_REC_NOT_GAP|LOCK_REC|LOCK_X lock on a secondary index
# 1. `deleter` is the first to get the required lock
# 2. `holder` enqueues a waiting lock
# 3. `waiter` enqueues right after `holder`
# 4. `deleter` commits, releasing the lock, and granting it to `holder`
# 5. `holder` now observes that the row was deleted, so it needs to
# "seal the gap", by obtaining a LOCK_X|LOCK_REC, but..
# 6. this causes a deadlock between `holder` and `waiter`
#
# This scenario does not fail if MDEV-10962 is not fixed because of MDEV-30225
# fix, as the 'holder' does not "seal the gap" after 'deleter' was committed,
# because it was initially sealed, as row_search_mvcc() requests next-key lock
# after MDEV-30225 fix in the case when it requested not-gap lock before the
# fix.
#
# But let the scenario be in the tests, because it can fail if MDEV-30225
# related code is changed
CREATE TABLE `t`(
`id` INT,
`a` INT DEFAULT NULL,
PRIMARY KEY(`id`),
UNIQUE KEY `u`(`a`)
) ENGINE=InnoDB;
INSERT INTO t (`id`,`a`) VALUES
(1,1),
(2,9999),
(3,10000);
--connect(deleter,localhost,root,,)
--connect(holder,localhost,root,,)
--connect(waiter,localhost,root,,)
--connection deleter
SET DEBUG_SYNC =
'lock_sec_rec_read_check_and_lock_has_locked
SIGNAL deleter_has_locked
WAIT_FOR waiter_has_locked';
--send DELETE FROM t WHERE a = 9999
--connection holder
SET DEBUG_SYNC=
'now WAIT_FOR deleter_has_locked';
SET DEBUG_SYNC=
'lock_sec_rec_read_check_and_lock_has_locked SIGNAL holder_has_locked';
--send DELETE FROM t WHERE a = 9999
--connection waiter
SET DEBUG_SYNC=
'now WAIT_FOR holder_has_locked';
SET DEBUG_SYNC=
'lock_sec_rec_read_check_and_lock_has_locked SIGNAL waiter_has_locked';
--send DELETE FROM t WHERE a = 9999
--connection deleter
--reap
--connection holder
--reap
--connection waiter
--reap
--connection default
--disconnect deleter
--disconnect holder
--disconnect waiter
DROP TABLE `t`;
SET DEBUG_SYNC='reset';
# SCENARIO 2
#
# Here, we form a situation in which con1 has LOCK_REC_NOT_GAP on rows 1 and 2
# con2 waits for lock on row 1, and then con1 wants to upgrade the lock on row 1,
# which might cause a deadlock, unless con1 properly notices that even though the
# lock on row 1 can not be upgraded, a separate LOCK_GAP can be obtained easily.
CREATE TABLE `t`(
`id` INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
INSERT INTO t (`id`) VALUES (1), (2);
--connect(holder,localhost,root,,)
--connect(waiter,localhost,root,,)
--connection holder
BEGIN;
SELECT id FROM t WHERE id=1 FOR UPDATE;
SELECT id FROM t WHERE id=2 FOR UPDATE;
--connection waiter
SET DEBUG_SYNC=
'lock_wait_suspend_thread_enter SIGNAL waiter_will_wait';
--send SELECT id FROM t WHERE id = 1 FOR UPDATE
--connection holder
SET DEBUG_SYNC=
'now WAIT_FOR waiter_will_wait';
SELECT * FROM t FOR UPDATE;
COMMIT;
--connection waiter
--reap
--connection default
--disconnect holder
--disconnect waiter
DROP TABLE `t`;
SET DEBUG_SYNC='reset';
--source include/wait_until_count_sessions.inc

View file

@ -1393,3 +1393,26 @@ INSERT INTO t1 VALUES(repeat("this is the test case", 500));
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
ALTER TABLE t1 KEY_BLOCK_SIZE=0;
DROP TABLE t1;
#
# MDEV-30528 Assertion in dtype_get_at_most_n_mbchars
#
create table t (f text) with system versioning character set utf8 engine=innodb;
insert into t (f) values
('mysql from tutorial dbms stands for database ...') ,
('when to use mysql well after that you went through a ...'),
('where will optimizing mysql in what tutorial we will show ...'),
('1001 mysql tricks 1. never run mysqld as root. 2. ...'),
('mysql vs. yoursql in the following database comparison ...'),
('mysql security when configured properly, mysql ...');
delete from t where f like 'mysql%';
alter table t add fulltext (f);
select * from t where match(f) against ("use");
f
when to use mysql well after that you went through a ...
select * from t where match(f) against ("run");
f
1001 mysql tricks 1. never run mysqld as root. 2. ...
select * from t where match(f) against ("tutorial");
f
where will optimizing mysql in what tutorial we will show ...
drop table t;

View file

@ -1342,3 +1342,21 @@ ALTER TABLE t1 KEY_BLOCK_SIZE=4;
ALTER TABLE t1 KEY_BLOCK_SIZE=0;
DROP TABLE t1;
--echo #
--echo # MDEV-30528 Assertion in dtype_get_at_most_n_mbchars
--echo #
create table t (f text) with system versioning character set utf8 engine=innodb;
insert into t (f) values
('mysql from tutorial dbms stands for database ...') ,
('when to use mysql well after that you went through a ...'),
('where will optimizing mysql in what tutorial we will show ...'),
('1001 mysql tricks 1. never run mysqld as root. 2. ...'),
('mysql vs. yoursql in the following database comparison ...'),
('mysql security when configured properly, mysql ...');
delete from t where f like 'mysql%';
alter table t add fulltext (f);
select * from t where match(f) against ("use");
select * from t where match(f) against ("run");
select * from t where match(f) against ("tutorial");
# cleanup
drop table t;

View file

@ -0,0 +1,68 @@
--connection master
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
--source include/save_master_gtid.inc
--connection slave
call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit on which this one depends");
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
set @save.slave_parallel_threads= @@global.slave_parallel_threads;
set @save.slave_parallel_mode= @@global.slave_parallel_mode;
set @@global.slave_parallel_threads= 3;
set @@global.slave_parallel_mode= CONSERVATIVE;
--connection slave1
BEGIN;
update t1 set a=2 where a=1;
--connection master
SET @old_dbug= @@SESSION.debug_dbug;
SET @@SESSION.debug_dbug="+d,binlog_force_commit_id";
# GCO 1
SET @commit_id= 10000;
# T1
update t1 set a=2 where a=1;
# T2
insert into t2 values (1);
# GCO 2
SET @commit_id= 10001;
# T3
insert into t1 values (3);
--connection slave
--source include/start_slave.inc
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Update_rows_log_event::find_row(-1)' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to start commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--let $t3_tid= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for prior transaction to start commit%'`
--evalp kill $t3_tid
--connection slave1
commit;
--connection slave
--let $slave_timeout=1032
--source include/wait_for_slave_sql_to_stop.inc
update t1 set a=1 where a=2;
set @@global.slave_parallel_threads = @save.slave_parallel_threads;
set @@global.slave_parallel_mode = @save.slave_parallel_mode;
--source include/start_slave.inc
--echo #
--echo # Cleanup
--connection master
DROP TABLE t1, t2;
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc

View file

@ -0,0 +1,94 @@
--echo # MDEV-31448 OOO finish event group by killed worker
# The test demonstrates how a killed worker accesses gco lists
# in finish_event_group() out-of-order to fire
# DBUG_ASSERT(!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id);
# in the buggy version.
--echo # Initialize test data
--connection master
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
--source include/save_master_gtid.inc
--connection slave
call mtr.add_suppression("Connection was killed");
call mtr.add_suppression("Can.t find record");
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
set @save.slave_parallel_threads= @@global.slave_parallel_threads;
set @save.slave_parallel_mode= @@global.slave_parallel_mode;
set @@global.slave_parallel_threads= 3;
set @@global.slave_parallel_mode= OPTIMISTIC;
--connection slave1
begin;
update t1 set a=2 where a=1;
--connection master
set @old_dbug= @@session.debug_dbug;
set @@session.debug_dbug="+d,binlog_force_commit_id";
# GCO 1
set @commit_id= 10000;
# T1
update t1 set a=2 where a=1;
if (!$killed_trx_commits)
{
set @commit_id= 10001;
# T2
set statement skip_parallel_replication=1 for insert into t2 values (1);
}
if ($killed_trx_commits)
{
insert into t2 values (1);
}
# GCO 2
# T3
drop table t2;
--connection slave
--source include/start_slave.inc
--echo # wait for T1
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Update_rows_log_event::find_row(-1)' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--echo # wait for T2
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--let $t2_tid= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for prior transaction to commit%' and command LIKE 'Slave_worker'`
--echo # wait for T3
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to start commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--evalp kill $t2_tid
# give a little time for T2 to re-sink into the same state
--let $slave_param=Last_Errno
--let $slave_param_value=1927
--source include/wait_for_slave_param.inc
--connection slave1
commit;
--connection slave
--let $slave_timeout=1032
--source include/wait_for_slave_sql_to_stop.inc
update t1 set a=1 where a=2;
set @@global.slave_parallel_threads = @save.slave_parallel_threads;
set @@global.slave_parallel_mode = @save.slave_parallel_mode;
--source include/start_slave.inc
--echo #
--echo # Cleanup
--connection master
drop table t1;
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc

View file

@ -0,0 +1,52 @@
include/master-slave.inc
[connection master]
# MDEV-31448 OOO finish event group by killed worker
# Initialize test data
connection master;
call mtr.add_suppression("Slave: Connection was killed");
call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit on which this one depends");
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
include/stop_slave.inc
set @@global.slave_parallel_threads= 4;
set @@global.slave_parallel_mode= OPTIMISTIC;
set @@global.innodb_lock_wait_timeout= 30;
set @@global.slave_transaction_retries= 0;
connection slave1;
BEGIN;
SELECT * FROM t1 WHERE a=1 FOR UPDATE;
a
1
connection master;
SET @old_dbug= @@SESSION.debug_dbug;
SET @@SESSION.debug_dbug="+d,binlog_force_commit_id";
SET @commit_id= 10000;
update t1 set a=2 where a=1;
set statement skip_parallel_replication=1 for insert into t2 values (1);
drop table t2;
connection slave;
include/start_slave.inc
# wait for T1
# wait for T2
# wait for T3
kill T2_TID;
connection slave1;
ROLLBACK;
connection master;
DROP TABLE t1;
include/save_master_gtid.inc
connection slave;
#
# Cleanup
include/stop_slave.inc
set @@global.slave_parallel_threads= 0;
set @@global.slave_parallel_mode= optimistic;
set @@global.innodb_lock_wait_timeout= 50;
set @@global.slave_transaction_retries= 10;
include/start_slave.inc
include/sync_with_master_gtid.inc
include/rpl_end.inc

View file

@ -0,0 +1,105 @@
include/master-slave.inc
[connection master]
connection slave;
include/stop_slave.inc
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
SET GLOBAL slave_parallel_threads=3;
SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode=aggressive;
SET @old_dbug= @@GLOBAL.debug_dbug;
CHANGE MASTER TO master_use_gtid=slave_pos;
include/start_slave.inc
*** MDEV-31509: Lost data with FTWRL and STOP SLAVE
connection master;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0,0);
INSERT INTO t2 VALUES (0,0);
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
connection slave;
*** Arrange for T1 to delay before entering GCO wait.
SET GLOBAL debug_dbug="+d,gco_wait_delay_gtid_0_x_99";
*** Arrange for T2 to wait for FTWRL to start.
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
*** Arrange for T2 to delay wakeup from FTWRL pause.
SET GLOBAL debug_dbug="+d,delay_ftwrl_wait_gtid_0_x_100";
connection master;
*** Event group T1
SET SESSION gtid_seq_no=99;
INSERT INTO t1 VALUES (1,1);
connection slave;
*** 1a. Wait for T1 to be queued.
SET debug_sync="now WAIT_FOR gco_wait_paused";
connection master;
*** Event group T2
SET SESSION gtid_seq_no=100;
INSERT INTO t2 VALUES (2,2);
connection slave;
*** 1b. Wait for T2 to be queued.
SET debug_sync= "now WAIT_FOR reached_pause";
connection slave1;
*** 2. Run FTWRL
SET GLOBAL debug_dbug= "+d,pause_for_ftwrl_wait";
FLUSH TABLES WITH READ LOCK;
connection slave;
SET debug_sync= "now WAIT_FOR pause_ftwrl_waiting";
*** 3. Wait for T2 to be waiting for FTWRL pause
SET debug_sync= "now SIGNAL continue_worker";
*** 4. FTWRL completes, UNLOCK TABLES.
SET debug_sync="now SIGNAL pause_ftwrl_cont";
connection slave1;
UNLOCK TABLES;
connection slave;
*** T2 is now ready to proceed after FTWRL pause, but did not wake up yet.
SET debug_sync="now WAIT_FOR pause_wait_started";
*** 5. STOP SLAVE is run.
connection slave1;
SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
STOP SLAVE;
connection slave;
SET debug_sync="now WAIT_FOR wait_for_done_waiting";
*** 5. T2 wakes up after FTWRL pause, reaches wait_for_prior_commit().
SET debug_sync="now SIGNAL pause_wait_continue";
*** 6. T1 starts.
SET debug_sync="now SIGNAL gco_wait_cont";
connection slave1;
connection slave;
include/wait_for_slave_to_stop.inc
connection master;
SELECT * FROM t1 ORDER BY a;
a b
0 0
1 1
SELECT * FROM t2 ORDER BY a;
a b
0 0
2 2
include/save_master_gtid.inc
connection slave;
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT @@GLOBAL.gtid_slave_pos;
@@GLOBAL.gtid_slave_pos
0-1-100
SELECT * FROM t1 ORDER BY a;
a b
0 0
1 1
SELECT * FROM t2 ORDER BY a;
a b
0 0
2 2
*** Clean up.
connection slave;
include/stop_slave.inc
SET DEBUG_SYNC= "RESET";
SET GLOBAL slave_parallel_threads= @old_parallel_threads;
SET GLOBAL slave_parallel_mode= @old_parallel_mode;
SET GLOBAL debug_dbug=@old_dbug;
include/start_slave.inc
connection master;
DROP TABLE t1, t2;
include/rpl_end.inc

View file

@ -0,0 +1,142 @@
include/master-slave.inc
[connection master]
connection master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
include/save_master_gtid.inc
connection slave;
call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit on which this one depends");
include/sync_with_master_gtid.inc
include/stop_slave.inc
set @save.slave_parallel_threads= @@global.slave_parallel_threads;
set @save.slave_parallel_mode= @@global.slave_parallel_mode;
set @@global.slave_parallel_threads= 3;
set @@global.slave_parallel_mode= CONSERVATIVE;
connection slave1;
BEGIN;
update t1 set a=2 where a=1;
connection master;
SET @old_dbug= @@SESSION.debug_dbug;
SET @@SESSION.debug_dbug="+d,binlog_force_commit_id";
SET @commit_id= 10000;
update t1 set a=2 where a=1;
insert into t2 values (1);
SET @commit_id= 10001;
insert into t1 values (3);
connection slave;
include/start_slave.inc
kill $t3_tid;
connection slave1;
commit;
connection slave;
include/wait_for_slave_sql_to_stop.inc
update t1 set a=1 where a=2;
set @@global.slave_parallel_threads = @save.slave_parallel_threads;
set @@global.slave_parallel_mode = @save.slave_parallel_mode;
include/start_slave.inc
#
# Cleanup
connection master;
DROP TABLE t1, t2;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
# MDEV-31448 OOO finish event group by killed worker
# Initialize test data
connection master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
include/save_master_gtid.inc
connection slave;
call mtr.add_suppression("Connection was killed");
call mtr.add_suppression("Can.t find record");
include/sync_with_master_gtid.inc
include/stop_slave.inc
set @save.slave_parallel_threads= @@global.slave_parallel_threads;
set @save.slave_parallel_mode= @@global.slave_parallel_mode;
set @@global.slave_parallel_threads= 3;
set @@global.slave_parallel_mode= OPTIMISTIC;
connection slave1;
begin;
update t1 set a=2 where a=1;
connection master;
set @old_dbug= @@session.debug_dbug;
set @@session.debug_dbug="+d,binlog_force_commit_id";
set @commit_id= 10000;
update t1 set a=2 where a=1;
insert into t2 values (1);
drop table t2;
connection slave;
include/start_slave.inc
# wait for T1
# wait for T2
# wait for T3
kill $t2_tid;
include/wait_for_slave_param.inc [Last_Errno]
connection slave1;
commit;
connection slave;
include/wait_for_slave_sql_to_stop.inc
update t1 set a=1 where a=2;
set @@global.slave_parallel_threads = @save.slave_parallel_threads;
set @@global.slave_parallel_mode = @save.slave_parallel_mode;
include/start_slave.inc
#
# Cleanup
connection master;
drop table t1;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
# MDEV-31448 OOO finish event group by killed worker
# Initialize test data
connection master;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
include/save_master_gtid.inc
connection slave;
call mtr.add_suppression("Connection was killed");
call mtr.add_suppression("Can.t find record");
include/sync_with_master_gtid.inc
include/stop_slave.inc
set @save.slave_parallel_threads= @@global.slave_parallel_threads;
set @save.slave_parallel_mode= @@global.slave_parallel_mode;
set @@global.slave_parallel_threads= 3;
set @@global.slave_parallel_mode= OPTIMISTIC;
connection slave1;
begin;
update t1 set a=2 where a=1;
connection master;
set @old_dbug= @@session.debug_dbug;
set @@session.debug_dbug="+d,binlog_force_commit_id";
set @commit_id= 10000;
update t1 set a=2 where a=1;
set @commit_id= 10001;
set statement skip_parallel_replication=1 for insert into t2 values (1);
drop table t2;
connection slave;
include/start_slave.inc
# wait for T1
# wait for T2
# wait for T3
kill $t2_tid;
include/wait_for_slave_param.inc [Last_Errno]
connection slave1;
commit;
connection slave;
include/wait_for_slave_sql_to_stop.inc
update t1 set a=1 where a=2;
set @@global.slave_parallel_threads = @save.slave_parallel_threads;
set @@global.slave_parallel_mode = @save.slave_parallel_mode;
include/start_slave.inc
#
# Cleanup
connection master;
drop table t1;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
include/rpl_end.inc

View file

@ -1,12 +1,17 @@
include/master-slave.inc
[connection master]
#
# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel Replicas
#
connection slave;
include/stop_slave.inc
set @@GLOBAL.debug_dbug= "d,negate_clock_diff_with_master";
set @@GLOBAL.slave_parallel_mode= CONSERVATIVE;
change master to master_delay=3, master_use_gtid=Slave_Pos;
set @@GLOBAL.slave_parallel_threads=2;
include/start_slave.inc
connection master;
create table t1 (a int);
create table t2 (a int);
include/sync_slave_sql_with_master.inc
#
# Pt 1) Ensure SBM is updated immediately upon arrival of the next event
@ -25,11 +30,10 @@ connection slave;
UNLOCK TABLES;
include/sync_with_master_gtid.inc
#
# Pt 2) If the SQL thread has not entered an idle state, ensure
# Pt 2) If the worker threads have not entered an idle state, ensure
# following events do not update SBM
# Stop slave IO thread so it receives both events together on restart
connection slave;
include/stop_slave_io.inc
LOCK TABLES t1 WRITE;
connection master;
# Sleep 2 to allow a buffer between events for SBM check
insert into t1 values (1);
@ -37,36 +41,49 @@ insert into t1 values (1);
insert into t1 values (2);
include/save_master_pos.inc
connection slave;
LOCK TABLES t1 WRITE;
SET @@global.debug_dbug="+d,pause_sql_thread_on_next_event";
START SLAVE IO_THREAD;
# Before we start processing the events, we ensure both transactions
# were written into the relay log. Otherwise, if the IO thread takes too
# long to queue the events, the sql thread can think it has caught up
# too quickly.
SET DEBUG_SYNC='now WAIT_FOR paused_on_event';
include/sync_io_with_master.inc
SET @@global.debug_dbug="-d,pause_sql_thread_on_next_event";
SET DEBUG_SYNC='now SIGNAL sql_thread_continue';
# Wait for first transaction to complete SQL delay and begin execution..
# Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
# Validate SBM calculation doesn't use the second transaction because worker threads shouldn't have gone idle..
# ..and that SBM wasn't calculated using prior committed transactions
# ..done
connection slave;
UNLOCK TABLES;
#
include/wait_for_slave_param.inc [Relay_Master_Log_File]
include/wait_for_slave_param.inc [Exec_Master_Log_Pos]
# Cleanup
# Reset master_delay
include/stop_slave.inc
CHANGE MASTER TO master_delay=0;
set @@GLOBAL.slave_parallel_threads=4;
SET @@global.debug_dbug="";
SET DEBUG_SYNC='RESET';
include/start_slave.inc
#
# MDEV-30619: Parallel Slave SQL Thread Can Update Seconds_Behind_Master with Active Workers
#
connection slave;
# Ensure the replica is fully idle before starting transactions
# Lock t1 on slave so the first received transaction does not complete/commit
LOCK TABLES t1 WRITE;
connection master;
DROP TABLE t1;
insert into t1 values (3);
include/save_master_gtid.inc
connection slave;
# Waiting for first transaction to begin..
connection master;
# Sleep 2 sec to create a gap between events
INSERT INTO t2 VALUES (1);
include/save_master_gtid.inc
connection slave;
# Waiting for second transaction to begin..
connection slave;
UNLOCK TABLES;
include/sync_with_master_gtid.inc
#
# Cleanup
connection master;
DROP TABLE t1, t2;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
include/stop_slave.inc
set @@GLOBAL.debug_dbug= "";
set @@GLOBAL.slave_parallel_mode= "$save_parallel_mode";
include/start_slave.inc
include/rpl_end.inc
# End of rpl_delayed_parallel_slave_sbm.test
# End of rpl_parallel_sbm.test

View file

@ -47,11 +47,21 @@ Warnings:
Note 1255 Slave already has been stopped
RESET MASTER;
SET @@global.gtid_slave_pos="";
SET @@global.gtid_strict_mode=1;
connection master;
RESET MASTER;
CREATE TABLE ti (a INT) ENGINE=innodb;
CREATE SEQUENCE s2 ENGINE=innodb;
include/save_master_gtid.inc
connection slave;
include/start_slave.inc
include/sync_with_master_gtid.inc
include/stop_slave.inc
include/rpl_restart_server.inc [server_number=2]
SET @@global.slave_parallel_threads=2;
SET @@global.slave_parallel_mode=optimistic;
SET @@global.debug_dbug="+d,hold_worker_on_schedule";
SET @@global.gtid_strict_mode=1;
connection master;
SET @@gtid_seq_no=100;
ALTER SEQUENCE s2 restart with 1;
INSERT INTO ti SET a=1;
@ -60,6 +70,7 @@ SELECT @@global.gtid_binlog_state "Master gtid state";
Master gtid state
0-1-101
connection slave;
SET STATEMENT sql_log_bin=0 FOR FLUSH TABLES;
include/start_slave.inc
SELECT @@global.gtid_binlog_state, @@global.gtid_slave_pos as "no 100,101 yet in both";
@@global.gtid_binlog_state no 100,101 yet in both

View file

@ -3,7 +3,7 @@ include/master-slave.inc
connection slave;
include/stop_slave.inc
SET @save_dbug= @@GLOBAL.debug_dbug;
SET @@global.debug_dbug="+d,pause_sql_thread_on_fde";
SET @@global.debug_dbug="+d,pause_sql_thread_on_fde,negate_clock_diff_with_master";
include/start_slave.inc
# Future events must be logged at least 2 seconds after
# the slave starts
@ -34,8 +34,31 @@ SET @@global.debug_dbug="-d,pause_sql_thread_on_fde";
SET DEBUG_SYNC='now SIGNAL sql_thread_continue';
# Wait for SQL thread to continue into normal execution
SET DEBUG_SYNC='RESET';
#
# MDEV-29639
# When receiving an event after the SQL Thread idles,
# Seconds_Behind_Master should not update before it updates
# last_master_timestamp
connection slave;
include/stop_slave.inc
set @@global.debug_dbug="+d,pause_sql_thread_on_next_event";
include/start_slave.inc
connection master;
insert into t1 values(2);
include/save_master_gtid.inc
connection slave;
set debug_sync='now wait_for paused_on_event';
connection master;
# Sleeping 1s to create a visible SBM gap between events
insert into t1 values(3);
include/save_master_gtid.inc
connection slave;
set debug_sync='now wait_for paused_on_event';
include/stop_slave.inc
set debug_sync='RESET';
SET @@global.debug_dbug=$save_dbug;
include/start_slave.inc
include/sync_with_master_gtid.inc
connection master;
DROP TABLE t1;
connection slave;
SET @@global.debug_dbug=$save_dbug;
include/rpl_end.inc

View file

@ -0,0 +1,93 @@
--source include/master-slave.inc
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_binlog_format_row.inc
--echo # MDEV-31448 OOO finish event group by killed worker
# The test demonstrates how a killed worker accesses gco lists
# in finish_event_group() out-of-order to fire
# DBUG_ASSERT(!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id);
# in the buggy version.
--echo # Initialize test data
--connection master
call mtr.add_suppression("Slave: Connection was killed");
call mtr.add_suppression("Slave: Commit failed due to failure of an earlier commit on which this one depends");
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
insert into t1 values (1);
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--let $save_slave_parallel_threads= `SELECT @@global.slave_parallel_threads`
--let $save_slave_parallel_mode= `SELECT @@global.slave_parallel_mode`
--let $save_innodb_lock_wait_timeout= `SELECT @@global.innodb_lock_wait_timeout`
--let $save_transaction_retries= `SELECT @@global.slave_transaction_retries`
set @@global.slave_parallel_threads= 4;
set @@global.slave_parallel_mode= OPTIMISTIC;
set @@global.innodb_lock_wait_timeout= 30;
set @@global.slave_transaction_retries= 0;
--connection slave1
BEGIN;
SELECT * FROM t1 WHERE a=1 FOR UPDATE;
--connection master
SET @old_dbug= @@SESSION.debug_dbug;
SET @@SESSION.debug_dbug="+d,binlog_force_commit_id";
# GCO 1
SET @commit_id= 10000;
# T1
update t1 set a=2 where a=1;
# T2
set statement skip_parallel_replication=1 for insert into t2 values (1);
# GCO 2
# T3
drop table t2;
--connection slave
--source include/start_slave.inc
--echo # wait for T1
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Update_rows_log_event::find_row(-1)' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--echo # wait for T2
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--let $t2_tid= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for prior transaction to commit%' and command LIKE 'Slave_worker'`
--echo # wait for T3
--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to start commit%' and command LIKE 'Slave_worker';
--source include/wait_condition.inc
--replace_result $t2_tid T2_TID
--eval kill $t2_tid
--sleep 1
--connection slave1
# Release the blocked T1
ROLLBACK;
--connection master
DROP TABLE t1;
--source include/save_master_gtid.inc
--connection slave
--echo #
--echo # Cleanup
--source include/stop_slave.inc
eval set @@global.slave_parallel_threads= $save_slave_parallel_threads;
eval set @@global.slave_parallel_mode= $save_slave_parallel_mode;
eval set @@global.innodb_lock_wait_timeout= $save_innodb_lock_wait_timeout;
eval set @@global.slave_transaction_retries= $save_transaction_retries;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
--source include/rpl_end.inc

View file

@ -1 +0,0 @@
--slave-parallel-threads=4

View file

@ -0,0 +1,143 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
--connection slave
--source include/stop_slave.inc
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
SET GLOBAL slave_parallel_threads=3;
SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode=aggressive;
SET @old_dbug= @@GLOBAL.debug_dbug;
CHANGE MASTER TO master_use_gtid=slave_pos;
--source include/start_slave.inc
--echo *** MDEV-31509: Lost data with FTWRL and STOP SLAVE
# The bug was as follows:
# 1. Event groups T1 and T2 are queued but not started yet.
# 2. FLUSH TABLE WITH READ LOCKS starts, sets rpl_parallel_entry::pause_sub_id
# 3. T2 Sees pause_sub_id, goes to wait for the pause to complete.
# 4. FTWRL completes, UNLOCK TABLES is run.
# 5. STOP SLAVE is run, sets rpl_parallel_entry::stop_sub_id.
# 6. T2 wakes up after FTWRL pause, only now sets
# rpl_parallel_entry::largest_started_sub_id. This is the bug,
# largest_started_sub_id is set too late here.
# 7. T1 starts, it sees stop_sub_id<T1, so T1 is skipped due to STOP SLAVE.
# 8. T2 continues, its check for stop_sub_id was before STOP SLAVE. So T2 is
# wrongly applied, silently losing transaction T1.
--connection master
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0,0);
INSERT INTO t2 VALUES (0,0);
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--connection slave
--echo *** Arrange for T1 to delay before entering GCO wait.
SET GLOBAL debug_dbug="+d,gco_wait_delay_gtid_0_x_99";
--echo *** Arrange for T2 to wait for FTWRL to start.
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
--echo *** Arrange for T2 to delay wakeup from FTWRL pause.
SET GLOBAL debug_dbug="+d,delay_ftwrl_wait_gtid_0_x_100";
--connection master
--echo *** Event group T1
SET SESSION gtid_seq_no=99;
INSERT INTO t1 VALUES (1,1);
--connection slave
--echo *** 1a. Wait for T1 to be queued.
SET debug_sync="now WAIT_FOR gco_wait_paused";
--connection master
--echo *** Event group T2
SET SESSION gtid_seq_no=100;
INSERT INTO t2 VALUES (2,2);
--connection slave
--echo *** 1b. Wait for T2 to be queued.
SET debug_sync= "now WAIT_FOR reached_pause";
--connection slave1
--echo *** 2. Run FTWRL
SET GLOBAL debug_dbug= "+d,pause_for_ftwrl_wait";
send FLUSH TABLES WITH READ LOCK;
--connection slave
SET debug_sync= "now WAIT_FOR pause_ftwrl_waiting";
--echo *** 3. Wait for T2 to be waiting for FTWRL pause
SET debug_sync= "now SIGNAL continue_worker";
--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "%Waiting due to global read lock%" and command="Slave_worker";
--source include/wait_condition.inc
--echo *** 4. FTWRL completes, UNLOCK TABLES.
SET debug_sync="now SIGNAL pause_ftwrl_cont";
--connection slave1
reap;
UNLOCK TABLES;
--connection slave
--echo *** T2 is now ready to proceed after FTWRL pause, but did not wake up yet.
SET debug_sync="now WAIT_FOR pause_wait_started";
--echo *** 5. STOP SLAVE is run.
--connection slave1
SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger";
send STOP SLAVE;
--connection slave
SET debug_sync="now WAIT_FOR wait_for_done_waiting";
--echo *** 5. T2 wakes up after FTWRL pause, reaches wait_for_prior_commit().
SET debug_sync="now SIGNAL pause_wait_continue";
--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "%Waiting for prior transaction to commit%" and command="Slave_worker";
--source include/wait_condition.inc
--echo *** 6. T1 starts.
SET debug_sync="now SIGNAL gco_wait_cont";
--connection slave1
reap;
--connection slave
--source include/wait_for_slave_to_stop.inc
--connection master
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--source include/save_master_gtid.inc
--connection slave
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
# The bug here was that T2 was erroneously replicated while T1 was
# being skipped due to STOP SLAVE. So the @@gtid_slave_pos was at T2,
# but we were missing the data from T1.
SELECT @@GLOBAL.gtid_slave_pos;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--echo *** Clean up.
--connection slave
--source include/stop_slave.inc
SET DEBUG_SYNC= "RESET";
SET GLOBAL slave_parallel_threads= @old_parallel_threads;
SET GLOBAL slave_parallel_mode= @old_parallel_mode;
SET GLOBAL debug_dbug=@old_dbug;
--source include/start_slave.inc
--connection master
DROP TABLE t1, t2;
--source include/rpl_end.inc

View file

@ -0,0 +1,15 @@
--source include/master-slave.inc
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_binlog_format_row.inc
--source include/mdev-31448_conservative.inc
--let $killed_trx_commits=1
--source include/mdev-31448_optimistic.inc
--let $killed_trx_commits=0
--source include/mdev-31448_optimistic.inc
--source include/rpl_end.inc

View file

@ -0,0 +1 @@
--slave-parallel-threads=2

View file

@ -1,4 +1,14 @@
#
# Ensure that Seconds_Behind_Master works correctly on the parallel replica.
#
--source include/master-slave.inc
--source include/have_log_bin.inc
--source include/have_debug.inc
--echo #
--echo # MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel Replicas
--echo #
# This test ensures that after a delayed parallel slave has idled, i.e.
# executed everything in its relay log, the next event group that the SQL
# thread reads from the relay log will immediately be used in the
@ -6,26 +16,21 @@
# Seconds_Behind_Master is based on the timestamp of the new transaction,
# rather than the last committed transaction.
#
# References:
# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel
# Replicas
#
--source include/master-slave.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--connection slave
--source include/stop_slave.inc
--let $old_debug_dbug= `SELECT @@global.debug_dbug`
--let $save_dbug= `SELECT @@GLOBAL.debug_dbug`
--let $save_parallel_mode= `SELECT @@GLOBAL.slave_parallel_mode`
set @@GLOBAL.debug_dbug= "d,negate_clock_diff_with_master";
set @@GLOBAL.slave_parallel_mode= CONSERVATIVE;
--let $master_delay= 3
--eval change master to master_delay=$master_delay, master_use_gtid=Slave_Pos
--let $old_slave_threads= `SELECT @@GLOBAL.slave_parallel_threads`
set @@GLOBAL.slave_parallel_threads=2;
--source include/start_slave.inc
--connection master
--let insert_ctr= 0
create table t1 (a int);
create table t2 (a int);
--source include/sync_slave_sql_with_master.inc
--echo #
@ -40,7 +45,6 @@ LOCK TABLES t1 WRITE;
sleep 2;
--let $ts_trx_before_ins= `SELECT UNIX_TIMESTAMP()`
--let insert_ctr= 0
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--source include/save_master_gtid.inc
@ -66,15 +70,13 @@ UNLOCK TABLES;
--source include/sync_with_master_gtid.inc
--echo #
--echo # Pt 2) If the SQL thread has not entered an idle state, ensure
--echo # Pt 2) If the worker threads have not entered an idle state, ensure
--echo # following events do not update SBM
--echo # Stop slave IO thread so it receives both events together on restart
--connection slave
--source include/stop_slave_io.inc
LOCK TABLES t1 WRITE;
--connection master
--echo # Sleep 2 to allow a buffer between events for SBM check
sleep 2;
--let $ts_trxpt2_before_ins= `SELECT UNIX_TIMESTAMP()`
@ -88,29 +90,14 @@ sleep 3;
--source include/save_master_pos.inc
--connection slave
LOCK TABLES t1 WRITE;
SET @@global.debug_dbug="+d,pause_sql_thread_on_next_event";
START SLAVE IO_THREAD;
--echo # Before we start processing the events, we ensure both transactions
--echo # were written into the relay log. Otherwise, if the IO thread takes too
--echo # long to queue the events, the sql thread can think it has caught up
--echo # too quickly.
SET DEBUG_SYNC='now WAIT_FOR paused_on_event';
--source include/sync_io_with_master.inc
SET @@global.debug_dbug="-d,pause_sql_thread_on_next_event";
SET DEBUG_SYNC='now SIGNAL sql_thread_continue';
--echo # Wait for first transaction to complete SQL delay and begin execution..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock%' AND command LIKE 'Slave_Worker';
--source include/wait_condition.inc
--echo # Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
--echo # Validate SBM calculation doesn't use the second transaction because worker threads shouldn't have gone idle..
--let $sbm_after_trx_no_idle= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--let $timestamp_trxpt2_arrive= `SELECT UNIX_TIMESTAMP()`
if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins`)
if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins - 1`)
{
--let $cmpv= `SELECT $timestamp_trxpt2_arrive - $ts_trx_after_ins`
--echo # SBM $sbm_after_trx_no_idle was more recent than time since last transaction ($cmpv seconds)
@ -127,24 +114,86 @@ if (`SELECT $sbm_after_trx_no_idle > ($seconds_since_idling + 1)`)
--connection slave
UNLOCK TABLES;
--source include/sync_with_master.inc
--echo # Cleanup
--source include/stop_slave.inc
--eval CHANGE MASTER TO master_delay=0
--source include/start_slave.inc
--echo #
--echo # MDEV-30619: Parallel Slave SQL Thread Can Update Seconds_Behind_Master with Active Workers
--echo #
# This test ensures that a parallel slave will not update
# Seconds_Behind_Master after the SQL Thread has idled if the worker threads
# are still executing events. To test this, two events are executed on the
# primary with $sleep seconds in-between them. Once the second event begins
# execution on the replica, Seconds_Behind_Master is queried to ensure it
# reflects the value of the first transaction, rather than the second.
--connection slave
--echo # Ensure the replica is fully idle before starting transactions
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Slave has read all relay log%';
--source include/wait_condition.inc
--let $wait_condition= SELECT count(*)=2 FROM information_schema.processlist WHERE state LIKE 'Waiting for work from SQL thread';
--source include/wait_condition.inc
--echo # Lock t1 on slave so the first received transaction does not complete/commit
LOCK TABLES t1 WRITE;
--connection master
--let $ts_t1_before_master_ins= `SELECT UNIX_TIMESTAMP()`
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--source include/save_master_gtid.inc
--connection slave
--echo # Waiting for first transaction to begin..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock';
--source include/wait_condition.inc
--let $sbm_1= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--connection master
--let $sleep = 2
--echo # Sleep $sleep sec to create a gap between events
sleep $sleep;
INSERT INTO t2 VALUES (1);
--source include/save_master_gtid.inc
--connection slave
--echo # Waiting for second transaction to begin..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for prior transaction to start commit%';
--source include/wait_condition.inc
--let $sbm_2= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
if (`SELECT $sbm_1 + $sleep > $sbm_2`)
{
--echo # Seconds_Behind_Masters: $sbm_1 $sbm_2_0
--die Two successive Seconds_Behind_Master timestamps must be separated by the sleep parameter value or greater
}
--connection slave
UNLOCK TABLES;
--source include/sync_with_master_gtid.inc
--echo #
--echo # Cleanup
--echo # Reset master_delay
--source include/stop_slave.inc
--eval CHANGE MASTER TO master_delay=0
--eval set @@GLOBAL.slave_parallel_threads=$old_slave_threads
--eval SET @@global.debug_dbug="$old_debug_dbug"
SET DEBUG_SYNC='RESET';
--source include/start_slave.inc
--connection master
DROP TABLE t1;
DROP TABLE t1, t2;
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--eval set @@GLOBAL.debug_dbug= "$save_dbug"
--evalp set @@GLOBAL.slave_parallel_mode= "$save_parallel_mode"
--source include/start_slave.inc
--source include/rpl_end.inc
--echo # End of rpl_delayed_parallel_slave_sbm.test
--echo # End of rpl_parallel_sbm.test

View file

@ -77,15 +77,28 @@ SET DEBUG_SYNC = 'now SIGNAL continue_worker';
--source include/stop_slave.inc
RESET MASTER;
SET @@global.gtid_slave_pos="";
--let $slave_gtid_strict_mode=`select @@global.gtid_strict_mode`
SET @@global.gtid_strict_mode=1;
--connection master
RESET MASTER;
# Load from master
CREATE TABLE ti (a INT) ENGINE=innodb;
CREATE SEQUENCE s2 ENGINE=innodb;
--source include/save_master_gtid.inc
--connection slave
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
--let $rpl_server_number= 2
--source include/rpl_restart_server.inc
# upon restart
SET @@global.slave_parallel_threads=2;
SET @@global.slave_parallel_mode=optimistic;
SET @@global.debug_dbug="+d,hold_worker_on_schedule";
--let $slave_gtid_strict_mode=`select @@global.gtid_strict_mode`
SET @@global.gtid_strict_mode=1;
--connection master
SET @@gtid_seq_no=100;
ALTER SEQUENCE s2 restart with 1;
INSERT INTO ti SET a=1;
@ -93,6 +106,10 @@ INSERT INTO ti SET a=1;
SELECT @@global.gtid_binlog_state "Master gtid state";
--connection slave
# The following FT complicates the opening table time with committing
# an internal transaction. The rest of the test also proves
# MDEV-31503 "branch" of the OOO error is fixed.
SET STATEMENT sql_log_bin=0 FOR FLUSH TABLES;
--source include/start_slave.inc
--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit"

View file

@ -27,7 +27,7 @@
--connection slave
--source include/stop_slave.inc
SET @save_dbug= @@GLOBAL.debug_dbug;
SET @@global.debug_dbug="+d,pause_sql_thread_on_fde";
SET @@global.debug_dbug="+d,pause_sql_thread_on_fde,negate_clock_diff_with_master";
--source include/start_slave.inc
--let $sleep_time=2
@ -93,11 +93,93 @@ SET DEBUG_SYNC='now SIGNAL sql_thread_continue';
# Reset last sql_thread_continue signal
SET DEBUG_SYNC='RESET';
--echo #
--echo # MDEV-29639
--echo # When receiving an event after the SQL Thread idles,
--echo # Seconds_Behind_Master should not update before it updates
--echo # last_master_timestamp
--connection slave
--source include/stop_slave.inc
set @@global.debug_dbug="+d,pause_sql_thread_on_next_event";
--source include/start_slave.inc
--connection master
insert into t1 values(2);
--source include/save_master_gtid.inc
# Each event after starting will trigger a pause, so continually send signal
# sql_thread_continue until caught up
--connection slave
--let $caught_up=0
--let $tries= 0
set debug_sync='now wait_for paused_on_event';
--disable_query_log
while (!$caught_up)
{
set debug_sync='now signal sql_thread_continue';
--let $slave_gtid= `SELECT @@global.gtid_slave_pos`
if (`SELECT strcmp("$master_pos","$slave_gtid") = 0`)
{
--inc $caught_up
}
--inc $tries
# Wait 30s
if (`SELECT $tries > 300`)
{
--die Replica failed to sync with primary
}
sleep 0.1;
}
--enable_query_log
--connection master
--echo # Sleeping 1s to create a visible SBM gap between events
sleep 1;
insert into t1 values(3);
--source include/save_master_gtid.inc
--connection slave
set debug_sync='now wait_for paused_on_event';
--let $sbm= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
if ($sbm)
{
--echo # Expected Seconds_Behind_Master to be 0 but was $sbm
--die Seconds_Behind_Master should not show updates before last_master_timestamp is updated
}
# Continually send signal sql_thread_continue until caught up
--let $caught_up=0
--let $tries= 0
--disable_query_log
while (!$caught_up)
{
set debug_sync='now signal sql_thread_continue';
--let $slave_gtid= `SELECT @@global.gtid_slave_pos`
if (`SELECT strcmp("$master_pos","$slave_gtid") = 0`)
{
--inc $caught_up
}
--inc $tries
# Wait 30s
if (`SELECT $tries > 300`)
{
--die Replica failed to sync with primary
}
sleep 0.1;
}
--enable_query_log
# Cleanup
--source include/stop_slave.inc
set debug_sync='RESET';
SET @@global.debug_dbug=$save_dbug;
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
--connection master
DROP TABLE t1;
--connection slave
SET @@global.debug_dbug=$save_dbug;
--source include/rpl_end.inc

View file

@ -197,3 +197,40 @@ Warnings:
Warning 1292 Truncated incorrect DECIMAL value: 'x'
Warning 1292 Truncated incorrect DECIMAL value: 'test'
drop table t1;
#
# MDEV-31319 Assertion const_item_cache == true failed in Item_func::fix_fields
#
create table t (f1 int, f2 int, fv int generated always as (case user() when 'foo' or 'bar' then f1 else f2 end) virtual);
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'foo'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
select * from t;
f1 f2 fv
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'foo'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
create table tmp as select * from information_schema.tables where table_name = 't';
select * from t;
f1 f2 fv
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'foo'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
drop table t, tmp;
#
# MDEV-29357 Assertion (fixed) in Item_func_dayname on INSERT
#
set sql_mode='';
create table t (c1 blob ,c2 int,c3 char(10) as (dayname (c2)));
create trigger tr before insert on t for each row set new.c2=0;
insert into t values (0, 0, 0);
Warnings:
Warning 1906 The value specified for generated column 'c3' in table 't' has been ignored
Warning 1292 Incorrect datetime value: '0' for column `test`.`t`.`c2` at row 1
Warning 1292 Incorrect datetime value: '0' for column `test`.`t`.`c2` at row 1
insert into t values (1, 1, 1);
Warnings:
Warning 1906 The value specified for generated column 'c3' in table 't' has been ignored
Warning 1292 Incorrect datetime value: '1' for column `test`.`t`.`c2` at row 1
Warning 1292 Incorrect datetime value: '0' for column `test`.`t`.`c2` at row 1
drop trigger tr;
drop table t;

View file

@ -162,3 +162,26 @@ create table t1 (a int , b date as (1 in ('x' ,(database ()) ))) ;
select b from t1;
select a from t1 order by 'x' = b;
drop table t1;
--echo #
--echo # MDEV-31319 Assertion const_item_cache == true failed in Item_func::fix_fields
--echo #
create table t (f1 int, f2 int, fv int generated always as (case user() when 'foo' or 'bar' then f1 else f2 end) virtual);
select * from t;
create table tmp as select * from information_schema.tables where table_name = 't';
select * from t;
# cleanup
drop table t, tmp;
--echo #
--echo # MDEV-29357 Assertion (fixed) in Item_func_dayname on INSERT
--echo #
set sql_mode='';
create table t (c1 blob ,c2 int,c3 char(10) as (dayname (c2)));
create trigger tr before insert on t for each row set new.c2=0;
insert into t values (0, 0, 0);
insert into t values (1, 1, 1);
drop trigger tr;
drop table t;

View file

@ -91,6 +91,18 @@ begin
end~~
delimiter ;~~
delimiter ~~;
eval create or replace function check_row_slave(row_start $sys_datatype_expl, row_end $sys_datatype_expl)
returns varchar(255)
deterministic
begin
if current_row(row_end) then
return "CURRENT ROW";
end if;
return "HISTORICAL ROW";
end~~
delimiter ;~~
delimiter ~~;
eval create or replace function check_row_ts(row_start timestamp(6), row_end timestamp(6))
returns varchar(255)

View file

@ -4,6 +4,7 @@ drop procedure if exists verify_trt;
drop procedure if exists verify_trt_dummy;
drop function if exists current_row;
drop function if exists check_row;
drop function if exists check_row_slave;
drop function if exists current_row_ts;
drop function if exists check_row_ts;
--enable_warnings

View file

@ -133,18 +133,38 @@ drop table t1;
#
# MDEV-21138 Assertion `col->ord_part' or `f.col->ord_part' failed in row_build_index_entry_low
#
# Check DELETE and multi-DELETE with foreign key
create table t1 (
f1 int, f2 text, f3 int, fulltext (f2), key(f1), key(f3),
foreign key r (f3) references t1 (f1) on delete set null)
foreign key r (f3) references t1 (f1) on delete set null,
row_start SYS_TYPE as row start invisible,
row_end SYS_TYPE as row end invisible,
period for system_time (row_start, row_end))
with system versioning engine innodb;
insert into t1 values (1, repeat('a', 8193), 1), (1, repeat('b', 8193), 1);
select f1, f3, check_row_ts(row_start, row_end) from t1;
f1 f3 check_row_ts(row_start, row_end)
insert into t1 select 2, f2, 2 from t1;
select f1, f3, check_row(row_start, row_end) from t1;
f1 f3 check_row(row_start, row_end)
1 1 CURRENT ROW
1 1 CURRENT ROW
delete from t1;
select f1, f3, check_row_ts(row_start, row_end) from t1 for system_time all;
f1 f3 check_row_ts(row_start, row_end)
2 2 CURRENT ROW
2 2 CURRENT ROW
delete from t1 where f1 = 1;
select f1, f3, check_row(row_start, row_end) from t1 for system_time all order by f1, row_end;
f1 f3 check_row(row_start, row_end)
1 1 HISTORICAL ROW
1 1 HISTORICAL ROW
drop table t1;
2 2 CURRENT ROW
2 2 CURRENT ROW
create table t2 (f1 int);
insert into t2 values (2);
# Multi-delelte
delete t1, t2 from t1 join t2 where t1.f1 = t2.f1;
select f1, f3, check_row(row_start, row_end) from t1 for system_time all order by f1, row_end;
f1 f3 check_row(row_start, row_end)
1 1 HISTORICAL ROW
1 1 HISTORICAL ROW
2 2 HISTORICAL ROW
2 2 HISTORICAL ROW
# Cleanup
drop tables t1, t2;

View file

@ -188,4 +188,53 @@ connection slave;
include/diff_tables.inc [master:test.t1,slave:test.t1]
connection master;
drop table t1;
#
# MDEV-31313 SYSTEM VERSIONING and FOREIGN KEY CASCADE create orphan rows on replica
#
create table parent (
id int(11) not null auto_increment,
processdate datetime default null,
primary key (id)
) engine=innodb with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:00');
insert into parent values (1, now());
create table child (
id int(11) not null auto_increment,
ch_name varchar(30),
andreid int(11) default null,
primary key (id),
key andreid (andreid),
constraint fk_andreid foreign key (andreid) references parent (id) on delete cascade
) engine=innodb with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:01');
insert into child values (null, 'vimtomar', 1);
set timestamp= unix_timestamp('2000-01-01 00:00:02');
delete from parent where id = 1;
select check_row(row_start, row_end) from parent for system_time all;
check_row(row_start, row_end)
HISTORICAL ROW
select check_row(row_start, row_end) from child for system_time all;
check_row(row_start, row_end)
HISTORICAL ROW
select * from child;
id ch_name andreid
select * from parent;
id processdate
connection slave;
select check_row_slave(row_start, row_end) from parent for system_time all;
check_row_slave(row_start, row_end)
HISTORICAL ROW
select check_row_slave(row_start, row_end) from child for system_time all;
check_row_slave(row_start, row_end)
HISTORICAL ROW
select * from child;
id ch_name andreid
select * from parent;
id processdate
connection master;
set timestamp= default;
drop table child;
drop table parent;
connection slave;
connection master;
include/rpl_end.inc

View file

@ -1,6 +1,6 @@
--- update.result 2018-12-19 13:55:35.873917389 +0300
+++ update,trx_id.reject 2018-12-19 13:55:35.533917399 +0300
@@ -81,12 +81,10 @@
--- update.result
+++ update.reject
@@ -84,12 +84,10 @@
commit;
select x, y, sys_trx_end = MAXVAL as current from t1 for system_time all order by sys_trx_end, x, y;
x y current
@ -14,3 +14,11 @@
1 1 1
2 2 1
3 3 1
@@ -464,7 +462,6 @@
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
nid nstate check_row(row_start, row_end)
1 1 HISTORICAL ROW
-1 1 HISTORICAL ROW
1 3 CURRENT ROW
commit;
drop tables t1;

View file

@ -51,19 +51,22 @@ sys_trx_start SYS_DATATYPE as row start invisible,
sys_trx_end SYS_DATATYPE as row end invisible,
period for system_time (sys_trx_start, sys_trx_end))
with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:00');
insert into t1 values(1, 1, 1);
set @ins_t= now(6);
select sys_trx_start into @tmp1 from t1;
set timestamp= unix_timestamp('2000-01-01 01:00:00');
update t1 set x= 11, y= 11 where id = 1;
select @tmp1 < sys_trx_start as A1, x, y from t1;
A1 x y
1 11 11
select sys_trx_start into @tmp1 from t1;
set timestamp= unix_timestamp('2000-01-01 02:00:00');
update t1 set y= 1 where id = 1;
select @tmp1 = sys_trx_start as A2, x from t1;
A2 x
1 11
drop table t1;
set timestamp= default;
create table t1 (
x int,
y int,
@ -437,4 +440,46 @@ update t1 set a = 3 where b <= 9;
update t2 set a = 3 where b <= 9;
update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b = t2.b;
drop tables t1, t2;
#
# MDEV-23100 ODKU of non-versioning column inserts history row
#
create table t1 (
x int unique,
y int without system versioning
) with system versioning;
insert into t1 (x, y) values ('1', '1');
insert into t1 (x, y) values ('1', '2')
on duplicate key update y = 3;
select x, y, check_row_ts(row_start, row_end) from t1 for system_time all order by row_end;
x y check_row_ts(row_start, row_end)
1 3 CURRENT ROW
drop table t1;
#
# MDEV-25644 UPDATE not working properly on transaction precise system versioned table
#
create or replace table t1 (nid int primary key, nstate int, ntype int) engine innodb;
alter table t1 add
row_start SYS_DATATYPE generated always as row start invisible,
add row_end SYS_DATATYPE generated always as row end invisible,
add period for system_time(row_start, row_end),
add system versioning;
insert into t1 values (1, 1, 1);
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
nid nstate check_row(row_start, row_end)
1 1 CURRENT ROW
start transaction;
update t1 set nstate= nstate where nid = 1;
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
nid nstate check_row(row_start, row_end)
1 1 HISTORICAL ROW
1 1 CURRENT ROW
# Bug: ERROR 1761 (23000): Foreign key constraint for table 'xxx', record '1-18446744073709551615' would lead to a duplicate entry in table 'xxx', key 'PRIMARY'
update t1 set nstate= 3 where nid= 1;
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
nid nstate check_row(row_start, row_end)
1 1 HISTORICAL ROW
1 1 HISTORICAL ROW
1 3 CURRENT ROW
commit;
drop tables t1;
# End of 10.4 tests

View file

@ -97,16 +97,26 @@ drop table t1;
--echo #
--echo # MDEV-21138 Assertion `col->ord_part' or `f.col->ord_part' failed in row_build_index_entry_low
--echo #
create table t1 (
--echo # Check DELETE and multi-DELETE with foreign key
replace_result $sys_datatype_expl SYS_TYPE;
eval create table t1 (
f1 int, f2 text, f3 int, fulltext (f2), key(f1), key(f3),
foreign key r (f3) references t1 (f1) on delete set null)
foreign key r (f3) references t1 (f1) on delete set null,
row_start $sys_datatype_expl as row start invisible,
row_end $sys_datatype_expl as row end invisible,
period for system_time (row_start, row_end))
with system versioning engine innodb;
insert into t1 values (1, repeat('a', 8193), 1), (1, repeat('b', 8193), 1);
select f1, f3, check_row_ts(row_start, row_end) from t1;
delete from t1;
select f1, f3, check_row_ts(row_start, row_end) from t1 for system_time all;
# cleanup
drop table t1;
insert into t1 select 2, f2, 2 from t1;
select f1, f3, check_row(row_start, row_end) from t1;
delete from t1 where f1 = 1;
select f1, f3, check_row(row_start, row_end) from t1 for system_time all order by f1, row_end;
create table t2 (f1 int);
insert into t2 values (2);
--echo # Multi-delelte
delete t1, t2 from t1 join t2 where t1.f1 = t2.f1;
select f1, f3, check_row(row_start, row_end) from t1 for system_time all order by f1, row_end;
--echo # Cleanup
drop tables t1, t2;
--source suite/versioning/common_finish.inc

View file

@ -1,4 +1,5 @@
--source suite/versioning/engines.inc
--source suite/versioning/common.inc
--source include/have_partition.inc
--source include/master-slave.inc
@ -6,6 +7,7 @@
#Testing command counters -BEFORE.
#Storing the before counts of Slave
connection slave;
--source suite/versioning/common.inc
let $slave_com_commit_before= query_get_value(SHOW GLOBAL STATUS LIKE 'com_commit', Value, 1);
let $slave_com_insert_before= query_get_value(SHOW GLOBAL STATUS LIKE 'com_insert', Value, 1);
let $slave_com_delete_before= query_get_value(SHOW GLOBAL STATUS LIKE 'com_delete', Value, 1);
@ -167,4 +169,55 @@ sync_slave_with_master;
connection master;
drop table t1;
--echo #
--echo # MDEV-31313 SYSTEM VERSIONING and FOREIGN KEY CASCADE create orphan rows on replica
--echo #
create table parent (
id int(11) not null auto_increment,
processdate datetime default null,
primary key (id)
) engine=innodb with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:00');
insert into parent values (1, now());
create table child (
id int(11) not null auto_increment,
ch_name varchar(30),
andreid int(11) default null,
primary key (id),
key andreid (andreid),
constraint fk_andreid foreign key (andreid) references parent (id) on delete cascade
) engine=innodb with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:01');
insert into child values (null, 'vimtomar', 1);
set timestamp= unix_timestamp('2000-01-01 00:00:02');
delete from parent where id = 1;
select check_row(row_start, row_end) from parent for system_time all;
select check_row(row_start, row_end) from child for system_time all;
select * from child;
select * from parent;
sync_slave_with_master;
# Annoying tweaking of microseconds in slave row_end, so row_end can be <= row_start
select check_row_slave(row_start, row_end) from parent for system_time all;
select check_row_slave(row_start, row_end) from child for system_time all;
select * from child;
select * from parent;
# Cleanup
--source suite/versioning/common_finish.inc
--connection master
set timestamp= default;
drop table child;
drop table parent;
sync_slave_with_master;
connection master;
--source suite/versioning/common_finish.inc
--source include/rpl_end.inc

View file

@ -26,15 +26,18 @@ eval create table t1 (
sys_trx_end $sys_datatype_expl as row end invisible,
period for system_time (sys_trx_start, sys_trx_end))
with system versioning;
set timestamp= unix_timestamp('2000-01-01 00:00:00');
insert into t1 values(1, 1, 1);
set @ins_t= now(6);
select sys_trx_start into @tmp1 from t1;
set timestamp= unix_timestamp('2000-01-01 01:00:00');
update t1 set x= 11, y= 11 where id = 1;
select @tmp1 < sys_trx_start as A1, x, y from t1;
select sys_trx_start into @tmp1 from t1;
set timestamp= unix_timestamp('2000-01-01 02:00:00');
update t1 set y= 1 where id = 1;
select @tmp1 = sys_trx_start as A2, x from t1;
drop table t1;
set timestamp= default;
replace_result $sys_datatype_expl SYS_DATATYPE;
eval create table t1 (
@ -373,6 +376,44 @@ update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b =
# cleanup
drop tables t1, t2;
--echo #
--echo # MDEV-23100 ODKU of non-versioning column inserts history row
--echo #
create table t1 (
x int unique,
y int without system versioning
) with system versioning;
insert into t1 (x, y) values ('1', '1');
insert into t1 (x, y) values ('1', '2')
on duplicate key update y = 3;
select x, y, check_row_ts(row_start, row_end) from t1 for system_time all order by row_end;
drop table t1;
--echo #
--echo # MDEV-25644 UPDATE not working properly on transaction precise system versioned table
--echo #
create or replace table t1 (nid int primary key, nstate int, ntype int) engine innodb;
--replace_result $sys_datatype_expl SYS_DATATYPE
eval alter table t1 add
row_start $sys_datatype_expl generated always as row start invisible,
add row_end $sys_datatype_expl generated always as row end invisible,
add period for system_time(row_start, row_end),
add system versioning;
insert into t1 values (1, 1, 1);
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
start transaction;
update t1 set nstate= nstate where nid = 1;
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
--echo # Bug: ERROR 1761 (23000): Foreign key constraint for table 'xxx', record '1-18446744073709551615' would lead to a duplicate entry in table 'xxx', key 'PRIMARY'
update t1 set nstate= 3 where nid= 1;
# Under one transaction trx_id generates only one history row, that differs from timestamp
select nid, nstate, check_row(row_start, row_end) from t1 for system_time all order by row_start, row_end;
commit;
drop tables t1;
--echo # End of 10.4 tests
source suite/versioning/common_finish.inc;

View file

@ -626,12 +626,27 @@ static int search_default_file_with_ext(struct handle_option_ctx *ctx,
if (!my_stat(name,&stat_info,MYF(0)))
return 1;
/*
Ignore world-writable regular files.
This is mainly done to protect us to not read a file created by
the mysqld server, but the check is still valid in most context.
Ignore world-writable regular files (exceptions apply).
This is mainly done to protect us to not read a file that may be
modified by anyone.
Also check access so that read only mounted (EROFS)
or immutable files (EPERM) that are suitable protections.
The main case we are allowing is a container readonly volume mount
from a filesystem that doesn't have unix permissions. This will
have a 0777 permission and access will set errno = EROFS.
Note if a ROFS has a file with permissions 04n6, access sets errno
EACCESS, rather the ROFS, so in this case we'll error, even though
the ROFS is protecting the file.
An ideal, race free, implementation would do fstat / fstatvfs / ioctl
for permission, read only filesystem, and immutability resprectively.
*/
if ((stat_info.st_mode & S_IWOTH) &&
(stat_info.st_mode & S_IFMT) == S_IFREG)
(stat_info.st_mode & S_IFMT) == S_IFREG &&
(access(name, W_OK) == 0 || (errno != EROFS && errno != EPERM)))
{
fprintf(stderr, "Warning: World-writable config file '%s' is ignored\n",
name);

View file

@ -216,7 +216,7 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags,
}
from= s;
skip(isalnum(*s) || (*s == '_') || (*s == '.') || (*s == '-') ||
(*s == '$') || (*s == '\\') || (*s == '/'));
(*s == '$') || (*s == '\\') || (*s == '/') || (*s == '@'));
end_from= s;
skip(isspace(*s));
if (end_from == from || *s++ != ':') goto syntax_error;

View file

@ -2137,7 +2137,7 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio,
If the server does not support ssl, we abort the connection.
*/
if (mysql->options.use_ssl &&
(mysql->client_flag & CLIENT_SSL_VERIFY_SERVER_CERT) &&
(mysql->options.extension && mysql->options.extension->tls_verify_server_cert) &&
!(mysql->server_capabilities & CLIENT_SSL))
{
set_mysql_extended_error(mysql, CR_SSL_CONNECTION_ERROR, unknown_sqlstate,
@ -2207,7 +2207,7 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio,
DBUG_PRINT("info", ("IO layer change done!"));
/* Verify server cert */
if ((mysql->client_flag & CLIENT_SSL_VERIFY_SERVER_CERT) &&
if ((mysql->options.extension && mysql->options.extension->tls_verify_server_cert) &&
ssl_verify_server_cert(net->vio, mysql->host, &cert_error))
{
set_mysql_extended_error(mysql, CR_SSL_CONNECTION_ERROR, unknown_sqlstate,
@ -3894,10 +3894,13 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg)
mysql->options.use_thread_specific_memory= *(my_bool *) arg;
break;
case MYSQL_OPT_SSL_VERIFY_SERVER_CERT:
if (*(my_bool*) arg)
mysql->options.client_flag|= CLIENT_SSL_VERIFY_SERVER_CERT;
else
mysql->options.client_flag&= ~CLIENT_SSL_VERIFY_SERVER_CERT;
if (!mysql->options.extension)
mysql->options.extension= (struct st_mysql_options_extention *)
my_malloc(PSI_INSTRUMENT_ME,
sizeof(struct st_mysql_options_extention),
MYF(MY_WME | MY_ZEROFILL));
if (mysql->options.extension)
mysql->options.extension->tls_verify_server_cert= *(my_bool*) arg;
break;
case MYSQL_PLUGIN_DIR:
EXTENSION_SET_STRING(&mysql->options, plugin_dir, arg);

View file

@ -40,7 +40,6 @@
Pushdown_derived::Pushdown_derived(TABLE_LIST *tbl, derived_handler *h)
: derived(tbl), handler(h)
{
is_analyze= handler->thd->lex->analyze_stmt;
}
@ -57,12 +56,6 @@ int Pushdown_derived::execute()
if ((err= handler->init_scan()))
goto error;
if (is_analyze)
{
handler->end_scan();
DBUG_RETURN(0);
}
while (!(err= handler->next_row()))
{
if (unlikely(thd->check_killed()))

View file

@ -6395,7 +6395,7 @@ int ha_partition::multi_range_key_create_key(RANGE_SEQ_IF *seq,
m_mrr_range_current->ptr= m_mrr_range_current->key_multi_range.ptr;
m_mrr_range_current->key_multi_range.ptr= m_mrr_range_current;
if (start_key->key && (start_key->flag & HA_READ_KEY_EXACT))
if (start_key->key && (start_key->flag == HA_READ_KEY_EXACT))
get_partition_set(table, table->record[0], active_index,
start_key, &m_part_spec);
else

View file

@ -620,6 +620,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
{
handlerton *hton;
static const char *no_exts[]= { 0 };
int ret= 0;
DBUG_ENTER("ha_initialize_handlerton");
DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str));
@ -629,6 +630,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
{
sql_print_error("Unable to allocate memory for plugin '%s' handlerton.",
plugin->name.str);
ret= 1;
goto err_no_hton_memory;
}
@ -639,12 +641,15 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
hton->slot= HA_SLOT_UNDEF;
/* Historical Requirement */
plugin->data= hton; // shortcut for the future
if (plugin->plugin->init && plugin->plugin->init(hton))
{
sql_print_error("Plugin '%s' init function returned error.",
plugin->name.str);
/* [remove after merge] notes on merge conflict (MDEV-31400):
10.6-10.11: 13ba00ff4933cfc1712676f323587504e453d1b5
11.0-11.2: 42f8be10f18163c4025710cf6a212e82bddb2f62
The 10.11->11.0 conflict is trivial, but the reference commit also
contains different non-conflict changes needs to be applied to 11.0
(and beyond).
*/
if (plugin->plugin->init && (ret= plugin->plugin->init(hton)))
goto err;
}
// hton_ext_based_table_discovery() works only when discovery
// is supported and the engine if file-based.
@ -682,6 +687,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
if (idx == (int) DB_TYPE_DEFAULT)
{
sql_print_warning("Too many storage engines!");
ret= 1;
goto err_deinit;
}
if (hton->db_type != DB_TYPE_UNKNOWN)
@ -709,6 +715,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
{
sql_print_error("Too many plugins loaded. Limit is %lu. "
"Failed on '%s'", (ulong) MAX_HA, plugin->name.str);
ret= 1;
goto err_deinit;
}
hton->slot= total_ha++;
@ -758,7 +765,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
resolve_sysvar_table_options(hton);
update_discovery_counters(hton, 1);
DBUG_RETURN(0);
DBUG_RETURN(ret);
err_deinit:
/*
@ -776,7 +783,7 @@ err:
my_free(hton);
err_no_hton_memory:
plugin->data= NULL;
DBUG_RETURN(1);
DBUG_RETURN(ret);
}
int ha_init()
@ -2046,17 +2053,26 @@ int ha_rollback_trans(THD *thd, bool all)
attempt. Otherwise those following transactions can run too early, and
possibly cause replication to fail. See comments in retry_event_group().
(This concerns rollbacks due to temporary errors where the transaction
will be retried afterwards. For non-recoverable errors, following
transactions will not start but just be skipped as the worker threads
perform the error stop).
There were several bugs with this in the past that were very hard to
track down (MDEV-7458, MDEV-8302). So we add here an assertion for
rollback without signalling following transactions. And in release
builds, we explicitly do the signalling before rolling back.
*/
DBUG_ASSERT(
!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) ||
!(thd->rgi_slave &&
!thd->rgi_slave->worker_error &&
thd->rgi_slave->did_mark_start_commit) ||
(thd->transaction->xid_state.is_explicit_XA() ||
(thd->rgi_slave->gtid_ev_flags2 & Gtid_log_event::FL_PREPARED_XA)));
if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit)
if (thd->rgi_slave &&
!thd->rgi_slave->worker_error &&
thd->rgi_slave->did_mark_start_commit)
thd->rgi_slave->unmark_start_commit();
}
#endif

View file

@ -6159,7 +6159,9 @@ void Regexp_processor_pcre::fix_owner(Item_func *owner,
Item *subject_arg,
Item *pattern_arg)
{
if (!is_compiled() && pattern_arg->const_item())
if (!is_compiled() &&
pattern_arg->const_item() &&
!pattern_arg->is_expensive())
{
if (compile(pattern_arg, true))
{

View file

@ -353,7 +353,10 @@ Item_func::fix_fields(THD *thd, Item **ref)
We shouldn't call fix_fields() twice, so check 'fixed' field first
*/
if ((*arg)->fix_fields_if_needed(thd, arg))
{
cleanup();
return TRUE; /* purecov: inspected */
}
item= *arg;
if (item->maybe_null)
@ -369,9 +372,15 @@ Item_func::fix_fields(THD *thd, Item **ref)
}
}
if (check_arguments())
{
cleanup();
return true;
}
if (fix_length_and_dec())
{
cleanup();
return TRUE;
}
fixed= 1;
return FALSE;
}
@ -2295,6 +2304,16 @@ bool Item_func_int_val::fix_length_and_dec()
}
bool Item_func_int_val::native_op(THD *thd, Native *to)
{
// TODO: turn Item_func_int_val into Item_handled_func eventually.
if (type_handler()->mysql_timestamp_type() == MYSQL_TIMESTAMP_TIME)
return Time(thd, this).to_native(to, decimals);
DBUG_ASSERT(0);
return true;
}
longlong Item_func_ceiling::int_op()
{
switch (args[0]->result_type()) {
@ -2723,6 +2742,16 @@ bool Item_func_round::date_op(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate)
}
bool Item_func_round::native_op(THD *thd, Native *to)
{
// TODO: turn Item_func_round into Item_handled_func eventually.
if (type_handler()->mysql_timestamp_type() == MYSQL_TIMESTAMP_TIME)
return Time(thd, this).to_native(to, decimals);
DBUG_ASSERT(0);
return true;
}
void Item_func_rand::seed_random(Item *arg)
{
/*

View file

@ -1891,11 +1891,7 @@ public:
}
bool fix_length_and_dec();
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
bool native_op(THD *thd, Native *to)
{
DBUG_ASSERT(0);
return true;
}
bool native_op(THD *thd, Native *to);
};
@ -1947,11 +1943,7 @@ public:
my_decimal *decimal_op(my_decimal *);
bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool time_op(THD *thd, MYSQL_TIME *ltime);
bool native_op(THD *thd, Native *to)
{
DBUG_ASSERT(0);
return true;
}
bool native_op(THD *thd, Native *to);
String *str_op(String *str)
{
DBUG_ASSERT(0);

View file

@ -3514,8 +3514,12 @@ String *Item_func_conv::val_str(String *str)
from_base, &endptr, &err);
}
uint dummy_errors;
if (!(ptr= longlong2str(dec, ans, to_base)) ||
str->copy(ans, (uint32) (ptr - ans), default_charset()))
(collation.collation->state & MY_CS_NONASCII) ?
str->copy(ans, (uint32) (ptr - ans), &my_charset_latin1,
collation.collation, &dummy_errors) :
str->copy(ans, (uint32) (ptr - ans), collation.collation))
{
null_value= 1;
return NULL;

View file

@ -7843,7 +7843,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
Setting this flag may or may not be seen by the other thread, but we
are safe in any case: The other thread will set queued_by_other under
its LOCK_wait_commit, and we will not check queued_by_other only after
its LOCK_wait_commit, and we will not check queued_by_other until after
we have been woken up.
*/
wfc->opaque_pointer= orig_entry;
@ -7940,7 +7940,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
is pointed to by `last` (we do not use NULL to terminate the list).
As we process an entry, any waiters for that entry are added at the end of
the list, to be processed in subsequent iterations. The the entry is added
the list, to be processed in subsequent iterations. Then the entry is added
to the group_commit_queue. This continues until the list is exhausted,
with all entries ever added eventually processed.

View file

@ -3287,7 +3287,10 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
thd_arg->transaction->all.has_created_dropped_temp_table() ||
thd_arg->transaction->all.trans_executed_admin_cmd())
flags2|= FL_DDL;
else if (is_transactional && !is_tmp_table)
else if (is_transactional && !is_tmp_table &&
!(thd_arg->transaction->all.modified_non_trans_table &&
thd->variables.binlog_direct_non_trans_update == 0 &&
!thd->is_current_stmt_binlog_format_row()))
flags2|= FL_TRANSACTIONAL;
if (!(thd_arg->variables.option_bits & OPTION_RPL_SKIP_PARALLEL))
flags2|= FL_ALLOW_PARALLEL;
@ -8357,6 +8360,11 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
return error;
}
const bool history_change= m_table->versioned() ?
!m_table->vers_end_field()->is_max() : false;
TABLE_LIST *tl= m_table->pos_in_table_list;
uint8 trg_event_map_save= tl->trg_event_map;
/*
This is the situation after locating BI:
@ -8414,9 +8422,17 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
goto err;
}
if (m_table->versioned())
{
if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP))
m_table->vers_update_fields();
if (!history_change && !m_table->vers_end_field()->is_max())
{
tl->trg_event_map|= trg2bit(TRG_EVENT_DELETE);
}
}
error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
tl->trg_event_map= trg_event_map_save;
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
error= 0;
if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP))

View file

@ -27,6 +27,9 @@ struct rpl_parallel_thread_pool global_rpl_thread_pool;
static void signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi,
int err);
static void
register_wait_for_prior_event_group_commit(rpl_group_info *rgi,
rpl_parallel_entry *entry);
static int
rpt_handle_event(rpl_parallel_thread::queued_event *qev,
@ -151,15 +154,35 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
int err;
thd->get_stmt_da()->set_overwrite_status(true);
if (unlikely(rgi->worker_error))
{
/*
In case a previous wait was killed, we need to re-register to be able to
repeat the wait.
And before doing that, we un-register any previous registration (in case
we got an error earlier and skipped waiting).
*/
thd->wait_for_commit_ptr->unregister_wait_for_prior_commit();
mysql_mutex_lock(&entry->LOCK_parallel_entry);
register_wait_for_prior_event_group_commit(rgi, entry);
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
}
/*
Remove any left-over registration to wait for a prior commit to
complete. Normally, such wait would already have been removed at
this point by wait_for_prior_commit() called from within COMMIT
processing. However, in case of MyISAM and no binlog, we might not
have any commit processing, and so we need to do the wait here,
before waking up any subsequent commits, to preserve correct
order of event execution. Also, in the error case we might have
skipped waiting and thus need to remove it explicitly.
processing.
However, in case of MyISAM and no binlog, we might not have any commit
processing, and so we need to do the wait here, before waking up any
subsequent commits, to preserve correct order of event execution.
Also, in the error case we might have skipped waiting and thus need to
remove it explicitly. Or the wait might have been killed and we need to
repeat the registration and the wait.
It is important in the non-error case to do a wait, not just an
unregister. Because we might be last in a group-commit that is
@ -172,8 +195,18 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
all earlier event groups have also committed; this way no more
mark_start_commit() calls can be made and it is safe to de-allocate
the GCO.
Thus this final wait is done with kill ignored during the wait. This is
fine, at this point there is no active query or transaction to abort, and
the thread will continue as soon as earlier event groups complete.
Note though, that in the non-error case there is no guarantee that
finish_event_group() will be run in-order. For example, a successful
binlog group commit will wakeup all participating event groups
simultaneously so only thread scheduling will decide the order in which
finish_event_group() calls acquire LOCK_parallel_entry.
*/
err= wfc->wait_for_prior_commit(thd);
err= wfc->wait_for_prior_commit(thd, false);
if (unlikely(err) && !rgi->worker_error)
signal_error_to_sql_driver_thread(thd, rgi, err);
thd->wait_for_commit_ptr= NULL;
@ -242,8 +275,7 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
not yet started should just skip their group, preparing for stop of the
SQL driver thread.
*/
if (unlikely(rgi->worker_error) &&
entry->stop_on_error_sub_id == (uint64)ULONGLONG_MAX)
if (unlikely(rgi->worker_error) && entry->stop_on_error_sub_id > sub_id)
entry->stop_on_error_sub_id= sub_id;
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
#ifdef ENABLED_DEBUG_SYNC
@ -286,16 +318,11 @@ static void
signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err)
{
rgi->worker_error= err;
/*
In case we get an error during commit, inform following transactions that
we aborted our commit.
*/
DBUG_EXECUTE_IF("hold_worker2_favor_worker3", {
if (rgi->current_gtid.seq_no == 2002) {
debug_sync_set_action(thd, STRING_WITH_LEN("now WAIT_FOR cont_worker2"));
}});
rgi->unmark_start_commit();
rgi->cleanup_context(thd, true);
rgi->rli->abort_slave= true;
rgi->rli->stop_for_until= false;
@ -342,7 +369,7 @@ register_wait_for_prior_event_group_commit(rpl_group_info *rgi,
Do not start parallel execution of this event group until all prior groups
have reached the commit phase that are not safe to run in parallel with.
*/
static bool
static void
do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco,
bool *did_enter_cond, PSI_stage_info *old_stage)
{
@ -394,18 +421,45 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco,
&entry->LOCK_parallel_entry);
} while (wait_count > entry->count_committing_event_groups);
}
}
if (entry->force_abort && wait_count > entry->stop_count)
static bool
do_stop_handling(rpl_group_info *rgi)
{
bool should_stop= false;
rpl_parallel_entry *entry= rgi->parallel_entry;
mysql_mutex_assert_owner(&entry->LOCK_parallel_entry);
if (unlikely(entry->force_abort) && rgi->gtid_sub_id > entry->stop_sub_id)
{
/*
We are stopping (STOP SLAVE), and this event group is beyond the point
where we can safely stop. So return a flag that will cause us to skip,
rather than execute, the following events.
We are stopping (STOP SLAVE), and this event group need not be applied
before we can safely stop. So return a flag that will cause us to skip,
rather than execute, the following events. Once all queued events have
been skipped, the STOP SLAVE is complete (for this thread).
*/
return true;
should_stop= true;
}
else
return false;
if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id))
{
rgi->worker_error= 1;
should_stop= true;
}
if (likely(!should_stop))
{
/*
Since we did not decide to stop, bump the largest_started_sub_id while
still holding LOCK_parallel_entry.
*/
if (rgi->gtid_sub_id > entry->largest_started_sub_id)
entry->largest_started_sub_id= rgi->gtid_sub_id;
}
return should_stop;
}
@ -452,15 +506,25 @@ do_ftwrl_wait(rpl_group_info *rgi,
mysql_cond_wait(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry);
} while (sub_id > entry->pause_sub_id);
DBUG_EXECUTE_IF("delay_ftwrl_wait_gtid_0_x_100", {
if (rgi->current_gtid.domain_id == 0 &&
rgi->current_gtid.seq_no == 100) {
/*
Simulate delayed wakeup from the mysql_cond_wait(). To do this, we
need to have the LOCK_parallel_entry mutex released during the wait.
*/
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
debug_sync_set_action(thd,
STRING_WITH_LEN("now SIGNAL pause_wait_started WAIT_FOR pause_wait_continue"));
mysql_mutex_lock(&entry->LOCK_parallel_entry);
}
});
/*
We do not call EXIT_COND() here, as this will be done later by our
caller (since we set *did_enter_cond to true).
*/
}
if (sub_id > entry->largest_started_sub_id)
entry->largest_started_sub_id= sub_id;
DBUG_RETURN(aborted);
}
@ -618,7 +682,17 @@ rpl_pause_for_ftwrl(THD *thd)
mysql_mutex_unlock(&rpt->LOCK_rpl_thread);
++e->need_sub_id_signal;
if (e->pause_sub_id == (uint64)ULONGLONG_MAX)
{
e->pause_sub_id= e->largest_started_sub_id;
DBUG_EXECUTE_IF("pause_for_ftwrl_wait", {
mysql_mutex_unlock(&e->LOCK_parallel_entry);
debug_sync_set_action(thd,
STRING_WITH_LEN("now "
"SIGNAL pause_ftwrl_waiting "
"WAIT_FOR pause_ftwrl_cont"));
mysql_mutex_lock(&e->LOCK_parallel_entry);
});
}
thd->ENTER_COND(&e->COND_parallel_entry, &e->LOCK_parallel_entry,
&stage_waiting_for_ftwrl_threads_to_pause, &old_stage);
thd->set_time_for_next_stage();
@ -826,12 +900,15 @@ do_retry:
for (;;)
{
mysql_mutex_lock(&entry->LOCK_parallel_entry);
register_wait_for_prior_event_group_commit(rgi, entry);
if (!(entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX ||
if (rgi->gtid_sub_id < entry->stop_on_error_sub_id
#ifndef DBUG_OFF
(DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) ||
|| DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)
#endif
rgi->gtid_sub_id < entry->stop_on_error_sub_id))
)
{
register_wait_for_prior_event_group_commit(rgi, entry);
}
else
{
/*
A failure of a preceding "parent" transaction may not be
@ -1255,14 +1332,15 @@ handle_rpl_parallel_thread(void *arg)
event_gtid_sub_id= rgi->gtid_sub_id;
rgi->thd= thd;
mysql_mutex_lock(&entry->LOCK_parallel_entry);
skip_event_group= do_gco_wait(rgi, gco, &did_enter_cond, &old_stage);
DBUG_EXECUTE_IF("gco_wait_delay_gtid_0_x_99", {
if (rgi->current_gtid.domain_id == 0 && rgi->current_gtid.seq_no == 99) {
debug_sync_set_action(thd,
STRING_WITH_LEN("now SIGNAL gco_wait_paused WAIT_FOR gco_wait_cont"));
} });
if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id))
{
skip_event_group= true;
rgi->worker_error= 1;
}
mysql_mutex_lock(&entry->LOCK_parallel_entry);
do_gco_wait(rgi, gco, &did_enter_cond, &old_stage);
skip_event_group= do_stop_handling(rgi);
if (likely(!skip_event_group))
skip_event_group= do_ftwrl_wait(rgi, &did_enter_cond, &old_stage);
@ -2370,20 +2448,18 @@ rpl_parallel::wait_for_done(THD *thd, Relay_log_info *rli)
are also executed, so that we stop at a consistent point in the binlog
stream (per replication domain).
All event groups wait for e->count_committing_event_groups to reach
the value of group_commit_orderer::wait_count before starting to
execute. Thus, at this point we know that any event group with a
strictly larger wait_count are safe to skip, none of them can have
started executing yet. So we set e->stop_count here and use it to
decide in the worker threads whether to continue executing an event
group or whether to skip it, when force_abort is set.
At this point, we are holding LOCK_parallel_entry, and we know that no
event group after e->largest_started_sub_id has started running yet. We
record this value in e->stop_sub_id, and then each event group can check
their own sub_id against it. If their sub_id is strictly larger, then
that event group will be skipped.
If we stop due to reaching the START SLAVE UNTIL condition, then we
need to continue executing any queued events up to that point.
*/
e->force_abort= true;
e->stop_count= rli->stop_for_until ?
e->count_queued_event_groups : e->count_committing_event_groups;
e->stop_sub_id= rli->stop_for_until ?
e->current_sub_id : e->largest_started_sub_id;
mysql_mutex_unlock(&e->LOCK_parallel_entry);
for (j= 0; j < e->rpl_thread_max; ++j)
{
@ -2439,7 +2515,7 @@ rpl_parallel::stop_during_until()
e= (struct rpl_parallel_entry *)my_hash_element(&domain_hash, i);
mysql_mutex_lock(&e->LOCK_parallel_entry);
if (e->force_abort)
e->stop_count= e->count_committing_event_groups;
e->stop_sub_id= e->largest_started_sub_id;
mysql_mutex_unlock(&e->LOCK_parallel_entry);
}
}

View file

@ -91,6 +91,10 @@ struct group_commit_orderer {
};
uint8 flags;
#ifndef DBUG_OFF
/*
Flag set when the GCO has been freed and entered the free list, to catch
(in debug) errors in the complex lifetime of this object.
*/
bool gc_done;
#endif
};
@ -276,13 +280,13 @@ struct rpl_parallel_entry {
/*
At STOP SLAVE (force_abort=true), we do not want to process all events in
the queue (which could unnecessarily delay stop, if a lot of events happen
to be queued). The stop_count provides a safe point at which to stop, so
to be queued). The stop_sub_id provides a safe point at which to stop, so
that everything before becomes committed and nothing after does. The value
corresponds to group_commit_orderer::wait_count; if wait_count is less than
or equal to stop_count, we execute the associated event group, else we
skip it (and all following) and stop.
corresponds to rpl_group_info::gtid_sub_id; if that is less than or equal
to stop_sub_id, we execute the associated event group, else we skip it (and
all following) and stop.
*/
uint64 stop_count;
uint64 stop_sub_id;
/*
Cyclic array recording the last rpl_thread_max worker threads that we

View file

@ -167,7 +167,7 @@ bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd,
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_VAR,
"%.*s is not a valid system variable and will"
"%.*s is not a valid system variable and will "
"be ignored.", (int)var.length, token);
}
else
@ -221,7 +221,7 @@ bool sysvartrack_validate_value(THD *thd, const char *str, size_t len)
/* Remove leading/trailing whitespace. */
trim_whitespace(system_charset_info, &var);
if (!strcmp(var.str, "*") && !find_sys_var(thd, var.str, var.length))
if (strcmp(var.str, "*") && !find_sys_var(thd, var.str, var.length))
return true;
if (lasts)
@ -331,10 +331,9 @@ void Session_sysvars_tracker::init(THD *thd)
mysql_mutex_assert_owner(&LOCK_global_system_variables);
DBUG_ASSERT(thd->variables.session_track_system_variables ==
global_system_variables.session_track_system_variables);
DBUG_ASSERT(global_system_variables.session_track_system_variables);
thd->variables.session_track_system_variables=
my_strdup(PSI_INSTRUMENT_ME,
global_system_variables.session_track_system_variables,
safe_str(global_system_variables.session_track_system_variables),
MYF(MY_WME | MY_THREAD_SPECIFIC));
}
@ -576,6 +575,12 @@ bool sysvartrack_global_update(THD *thd, char *str, size_t len)
{
LEX_STRING tmp= { str, len };
Session_sysvars_tracker::vars_list dummy;
DBUG_EXECUTE_IF("dbug_session_tracker_parse_error",
{
my_error(ER_OUTOFMEMORY, MYF(0), 1);
return true;
});
if (!dummy.parse_var_list(thd, tmp, false, system_charset_info))
{
dummy.construct_var_list(str, len + 1);

View file

@ -1895,8 +1895,10 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
(master_row= mysql_fetch_row(master_res)))
{
mysql_mutex_lock(&mi->data_lock);
mi->clock_diff_with_master=
(long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10));
mi->clock_diff_with_master= DBUG_EVALUATE_IF(
"negate_clock_diff_with_master", 0,
(long) (time((time_t *) 0) - strtoul(master_row[0], 0, 10)));
mysql_mutex_unlock(&mi->data_lock);
}
else if (check_io_slave_killed(mi, NULL))
@ -3225,6 +3227,14 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
else
{
idle= mi->rli.sql_thread_caught_up;
/*
The idleness of the SQL thread is needed for the parallel slave
because events can be ignored before distribution to a worker thread.
That is, Seconds_Behind_Master should still be calculated and visible
while the slave is processing ignored events, such as those skipped
due to slave_skip_counter.
*/
if (mi->using_parallel() && idle && !mi->rli.parallel.workers_idle())
idle= false;
}
@ -4190,7 +4200,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
thd,
STRING_WITH_LEN(
"now SIGNAL paused_on_event WAIT_FOR sql_thread_continue")));
DBUG_SET("-d,pause_sql_thread_on_next_event");
mysql_mutex_lock(&rli->data_lock);
});
@ -4207,7 +4216,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
the user might be surprised to see a claim that the slave is up to date
long before those queued events are actually executed.
*/
if ((!rli->mi->using_parallel()) && event_can_update_last_master_timestamp(ev))
if ((!rli->mi->using_parallel()) &&
event_can_update_last_master_timestamp(ev))
{
rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
rli->sql_thread_caught_up= false;
@ -4262,9 +4272,22 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (rli->mi->using_parallel())
{
if (unlikely((rli->last_master_timestamp == 0 ||
rli->sql_thread_caught_up) &&
event_can_update_last_master_timestamp(ev)))
/*
rli->sql_thread_caught_up is checked and negated here to ensure that
the value of Seconds_Behind_Master in SHOW SLAVE STATUS is consistent
with the update of last_master_timestamp. It was previously unset
immediately after reading an event from the relay log; however, for the
duration between that unset and the time that LMT would be updated
could lead to spikes in SBM.
The check for queued_count == dequeued_count ensures the worker threads
are all idle (i.e. all events have been executed).
*/
if ((unlikely(rli->last_master_timestamp == 0) ||
(rli->sql_thread_caught_up &&
(rli->last_inuse_relaylog->queued_count ==
rli->last_inuse_relaylog->dequeued_count))) &&
event_can_update_last_master_timestamp(ev))
{
if (rli->last_master_timestamp < ev->when)
{

View file

@ -3056,7 +3056,9 @@ Sp_handler::sp_load_for_information_schema(THD *thd, TABLE *proc_table,
sp_cache **spc= get_cache(thd);
sp_name sp_name_obj(&db, &name, true); // This can change "name"
*free_sp_head= 0;
if ((sp= sp_cache_lookup(spc, &sp_name_obj)))
sp= sp_cache_lookup(spc, &sp_name_obj);
if (sp && !(sp->sp_cache_version() < sp_cache_version()))
{
return sp;
}

View file

@ -13135,7 +13135,6 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
if (ssl_acceptor_fd)
{
thd->client_capabilities |= CLIENT_SSL;
thd->client_capabilities |= CLIENT_SSL_VERIFY_SERVER_CERT;
}
if (data_len)

View file

@ -812,6 +812,11 @@ int close_thread_tables(THD *thd)
!thd->stmt_arena->is_stmt_prepare())
table->part_info->vers_check_limit(thd);
#endif
/*
For simple locking we clean it up here because we don't close thread
tables. For prelocking we close it when we do close thread tables.
*/
if (thd->locked_tables_mode != LTM_PRELOCKED)
table->vcol_cleanup_expr(thd);
}

View file

@ -7972,15 +7972,22 @@ wait_for_commit::register_wait_for_prior_commit(wait_for_commit *waitee)
with register_wait_for_prior_commit(). If the commit already completed,
returns immediately.
If ALLOW_KILL is set to true (the default), the wait can be aborted by a
kill. In case of kill, the wait registration is still removed, so another
call of unregister_wait_for_prior_commit() is needed to later retry the
wait. If ALLOW_KILL is set to false, then kill will be ignored and this
function will not return until the prior commit (if any) has called
wakeup_subsequent_commits().
If thd->backup_commit_lock is set, release it while waiting for other threads
*/
int
wait_for_commit::wait_for_prior_commit2(THD *thd)
wait_for_commit::wait_for_prior_commit2(THD *thd, bool allow_kill)
{
PSI_stage_info old_stage;
wait_for_commit *loc_waitee;
bool backup_lock_released= 0;
bool backup_lock_released= false;
/*
Release MDL_BACKUP_COMMIT LOCK while waiting for other threads to commit
@ -7990,7 +7997,7 @@ wait_for_commit::wait_for_prior_commit2(THD *thd)
*/
if (thd->backup_commit_lock && thd->backup_commit_lock->ticket)
{
backup_lock_released= 1;
backup_lock_released= true;
thd->mdl_context.release_lock(thd->backup_commit_lock->ticket);
thd->backup_commit_lock->ticket= 0;
}
@ -8001,7 +8008,7 @@ wait_for_commit::wait_for_prior_commit2(THD *thd)
&stage_waiting_for_prior_transaction_to_commit,
&old_stage);
while ((loc_waitee= this->waitee.load(std::memory_order_relaxed)) &&
likely(!thd->check_killed(1)))
(!allow_kill || likely(!thd->check_killed(1))))
mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit);
if (!loc_waitee)
{
@ -8043,14 +8050,14 @@ wait_for_commit::wait_for_prior_commit2(THD *thd)
use within enter_cond/exit_cond.
*/
DEBUG_SYNC(thd, "wait_for_prior_commit_killed");
if (backup_lock_released)
if (unlikely(backup_lock_released))
thd->mdl_context.acquire_lock(thd->backup_commit_lock,
thd->variables.lock_wait_timeout);
return wakeup_error;
end:
thd->EXIT_COND(&old_stage);
if (backup_lock_released)
if (unlikely(backup_lock_released))
thd->mdl_context.acquire_lock(thd->backup_commit_lock,
thd->variables.lock_wait_timeout);
return wakeup_error;

View file

@ -2243,14 +2243,14 @@ struct wait_for_commit
bool commit_started;
void register_wait_for_prior_commit(wait_for_commit *waitee);
int wait_for_prior_commit(THD *thd)
int wait_for_prior_commit(THD *thd, bool allow_kill=true)
{
/*
Quick inline check, to avoid function call and locking in the common case
where no wakeup is registered, or a registered wait was already signalled.
*/
if (waitee.load(std::memory_order_acquire))
return wait_for_prior_commit2(thd);
return wait_for_prior_commit2(thd, allow_kill);
else
{
if (wakeup_error)
@ -2304,7 +2304,7 @@ struct wait_for_commit
void wakeup(int wakeup_error);
int wait_for_prior_commit2(THD *thd);
int wait_for_prior_commit2(THD *thd, bool allow_kill);
void wakeup_subsequent_commits2(int wakeup_error);
void unregister_wait_for_prior_commit2();
@ -4899,10 +4899,10 @@ public:
}
wait_for_commit *wait_for_commit_ptr;
int wait_for_prior_commit()
int wait_for_prior_commit(bool allow_kill=true)
{
if (wait_for_commit_ptr)
return wait_for_commit_ptr->wait_for_prior_commit(this);
return wait_for_commit_ptr->wait_for_prior_commit(this, allow_kill);
return 0;
}
void wakeup_subsequent_commits(int wakeup_error)

View file

@ -1985,7 +1985,8 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
if (error != HA_ERR_RECORD_IS_THE_SAME)
{
info->updated++;
if (table->versioned())
if (table->versioned() &&
table->vers_check_update(*info->update_fields))
{
if (table->versioned(VERS_TIMESTAMP))
{

View file

@ -6399,9 +6399,11 @@ execute_show_status(THD *thd, TABLE_LIST *all_tables)
memcpy(&thd->status_var, &old_status_var,
offsetof(STATUS_VAR, last_cleared_system_status_var));
mysql_mutex_unlock(&LOCK_status);
thd->initial_status_var= NULL;
return res;
#ifdef WITH_WSREP
wsrep_error_label: /* see WSREP_SYNC_WAIT() macro above */
thd->initial_status_var= NULL;
return true;
#endif /* WITH_WSREP */
}

View file

@ -1432,6 +1432,50 @@ void plugin_unlock_list(THD *thd, plugin_ref *list, uint count)
DBUG_VOID_RETURN;
}
/*
  Log the standard "registration failed" error for a plugin whose
  type-level initialization did not succeed.
*/
static void print_init_failed_error(st_plugin_int *p)
{
  /* Resolve the human-readable plugin-type name before formatting. */
  const char *type_name= plugin_type_names[p->plugin->type].str;
  sql_print_error("Plugin '%s' registration as a %s failed.",
                  p->name.str, type_name);
}
/*
  Run a plugin's initialization function and, on success, publish its
  status variables.

  @param plugin  the plugin being initialized
  @param state   out: set to PLUGIN_IS_READY on successful init; left
                 untouched on failure

  @return 0 on success, HA_ERR_RETRY_INIT if the plugin asked to be
          retried later (e.g. it depends on another plugin that is not
          yet up), or another non-zero error code on failure.

  NOTE(review): must be called without LOCK_plugin held — the init
  callback may itself need the lock.
*/
static int plugin_do_initialize(struct st_plugin_int *plugin, uint &state)
{
  DBUG_ENTER("plugin_do_initialize");
  mysql_mutex_assert_not_owner(&LOCK_plugin);
  /* Prefer the per-type initializer; fall back to the plugin's own init. */
  plugin_type_init init= plugin_type_initialize[plugin->plugin->type];
  if (!init)
    init= (plugin_type_init) plugin->plugin->init;
  if (init)
    if (int ret= init(plugin))
    {
      /* Plugin init failed and did not request a retry */
      if (ret != HA_ERR_RETRY_INIT)
        print_init_failed_error(plugin);
      /* HA_ERR_RETRY_INIT is propagated silently so the caller can retry. */
      DBUG_RETURN(ret);
    }
  state= PLUGIN_IS_READY; // plugin->init() succeeded
  if (plugin->plugin->status_vars)
  {
    /*
      historical ndb behavior caused MySQL plugins to specify
      status var names in full, with the plugin name prefix.
      this was never fixed in MySQL.
      MariaDB fixes that but supports MySQL style too.
    */
    SHOW_VAR *show_vars= plugin->plugin->status_vars;
    SHOW_VAR tmp_array[2]= {{plugin->plugin->name,
                             (char *) plugin->plugin->status_vars, SHOW_ARRAY},
                            {0, 0, SHOW_UNDEF}};
    /* If the vars are not already prefixed with the plugin name, wrap them. */
    if (strncasecmp(show_vars->name, plugin->name.str, plugin->name.length))
      show_vars= tmp_array;
    if (add_status_vars(show_vars))
      DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
}
static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin,
int *argc, char **argv, bool options_only)
@ -1454,52 +1498,10 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin,
{
ret= !options_only && plugin_is_forced(plugin);
state= PLUGIN_IS_DISABLED;
goto err;
}
else
ret= plugin_do_initialize(plugin, state);
if (plugin_type_initialize[plugin->plugin->type])
{
if ((*plugin_type_initialize[plugin->plugin->type])(plugin))
{
sql_print_error("Plugin '%s' registration as a %s failed.",
plugin->name.str, plugin_type_names[plugin->plugin->type].str);
goto err;
}
}
else if (plugin->plugin->init)
{
if (plugin->plugin->init(plugin))
{
sql_print_error("Plugin '%s' init function returned error.",
plugin->name.str);
goto err;
}
}
state= PLUGIN_IS_READY; // plugin->init() succeeded
if (plugin->plugin->status_vars)
{
/*
historical ndb behavior caused MySQL plugins to specify
status var names in full, with the plugin name prefix.
this was never fixed in MySQL.
MariaDB fixes that but supports MySQL style too.
*/
SHOW_VAR *show_vars= plugin->plugin->status_vars;
SHOW_VAR tmp_array[2]= {
{plugin->plugin->name, (char*)plugin->plugin->status_vars, SHOW_ARRAY},
{0, 0, SHOW_UNDEF}
};
if (strncasecmp(show_vars->name, plugin->name.str, plugin->name.length))
show_vars= tmp_array;
if (add_status_vars(show_vars))
goto err;
}
ret= 0;
err:
if (ret)
plugin_variables_deinit(plugin);
@ -1592,7 +1594,7 @@ int plugin_init(int *argc, char **argv, int flags)
uint i;
struct st_maria_plugin **builtins;
struct st_maria_plugin *plugin;
struct st_plugin_int tmp, *plugin_ptr, **reap;
struct st_plugin_int tmp, *plugin_ptr, **reap, **retry_end, **retry_start;
MEM_ROOT tmp_root;
bool reaped_mandatory_plugin= false;
bool mandatory= true;
@ -1733,11 +1735,16 @@ int plugin_init(int *argc, char **argv, int flags)
*/
mysql_mutex_lock(&LOCK_plugin);
/* List of plugins to reap */
reap= (st_plugin_int **) my_alloca((plugin_array.elements+1) * sizeof(void*));
*(reap++)= NULL;
/* List of plugins to retry */
retry_start= retry_end=
(st_plugin_int **) my_alloca((plugin_array.elements+1) * sizeof(void*));
for(;;)
{
int error;
for (i=0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
{
HASH *hash= plugin_hash + plugin_type_initialization_order[i];
@ -1751,14 +1758,51 @@ int plugin_init(int *argc, char **argv, int flags)
bool opts_only= flags & PLUGIN_INIT_SKIP_INITIALIZATION &&
(flags & PLUGIN_INIT_SKIP_PLUGIN_TABLE ||
!plugin_table_engine);
if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, opts_only))
error= plugin_initialize(&tmp_root, plugin_ptr, argc, argv,
opts_only);
if (error)
{
plugin_ptr->state= PLUGIN_IS_DYING;
/* The plugin wants a retry of the initialisation,
possibly due to dependency on other plugins */
if (unlikely(error == HA_ERR_RETRY_INIT))
*(retry_end++)= plugin_ptr;
else
*(reap++)= plugin_ptr;
}
}
}
}
/* Retry plugins that asked for it */
while (retry_start < retry_end)
{
st_plugin_int **to_re_retry, **retrying;
for (to_re_retry= retrying= retry_start; retrying < retry_end; retrying++)
{
plugin_ptr= *retrying;
uint state= plugin_ptr->state;
mysql_mutex_unlock(&LOCK_plugin);
error= plugin_do_initialize(plugin_ptr, state);
mysql_mutex_lock(&LOCK_plugin);
plugin_ptr->state= state;
if (error == HA_ERR_RETRY_INIT)
*(to_re_retry++)= plugin_ptr;
else if (error)
*(reap++)= plugin_ptr;
}
/* If the retry list has not changed, i.e. if all retry attempts
result in another retry request, empty the retry list */
if (to_re_retry == retry_end)
while (to_re_retry > retry_start)
{
plugin_ptr= *(--to_re_retry);
*(reap++)= plugin_ptr;
/** `plugin_do_initialize()' did not print any error in this
case, so we do it here. */
print_init_failed_error(plugin_ptr);
}
retry_end= to_re_retry;
}
/* load and init plugins from the plugin table (unless done already) */
if (flags & PLUGIN_INIT_SKIP_PLUGIN_TABLE)
@ -1784,6 +1828,7 @@ int plugin_init(int *argc, char **argv, int flags)
}
mysql_mutex_unlock(&LOCK_plugin);
my_afree(retry_start);
my_afree(reap);
if (reaped_mandatory_plugin && !opt_help)
goto err;

View file

@ -2564,8 +2564,6 @@ class derived_handler;
class Pushdown_derived: public Sql_alloc
{
private:
bool is_analyze;
public:
TABLE_LIST *derived;
derived_handler *handler;

View file

@ -897,6 +897,20 @@ end:
DBUG_RETURN(error);
}
#if defined(HAVE_REPLICATION)
class wait_for_commit_raii
{
private:
THD *m_thd;
wait_for_commit *m_wfc;
public:
wait_for_commit_raii(THD* thd) :
m_thd(thd), m_wfc(thd->suspend_subsequent_commits())
{}
~wait_for_commit_raii() { m_thd->resume_subsequent_commits(m_wfc); }
};
#endif
bool Sql_cmd_alter_sequence::execute(THD *thd)
{
@ -909,7 +923,10 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
SEQUENCE *seq;
No_such_table_error_handler no_such_table_handler;
DBUG_ENTER("Sql_cmd_alter_sequence::execute");
#if defined(HAVE_REPLICATION)
/* No wakeup():s of subsequent commits is allowed in this function. */
wait_for_commit_raii suspend_wfc(thd);
#endif
if (check_access(thd, ALTER_ACL, first_table->db.str,
&first_table->grant.privilege,
@ -1009,19 +1026,15 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
else
table->file->print_error(error, MYF(0));
seq->write_unlock(table);
{
wait_for_commit* suspended_wfc= thd->suspend_subsequent_commits();
if (trans_commit_stmt(thd))
error= 1;
if (trans_commit_implicit(thd))
error= 1;
thd->resume_subsequent_commits(suspended_wfc);
DBUG_EXECUTE_IF("hold_worker_on_schedule",
{
/* delay binlogging of a parent trx in rpl_parallel_seq */
my_sleep(100000);
});
}
if (likely(!error))
error= write_bin_log(thd, 1, thd->query(), thd->query_length());
if (likely(!error))

View file

@ -231,6 +231,11 @@ bool TABLE::vers_check_update(List<Item> &items)
}
}
}
/*
Tell TRX_ID-versioning that it does not insert history row
(see calc_row_difference()).
*/
vers_write= false;
return false;
}

View file

@ -3084,7 +3084,7 @@ bool Window_funcs_sort::setup(THD *thd, SQL_SELECT *sel,
spec= win_func->window_spec;
int win_func_order_elements= spec->partition_list->elements +
spec->order_list->elements;
if (win_func_order_elements > longest_order_elements)
if (win_func_order_elements >= longest_order_elements)
{
win_func_with_longest_order= win_func;
longest_order_elements= win_func_order_elements;

View file

@ -664,8 +664,12 @@ public:
{
if (sysvartrack_global_update(thd, new_val,
var->save_result.string_value.length))
{
if (new_val)
my_free(new_val);
new_val= 0;
}
}
global_update_finish(new_val);
return (new_val == 0 && var->save_result.string_value.str != 0);
}

View file

@ -335,9 +335,9 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
hp->Headlen(), hp->Reclen(), fields);
htrc("flags(iem)=%d,%d,%d cp=%d\n", hp->Incompleteflag,
hp->Encryptflag, hp->Mdxflag, hp->Language);
htrc("%hd records, last changed %02d/%02d/%d\n",
hp->Records(), hp->Filedate[1], hp->Filedate[2],
hp->Filedate[0] + ((hp->Filedate[0] <= 30) ? 2000 : 1900));
htrc("%hd records, last changed %04d-%02d-%02d\n",
hp->Records(),
hp->Filedate[0] + 1900, hp->Filedate[1], hp->Filedate[2]);
htrc("Field Type Offset Len Dec Set Mdx\n");
} // endif trace
@ -605,8 +605,7 @@ bool DBFFAM::OpenTableFile(PGLOBAL g)
strcpy(opmode, (UseTemp) ? "rb" : "r+b");
break;
case MODE_INSERT:
// Must be in text mode to remove an eventual EOF character
strcpy(opmode, "a+");
strcpy(opmode, Records ? "r+b" : "w+b");
break;
default:
snprintf(g->Message, sizeof(g->Message), MSG(BAD_OPEN_MODE), mode);
@ -643,6 +642,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
{
char c;
int rc;
int len;
MODE mode = Tdbp->GetMode();
Buflen = Blksize;
@ -664,7 +664,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
/************************************************************************/
/* If this is a new file, the header must be generated. */
/************************************************************************/
int len = GetFileLength(g);
len = GetFileLength(g);
if (!len) {
// Make the header for this DBF table file
@ -702,7 +702,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
header->Version = DBFTYPE;
t = time(NULL) - (time_t)DTVAL::GetShift();
datm = gmtime(&t);
header->Filedate[0] = datm->tm_year - 100;
header->Filedate[0] = datm->tm_year;
header->Filedate[1] = datm->tm_mon + 1;
header->Filedate[2] = datm->tm_mday;
header->SetHeadlen((ushort)hlen);
@ -793,8 +793,12 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
/**************************************************************************/
/* Position the file at the begining of the data. */
/**************************************************************************/
if (Tdbp->GetMode() == MODE_INSERT)
if (Tdbp->GetMode() == MODE_INSERT) {
if (len)
rc = fseek(Stream, -1, SEEK_END);
else
rc = fseek(Stream, 0, SEEK_END);
}
else
rc = fseek(Stream, Headlen, SEEK_SET);
@ -979,6 +983,7 @@ void DBFFAM::CloseTableFile(PGLOBAL g, bool abort)
Rbuf = CurNum--;
// Closing = true;
wrc = WriteBuffer(g);
fputc(0x1a, Stream);
} else if (mode == MODE_UPDATE || mode == MODE_DELETE) {
if (Modif && !Closing) {
// Last updated block remains to be written
@ -1003,34 +1008,26 @@ void DBFFAM::CloseTableFile(PGLOBAL g, bool abort)
} // endif's mode
if (Tdbp->GetMode() == MODE_INSERT) {
int n = ftell(Stream) - Headlen;
rc = PlugCloseFile(g, To_Fb);
int n = ftell(Stream) - Headlen - 1;
if (n >= 0 && !(n % Lrecl)) {
n /= Lrecl; // New number of lines
if (n > Records) {
// Update the number of rows in the file header
char filename[_MAX_PATH];
PlugSetPath(filename, To_File, Tdbp->GetPath());
if ((Stream= global_fopen(g, MSGID_OPEN_MODE_STRERROR, filename, "r+b")))
{
char nRecords[4];
int4store(nRecords, n);
fseek(Stream, 4, SEEK_SET); // Get header.Records position
fwrite(nRecords, sizeof(nRecords), 1, Stream);
fclose(Stream);
Stream= NULL;
Records= n; // Update Records value
} // endif n
} // endif n
}
} // endif n
} // endif n
} else // Finally close the file
// Finally close the file
rc = PlugCloseFile(g, To_Fb);
fin:

View file

@ -64,6 +64,24 @@ t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci `TABLE_TYPE`=DBF `FILE_NAME`='t1.dbf'
INSERT INTO t1 VALUES (10),(20);
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 91
DBF_Version 03
NRecords 2
FirstRecPos 66
RecLength 12
TableFlags 0000
CodePageMark 00
--- ---
FieldN 0
Name a
Type N
Offset 0
Length 11
Dec 0
Flags 00
-------- --------
SELECT * FROM t1;
a
10
@ -89,6 +107,24 @@ t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci `TABLE_TYPE`=DBF `FILE_NAME`='t1.dbf' `READONLY`=NO
INSERT INTO t1 VALUES (30);
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 103
DBF_Version 03
NRecords 3
FirstRecPos 66
RecLength 12
TableFlags 0000
CodePageMark 00
--- ---
FieldN 0
Name a
Type N
Offset 0
Length 11
Dec 0
Flags 00
-------- --------
SELECT * FROM t1;
a
10
@ -137,7 +173,7 @@ a
test
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 77
FileSize 78
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -171,7 +207,7 @@ a b c
2 2 2
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 194
FileSize 195
DBF_Version 03
NRecords 2
FirstRecPos 130
@ -264,7 +300,7 @@ a
-9223372036854775808
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 108
FileSize 109
DBF_Version 03
NRecords 2
FirstRecPos 66
@ -308,7 +344,7 @@ a
-32768
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 80
FileSize 81
DBF_Version 03
NRecords 2
FirstRecPos 66
@ -338,7 +374,7 @@ LENGTH(a)
255
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 322
FileSize 323
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -419,7 +455,7 @@ a
2001-01-01
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 75
FileSize 76
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -449,7 +485,7 @@ a
123.0000
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 79
FileSize 80
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -481,7 +517,7 @@ a
123456789.12345
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 108
FileSize 109
DBF_Version 03
NRecords 2
FirstRecPos 66
@ -511,7 +547,7 @@ a
10
CALL dbf_header('MYSQLD_DATADIR/test/t1c.dbf');
-------- --------
FileSize 77
FileSize 78
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -538,7 +574,7 @@ a
10
CALL dbf_header('MYSQLD_DATADIR/test/t1c.dbf');
-------- --------
FileSize 77
FileSize 78
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -567,7 +603,7 @@ a
10
CALL dbf_header('MYSQLD_DATADIR/test/t1c.dbf');
-------- --------
FileSize 77
FileSize 78
DBF_Version 03
NRecords 1
FirstRecPos 66
@ -604,7 +640,7 @@ c1 c2 i1 i2
30 def 30 123
CALL dbf_header('MYSQLD_DATADIR/test/t1.dbf');
-------- --------
FileSize 291
FileSize 292
DBF_Version 03
NRecords 3
FirstRecPos 162

View file

@ -63,6 +63,11 @@ DELIMITER ;//
CREATE TABLE t1 (a INT NOT NULL) ENGINE=CONNECT TABLE_TYPE=DBF FILE_NAME='t1.dbf';
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (10),(20);
--chmod 0777 $MYSQLD_DATADIR/test/t1.dbf
--vertical_results
--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
eval CALL dbf_header('$MYSQLD_DATADIR/test/t1.dbf');
--horizontal_results
SELECT * FROM t1;
ALTER TABLE t1 READONLY=Yes;
SHOW CREATE TABLE t1;
@ -77,6 +82,11 @@ TRUNCATE TABLE t1;
ALTER TABLE t1 READONLY=NO;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (30);
--chmod 0777 $MYSQLD_DATADIR/test/t1.dbf
--vertical_results
--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
eval CALL dbf_header('$MYSQLD_DATADIR/test/t1.dbf');
--horizontal_results
SELECT * FROM t1;
DROP TABLE t1;
--remove_file $MYSQLD_DATADIR/test/t1.dbf

View file

@ -7937,6 +7937,10 @@ calc_row_difference(
trx_t* const trx = prebuilt->trx;
doc_id_t doc_id = FTS_NULL_DOC_ID;
uint16_t num_v = 0;
#ifndef DBUG_OFF
uint vers_fields = 0;
#endif
prebuilt->versioned_write = table->versioned_write(VERS_TRX_ID);
const bool skip_virtual = ha_innobase::omits_virtual_cols(*table->s);
ut_ad(!srv_read_only_mode);
@ -7949,6 +7953,14 @@ calc_row_difference(
for (uint i = 0; i < table->s->fields; i++) {
field = table->field[i];
#ifndef DBUG_OFF
if (!field->vers_sys_field()
&& !field->vers_update_unversioned()) {
++vers_fields;
}
#endif
const bool is_virtual = !field->stored_in_db();
if (is_virtual && skip_virtual) {
num_v++;
@ -8288,6 +8300,21 @@ calc_row_difference(
ut_a(buf <= (byte*) original_upd_buff + buff_len);
const TABLE_LIST *tl= table->pos_in_table_list;
const uint8 op_map= tl->trg_event_map | tl->slave_fk_event_map;
/* Used to avoid reading history in FK check on DELETE (see MDEV-16210). */
prebuilt->upd_node->is_delete =
(op_map & trg2bit(TRG_EVENT_DELETE)
&& table->versioned(VERS_TIMESTAMP))
? VERSIONED_DELETE : NO_DELETE;
if (prebuilt->versioned_write) {
/* Guaranteed by CREATE TABLE, but anyway we make sure we
generate history only when there are versioned fields. */
DBUG_ASSERT(vers_fields);
prebuilt->upd_node->vers_make_update(trx);
}
ut_ad(uvect->validate());
return(DB_SUCCESS);
}
@ -8437,45 +8464,23 @@ ha_innobase::update_row(
MySQL that the row is not really updated and it
should not increase the count of updated rows.
This is fix for http://bugs.mysql.com/29157 */
if (m_prebuilt->versioned_write
&& thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE
/* Multiple UPDATE of same rows in single transaction create
historical rows only once. */
&& trx->id != table->vers_start_id()) {
error = row_insert_for_mysql((byte*) old_row,
m_prebuilt,
ROW_INS_HISTORICAL);
if (error != DB_SUCCESS) {
goto func_exit;
}
}
DBUG_RETURN(HA_ERR_RECORD_IS_THE_SAME);
} else {
const bool vers_set_fields = m_prebuilt->versioned_write
&& m_prebuilt->upd_node->update->affects_versioned();
const bool vers_ins_row = vers_set_fields
&& thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE;
TABLE_LIST *tl= table->pos_in_table_list;
uint8 op_map= tl->trg_event_map | tl->slave_fk_event_map;
/* This is not a delete */
m_prebuilt->upd_node->is_delete =
(vers_set_fields && !vers_ins_row) ||
(op_map & trg2bit(TRG_EVENT_DELETE) &&
table->versioned(VERS_TIMESTAMP))
? VERSIONED_DELETE
: NO_DELETE;
if (m_prebuilt->upd_node->is_delete) {
trx->fts_next_doc_id = 0;
}
/* row_start was updated by vers_make_update()
in calc_row_difference() */
error = row_update_for_mysql(m_prebuilt);
if (error == DB_SUCCESS && vers_ins_row
if (error == DB_SUCCESS && m_prebuilt->versioned_write
/* Multiple UPDATE of same rows in single transaction create
historical rows only once. */
&& trx->id != table->vers_start_id()) {
/* UPDATE is not used by ALTER TABLE. Just precaution
as we don't need history generation for ALTER TABLE. */
ut_ad(thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE);
error = row_insert_for_mysql((byte*) old_row,
m_prebuilt,
ROW_INS_HISTORICAL);

View file

@ -176,6 +176,26 @@ operator<<(std::ostream& out, const lock_rec_t& lock)
#endif
/* @} */
/**
Checks if the `mode` is LOCK_S or LOCK_X (possibly ORed with LOCK_WAIT or
LOCK_REC) which means the lock is a
Next Key Lock, a.k.a. LOCK_ORDINARY, as opposed to Predicate Lock,
GAP lock, Insert Intention or Record Lock.
@param mode A mode and flags, of a lock.
@return true if the only bits set in `mode` are LOCK_S or LOCK_X and optionally
LOCK_WAIT or LOCK_REC */
static inline bool lock_mode_is_next_key_lock(ulint mode)
{
static_assert(LOCK_ORDINARY == 0, "LOCK_ORDINARY must be 0 (no flags)");
ut_ad((mode & LOCK_TABLE) == 0);
mode&= ~(LOCK_WAIT | LOCK_REC);
ut_ad((mode & LOCK_WAIT) == 0);
ut_ad((mode & LOCK_TYPE_MASK) == 0);
ut_ad(((mode & ~(LOCK_MODE_MASK)) == LOCK_ORDINARY) ==
(mode == LOCK_S || mode == LOCK_X));
return (mode & ~(LOCK_MODE_MASK)) == LOCK_ORDINARY;
}
/** Lock struct; protected by lock_sys.mutex */
struct ib_lock_t
{
@ -232,6 +252,13 @@ struct ib_lock_t
return(type_mode & LOCK_REC_NOT_GAP);
}
/** @return true if the lock is a Next Key Lock */
bool is_next_key_lock() const
{
return is_record_lock()
&& lock_mode_is_next_key_lock(type_mode);
}
bool is_insert_intention() const
{
return(type_mode & LOCK_INSERT_INTENTION);

View file

@ -988,6 +988,14 @@ lock_rec_has_expl(
static_cast<lock_mode>(
precise_mode & LOCK_MODE_MASK))
&& !lock_get_wait(lock)
/* If we unfold the following expression, we will see it's
true when:
(heap_no is supremum)
or
(the found lock is LOCK_ORDINARY)
or
(the requested and the found lock modes are equal to each
other and equal to LOCK_REC_GAP | LOCK_REC_NOT_GAP). */
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
@ -1848,6 +1856,46 @@ lock_rec_add_to_queue(
type_mode, block, heap_no, index, trx, caller_owns_trx_mutex);
}
/** A helper function for lock_rec_lock_slow(), which grants a Next Key Lock
(either LOCK_X or LOCK_S as specified by `mode`) on <`block`,`heap_no`> in the
`index` to the `trx`, assuming that it already has a granted `held_lock`, which
is at least as strong as mode|LOCK_REC_NOT_GAP. It does so by either reusing the
lock if it already covers the gap, or by ensuring a separate GAP Lock, which in
combination with Record Lock satisfies the request.
@param[in] held_lock a lock granted to `trx` which is at least as strong
as mode|LOCK_REC_NOT_GAP
@param[in] mode requested lock mode: LOCK_X or LOCK_S
@param[in] block buffer block containing the record to be locked
@param[in] heap_no heap number of the record to be locked
@param[in] index index of record to be locked
@param[in] trx the transaction requesting the Next Key Lock */
static void lock_reuse_for_next_key_lock(const lock_t *held_lock, unsigned mode,
const buf_block_t *block,
ulint heap_no, dict_index_t *index,
trx_t *trx)
{
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(trx));
ut_ad(mode == LOCK_S || mode == LOCK_X);
ut_ad(lock_mode_is_next_key_lock(mode));
if (!held_lock->is_record_not_gap())
{
ut_ad(held_lock->is_next_key_lock());
return;
}
/* We have a Record Lock granted, so we only need a GAP Lock. We assume
that GAP Locks do not conflict with anything. Therefore a GAP Lock
could be granted to us right now if we've requested: */
mode|= LOCK_GAP;
ut_ad(nullptr == lock_rec_other_has_conflicting(mode, block, heap_no, trx));
/* It might be the case we already have one, so we first check that. */
if (lock_rec_has_expl(mode, block, heap_no, trx) == nullptr)
lock_rec_add_to_queue(LOCK_REC | mode, block, heap_no, index, trx, true);
}
/*********************************************************************//**
Tries to lock the specified record in the mode requested. If not immediately
possible, enqueues a waiting lock request. This is a low-level function
@ -1900,8 +1948,17 @@ lock_rec_lock(
lock->type_mode != (ulint(mode) | LOCK_REC) ||
lock_rec_get_n_bits(lock) <= heap_no)
{
ulint checked_mode= (heap_no != PAGE_HEAP_NO_SUPREMUM &&
lock_mode_is_next_key_lock(mode))
? mode | LOCK_REC_NOT_GAP
: mode;
const lock_t *held_lock=
lock_rec_has_expl(checked_mode, block, heap_no, trx);
/* Do nothing if the trx already has a strong enough lock on rec */
if (!lock_rec_has_expl(mode, block, heap_no, trx))
if (!held_lock)
{
if (
#ifdef WITH_WSREP
@ -1928,6 +1985,16 @@ lock_rec_lock(
err= DB_SUCCESS_LOCKED_REC;
}
}
/* If checked_mode == mode, trx already has a strong enough lock on rec */
else if (checked_mode != mode)
{
/* As check_mode != mode, the mode is Next Key Lock, which can not be
emulated by implicit lock (which are LOCK_REC_NOT_GAP only). */
ut_ad(!impl);
lock_reuse_for_next_key_lock(held_lock, mode, block, heap_no, index,
trx);
}
}
else if (!impl)
{
@ -5763,6 +5830,8 @@ lock_sec_rec_read_check_and_lock(
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
DEBUG_SYNC_C("lock_sec_rec_read_check_and_lock_has_locked");
return(err);
}

View file

@ -2438,7 +2438,10 @@ row_ins_duplicate_error_in_clust(
duplicate:
trx->error_info = cursor->index;
err = DB_DUPLICATE_KEY;
if (cursor->index->table->versioned()
if (thr->prebuilt
&& thr->prebuilt->upd_node
&& thr->prebuilt->upd_node->is_delete
== VERSIONED_DELETE
&& entry->vers_history_row())
{
ulint trx_id_len;

View file

@ -504,7 +504,8 @@ row_merge_buf_add(
VCOL_STORAGE vcol_storage;
DBUG_ENTER("row_merge_buf_add");
if (buf->n_tuples >= buf->max_tuples) {
if (buf->n_tuples >= buf->max_tuples
|| (history_fts && (buf->index->type & DICT_FTS))) {
error:
n_row_added = 0;
goto end;
@ -597,7 +598,8 @@ error:
/* Tokenize and process data for FTS */
if (!history_fts && (index->type & DICT_FTS)) {
if (index->type & DICT_FTS) {
ut_ad(!history_fts);
fts_doc_item_t* doc_item;
byte* value;
void* ptr;
@ -1872,6 +1874,7 @@ row_merge_read_clustered_index(
mach_write_to_8(new_sys_trx_start, trx->id);
mach_write_to_8(new_sys_trx_end, TRX_ID_MAX);
uint64_t n_rows = 0;
bool history_row = false;
/* Scan the clustered index. */
for (;;) {
@ -1888,7 +1891,7 @@ row_merge_read_clustered_index(
dtuple_t* row;
row_ext_t* ext;
page_cur_t* cur = btr_pcur_get_page_cur(&pcur);
bool history_row, history_fts = false;
bool history_fts = false;
page_cur_move_to_next(cur);
@ -2514,7 +2517,8 @@ write_buffers:
ut_ad(i == 0);
break;
}
} else if (dict_index_is_unique(buf->index)) {
} else if (!history_row
&& dict_index_is_unique(buf->index)) {
row_merge_dup_t dup = {
buf->index, table, col_map, 0};

View file

@ -1789,12 +1789,8 @@ row_update_for_mysql(row_prebuilt_t* prebuilt)
ut_ad(!prebuilt->versioned_write || node->table->versioned());
if (prebuilt->versioned_write) {
if (node->is_delete == VERSIONED_DELETE) {
if (prebuilt->versioned_write && node->is_delete == VERSIONED_DELETE) {
node->vers_make_delete(trx);
} else if (node->update->affects_versioned()) {
node->vers_make_update(trx);
}
}
for (;;) {

View file

@ -3172,9 +3172,8 @@ error_handling:
void thd_get_query_start_data(THD *thd, char *buf);
/** Appends row_start or row_end field to update vector and sets a
CURRENT_TIMESTAMP/trx->id value to it.
Supposed to be called only by make_versioned_update() and
make_versioned_delete().
CURRENT_TIMESTAMP/trx->id value to it. Called by vers_make_update() and
vers_make_delete().
@param[in] trx transaction
@param[in] vers_sys_idx table->row_start or table->row_end */
void upd_node_t::vers_update_fields(const trx_t *trx, ulint idx)

Some files were not shown because too many files have changed in this diff Show more