Merge 10.5 into 10.6

Marko Mäkelä 2021-02-02 15:15:53 +02:00
commit 1110beccd4
57 changed files with 1814 additions and 1446 deletions

View file

@@ -51,6 +51,7 @@ static my_bool upgrade_from_mysql;
static DYNAMIC_STRING ds_args;
static DYNAMIC_STRING conn_args;
static DYNAMIC_STRING ds_plugin_data_types;
static char *opt_password= 0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
@@ -187,6 +188,7 @@ static void free_used_memory(void)
dynstr_free(&ds_args);
dynstr_free(&conn_args);
dynstr_free(&ds_plugin_data_types);
if (cnf_file_path)
my_delete(cnf_file_path, MYF(MY_WME));
}
@@ -965,6 +967,73 @@ static my_bool from_before_10_1()
}
static void uninstall_plugins(void)
{
if (ds_plugin_data_types.length)
{
char *plugins= ds_plugin_data_types.str;
char *next= get_line(plugins);
char buff[512];
while(*plugins)
{
if (next[-1] == '\n')
next[-1]= 0;
verbose("uninstalling plugin for %s data type", plugins);
strxnmov(buff, sizeof(buff)-1, "UNINSTALL SONAME ", plugins,"", NULL);
run_query(buff, NULL, TRUE);
plugins= next;
next= get_line(next);
}
}
}
/**
@brief Install plugins for missing data types
@details Check for entries with "Unknown data type" in I_S.TABLES,
try to load plugins for these tables if available (MDEV-24093)
@return Operation status
@retval TRUE - error
@retval FALSE - success
*/
static int install_used_plugin_data_types(void)
{
DYNAMIC_STRING ds_result;
const char *query = "SELECT table_comment FROM information_schema.tables"
" WHERE table_comment LIKE 'Unknown data type: %'";
if (init_dynamic_string(&ds_result, "", 512, 512))
die("Out of memory");
run_query(query, &ds_result, TRUE);
if (ds_result.length)
{
char *line= ds_result.str;
char *next= get_line(line);
while(*line)
{
if (next[-1] == '\n')
next[-1]= 0;
if (strstr(line, "'MYSQL_JSON'"))
{
verbose("installing plugin for MYSQL_JSON data type");
if(!run_query("INSTALL SONAME 'type_mysql_json'", NULL, TRUE))
{
dynstr_append(&ds_plugin_data_types, "'type_mysql_json'");
dynstr_append(&ds_plugin_data_types, "\n");
break;
}
else
{
fprintf(stderr, "... can't %s\n", "INSTALL SONAME 'type_mysql_json'");
return 1;
}
}
line= next;
next= get_line(next);
}
}
dynstr_free(&ds_result);
return 0;
}
/*
Check for entries with "Unknown storage engine" in I_S.TABLES,
try to load plugins for these tables if available (MDEV-11942)
@@ -1218,7 +1287,8 @@ int main(int argc, char **argv)
}
if (init_dynamic_string(&ds_args, "", 512, 256) ||
init_dynamic_string(&conn_args, "", 512, 256))
init_dynamic_string(&conn_args, "", 512, 256) ||
init_dynamic_string(&ds_plugin_data_types, "", 512, 256))
die("Out of memory");
if (handle_options(&argc, &argv, my_long_options, get_one_option))
@@ -1281,6 +1351,7 @@ int main(int argc, char **argv)
*/
if (run_mysqlcheck_upgrade(TRUE) ||
install_used_engines() ||
install_used_plugin_data_types() ||
run_mysqlcheck_views() ||
run_sql_fix_privilege_tables() ||
run_mysqlcheck_fixnames() ||
@@ -1288,6 +1359,7 @@ int main(int argc, char **argv)
check_slave_repositories())
die("Upgrade failed" );
uninstall_plugins();
verbose("Phase %d/%d: Running 'FLUSH PRIVILEGES'", ++phase, phases_total);
if (run_query("FLUSH PRIVILEGES", NULL, TRUE))
die("Upgrade failed" );

View file

@@ -70,5 +70,5 @@ REPLACE_FOR_CLIENTS(CFLAGS "[DU]DBUG_OFF" "[DU]SAFE_MUTEX" "[DU]NDEBUG"
# Same for --libs
REPLACE_FOR_CLIENTS(LIBS "Wl,[^ ]*" lmtmalloc static-libcxa i-static static-intel)
REPLACE_FOR_CLIENTS(EMB_LIBS lmtmalloc static-libcxa i-static static-intel)
REPLACE_FOR_CLIENTS(EMB_LIBS lmtmalloc static-libcxa i-static static-intel ltpool)

View file

@@ -152,7 +152,7 @@
# if defined(__i386__) || defined(__ppc__)
# define SIZEOF_CHARP 4
# define SIZEOF_LONG 4
# elif defined(__x86_64__) || defined(__ppc64__)
# elif defined(__x86_64__) || defined(__ppc64__) || defined(__aarch64__) || defined(__arm64__)
# define SIZEOF_CHARP 8
# define SIZEOF_LONG 8
# else

View file

@@ -0,0 +1,3 @@
if (!$TYPE_MYSQL_JSON_SO) {
skip Need MYSQL_JSON plugin;
}

View file

@@ -0,0 +1,2 @@
--loose-type_mysql_json
--plugin-load-add=$TYPE_MYSQL_JSON_SO

View file

@@ -0,0 +1,198 @@
#
# MDEV-24093: Detect during mysql_upgrade if type_mysql_json.so
# is needed and load it
#
SET NAMES utf8;
show create table mysql_json_test;
ERROR HY000: Unknown data type: 'MYSQL_JSON'
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.innodb_table_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.roles_mapping OK
mysql.servers OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
Repairing tables
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.innodb_table_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
installing plugin for MYSQL_JSON data type
Phase 3/7: Fixing views
mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
Processing databases
information_schema
mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
performance_schema
test
test.mysql_json_test Needs upgrade
test.mysql_json_test_big Needs upgrade
Repairing tables
test.mysql_json_test OK
test.mysql_json_test_big OK
uninstalling plugin for 'type_mysql_json' data type
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
show create table mysql_json_test;
Table Create Table
mysql_json_test CREATE TABLE `mysql_json_test` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
select * from mysql_json_test;
description expected actual
Raw integers as JSON 0 0
Raw integers as JSON -127 -127
Raw integers as JSON 128 128
Raw integers as JSON 32767 32767
Raw integers as JSON -32768 -32768
Raw integers as JSON 65535 65535
Raw integers as JSON 65536 65536
Raw integers as JSON -2147483648 -2147483648
Raw integers as JSON 2147483647 2147483647
Raw integers as JSON 4294967295 4294967295
Raw integers as JSON -9223372036854775807 -9223372036854775807
Raw integers as JSON 9223372036854775807 9223372036854775807
Raw integers as JSON 18446744073709551615 18446744073709551615
Raw doubles as JSON 3.14 3.14
Raw doubles as JSON -5678.987 -5678.987
Raw doubles as JSON -2.2250738585072014e-308 -2.2250738585072014e-308
Raw doubles as JSON 2.2250738585072014e-308 2.2250738585072014e-308
Simple JSON test {"key1": "val1", "key2": "val2"} {"key1": "val1", "key2": "val2"}
Raw doubles as JSON 0.0 0.0
Simple Array as Value {"a": [1, 2], "b": ["x", "y"]} {"a": [1, 2], "b": ["x", "y"]}
Simple Array as Base Key [1, 2, 3, 4, 5, [], "a", "b", "c"] [1, 2, 3, 4, 5, [], "a", "b", "c"]
GeoJSON {"type": "MultiPoint", "coordinates": [[1, 1], [2, 2], [3, 3]]} {"type": "MultiPoint", "coordinates": [[1, 1], [2, 2], [3, 3]]}
GeoJSON {"type": "LineString", "coordinates": [[0, 5], [5, 10], [10, 15]]} {"type": "LineString", "coordinates": [[0, 5], [5, 10], [10, 15]]}
GeoJSON {"type": "GeometryCollection", "geometries": []} {"type": "GeometryCollection", "geometries": []}
GeoJSON {"type": "Point", "coordinates": [11.1111, 12.22222]} {"type": "Point", "coordinates": [11.1111, 12.22222]}
Opaque Types: opaque_mysql_type_set "b,c" "b,c"
Opaque Types: opaque_mysql_type_enum "b" "b"
Opaque Types: opaque_mysql_type_date "2015-01-15" "2015-01-15"
Opaque Types: opaque_mysql_type_time "23:24:25.000000" "23:24:25.000000"
Opaque Types: opaque_mysql_type_datetime "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000"
Opaque Types: opaque_mysql_type_geom {"type": "Point", "coordinates": [1, 1]} {"type": "Point", "coordinates": [1, 1]}
Opaque Types: opaque_mysql_type_bit "base64:type16:yv4=" "base64:type16:yv4="
Opaque Types: opaque_mysql_type_year "base64:type13:MjAxOQ==" "base64:type13:MjAxOQ=="
Opaque Types: opaque_mysql_type_blob "base64:type252:yv66vg==" "base64:type252:yv66vg=="
Opaque Types: opaque_mysql_type_longblob "base64:type251:yv66vg==" "base64:type251:yv66vg=="
Opaque Types: opaque_mysql_type_mediumblob "base64:type250:yv66vg==" "base64:type250:yv66vg=="
Opaque Types: opaque_mysql_type_tinyblob "base64:type249:yv66vg==" "base64:type249:yv66vg=="
Opaque Types: opaque_mysql_type_varchar "base64:type15:Zm9v" "base64:type15:Zm9v"
DateTime as Raw Value: "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000"
Opaque Types: opaque_mysql_type_varbinary "base64:type15:YWJj" "base64:type15:YWJj"
Opaque Types: opaque_mysql_type_binary "base64:type254:YWJjAAAAAAAAAA==" "base64:type254:YWJjAAAAAAAAAA=="
DateTime as Raw Value: "23:24:25.000000" "23:24:25.000000"
DateTime as Raw Value: "2015-01-15" "2015-01-15"
DateTime as Raw Value: "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000"
UTF8 Characters: {"Person": "EMP", "details": {"Name": "Anel Husaković - test: đžšćč"}} {"Person": "EMP", "details": {"Name": "Anel Husaković - test: đžšćč"}}
UTF8 Characters: "Anel Husaković - test: đžšćč" "Anel Husaković - test: đžšćč"
UTF8 Characters: {"Name": "Anel Husaković - test: đžšćč"} {"Name": "Anel Husaković - test: đžšćč"}
UTF8 Characters: {"details": {"Name": "Anel Husaković - test: đžšćč"}, "\"Anel Husaković - test: đžšćč\"": "EMP"} {"details": {"Name": "Anel Husaković - test: đžšćč"}, "\"Anel Husaković - test: đžšćč\"": "EMP"}
Special Characters: {"{": "}"} {"{": "}"}
Special Characters: "key1 - with \" val " "key1 - with \" val "
Special Characters: {"key1 and \n\"key2\"": "val1\t val2"} {"key1 and \n\"key2\"": "val1\t val2"}
Special Characters: "'" "'"
Special Characters: "q" "q"
Special Characters: {"[": "]"} {"[": "]"}
Special Characters: {"{": "}"} {"{": "}"}
Empty JSON Object/Array: [] []
Special Characters: "some_string" "some_string"
Special Characters: "'" "'"
Special Characters: "\"" "\""
Special Characters: "" ""
Special Characters: "'" "'"
Special Characters: "''" "''"
Empty JSON Object/Array: {} {}
Special Characters: "f" "f"
Special Characters: "\\" "\\"
Special Characters: "\n" "\n"
Special Characters: "\f" "\f"
Special Characters: "\t" "\t"
Special Characters: "\r" "\r"
Special Characters: "\b" "\b"
Special Characters: "\\b" "\\b"
Special Characters: {"key \n key": "val \n val"} {"key \n key": "val \n val"}
Special Characters: {"key \f key": "val \f val"} {"key \f key": "val \f val"}
Special Characters: {"key \t key": "val \t val"} {"key \t key": "val \t val"}
Special Characters: {"key \r key": "val \r val"} {"key \r key": "val \r val"}
Special Characters: {"key \b key": "val \b val"} {"key \b key": "val \b val"}
Special Characters: {"key \\0 key": "val \n val"} {"key \\0 key": "val \n val"}
Special Characters: {"key \\ key": "val \\ val"} {"key \\ key": "val \\ val"}
Special Characters: {"key \" key": "val \" val"} {"key \" key": "val \" val"}
Special Characters: {"key ' key": "val ' val"} {"key ' key": "val ' val"}
Special Characters: {"key \\Z key": "val ' val"} {"key \\Z key": "val ' val"}
Special Characters: ["a \f b", "c \f d"] ["a \f b", "c \f d"]
Special Characters: ["a \t b", "c \t d"] ["a \t b", "c \t d"]
Special Characters: ["a \r b", "c \r d"] ["a \r b", "c \r d"]
Special Characters: ["a \b b", "c \b d"] ["a \b b", "c \b d"]
Special Characters: ["a \\ b", "c \\ d"] ["a \\ b", "c \\ d"]
Special Characters: ["a \" b", "c \" d"] ["a \" b", "c \" d"]
Special Characters: ["a ' b", "c ' d"] ["a ' b", "c ' d"]
Special String Cases: {"": ""} {"": ""}
Special String Cases: [""] [""]
Raw LITERALS: true true
Raw LITERALS: false false
Raw LITERALS: null null
JSON LITERALS: {"val": true} {"val": true}
JSON LITERALS: {"val": false} {"val": false}
JSON LITERALS: {"val": null} {"val": null}
Timestamp as RawValue "2019-12-26 19:56:03.000000" "2019-12-26 19:56:03.000000"
Array LITERALS: ["prefix", null, "suffix", 1] ["prefix", null, "suffix", 1]
Array LITERALS: ["prefix", false, "suffix", 1] ["prefix", false, "suffix", 1]
Array LITERALS: ["prefix", true, "suffix", 1] ["prefix", true, "suffix", 1]
show create table mysql_json_test_big;
Table Create Table
mysql_json_test_big CREATE TABLE `mysql_json_test_big` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
select * from mysql.plugin;
name dl
drop table mysql_json_test;
drop table mysql_json_test_big;

View file

@@ -0,0 +1,32 @@
--echo #
--echo # MDEV-24093: Detect during mysql_upgrade if type_mysql_json.so
--echo # is needed and load it
--echo #
-- source include/have_utf8.inc
-- source include/mysql_upgrade_preparation.inc
SET NAMES utf8;
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file std_data/mysql_json/mysql_json_test.frm $MYSQLD_DATADIR/test/mysql_json_test.frm
--copy_file std_data/mysql_json/mysql_json_test.MYI $MYSQLD_DATADIR/test/mysql_json_test.MYI
--copy_file std_data/mysql_json/mysql_json_test.MYD $MYSQLD_DATADIR/test/mysql_json_test.MYD
--copy_file std_data/mysql_json/mysql_json_test_big.frm $MYSQLD_DATADIR/test/mysql_json_test_big.frm
--copy_file std_data/mysql_json/mysql_json_test_big.MYI $MYSQLD_DATADIR/test/mysql_json_test_big.MYI
--copy_file std_data/mysql_json/mysql_json_test_big.MYD $MYSQLD_DATADIR/test/mysql_json_test_big.MYD
--error ER_UNKNOWN_DATA_TYPE
show create table mysql_json_test;
--exec $MYSQL_UPGRADE --force 2>&1
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
show create table mysql_json_test;
select * from mysql_json_test;
show create table mysql_json_test_big;
select * from mysql.plugin;
drop table mysql_json_test;
drop table mysql_json_test_big;
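
A hedged reading of the expected output above: because the plugin is installed only for the duration of the upgrade and uninstalled again at the end, it must not stay registered once mysql_upgrade finishes, which is what the final catalog check in this test confirms:

SELECT * FROM mysql.plugin;
# expected: only the "name dl" header, no rows, i.e. type_mysql_json is not left permanently installed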

View file

@@ -0,0 +1,95 @@
#
# MDEV-24093: Detect during mysql_upgrade if type_mysql_json.so
# is needed and load it
#
SET NAMES utf8;
call mtr.add_suppression("Table rebuild required");
show create table mysql_json_test;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test` FORCE" or dump/reload to fix it!
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.innodb_table_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.roles_mapping OK
mysql.servers OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
Repairing tables
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.innodb_table_stats
Error : Unknown storage engine 'InnoDB'
error : Corrupt
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
Processing databases
information_schema
mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
performance_schema
test
test.mysql_json_test Needs upgrade
test.mysql_json_test_big Needs upgrade
Repairing tables
test.mysql_json_test OK
test.mysql_json_test_big OK
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
show create table mysql_json_test;
Table Create Table
mysql_json_test CREATE TABLE `mysql_json_test` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
show create table mysql_json_test_big;
Table Create Table
mysql_json_test_big CREATE TABLE `mysql_json_test_big` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
select * from mysql.plugin;
name dl
drop table mysql_json_test;
drop table mysql_json_test_big;

View file

@@ -0,0 +1,35 @@
--echo #
--echo # MDEV-24093: Detect during mysql_upgrade if type_mysql_json.so
--echo # is needed and load it
--echo #
# Let's now load plugin first
-- source include/have_utf8.inc
-- source include/have_type_mysql_json.inc
-- source include/mysql_upgrade_preparation.inc
SET NAMES utf8;
call mtr.add_suppression("Table rebuild required");
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file std_data/mysql_json/mysql_json_test.frm $MYSQLD_DATADIR/test/mysql_json_test.frm
--copy_file std_data/mysql_json/mysql_json_test.MYI $MYSQLD_DATADIR/test/mysql_json_test.MYI
--copy_file std_data/mysql_json/mysql_json_test.MYD $MYSQLD_DATADIR/test/mysql_json_test.MYD
--copy_file std_data/mysql_json/mysql_json_test_big.frm $MYSQLD_DATADIR/test/mysql_json_test_big.frm
--copy_file std_data/mysql_json/mysql_json_test_big.MYI $MYSQLD_DATADIR/test/mysql_json_test_big.MYI
--copy_file std_data/mysql_json/mysql_json_test_big.MYD $MYSQLD_DATADIR/test/mysql_json_test_big.MYD
# In the previous example (mysql_json_mysql_upgrade.test)
# instead of ER_TABLE_NEEDS_REBUILD we had ER_UNKNOWN_DATA_TYPE
--error ER_TABLE_NEEDS_REBUILD
show create table mysql_json_test;
--exec $MYSQL_UPGRADE --force 2>&1
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
show create table mysql_json_test;
show create table mysql_json_test_big;
select * from mysql.plugin;
drop table mysql_json_test;
drop table mysql_json_test_big;

View file

@@ -1,8 +1,5 @@
--source include/have_utf8.inc
if (!$TYPE_MYSQL_JSON_SO) {
skip Need MYSQL_JSON plugin;
}
--source include/have_type_mysql_json.inc
--echo #
--echo # The following test takes 2 tables containing a JSON column and attempts

View file

@@ -3,7 +3,6 @@ call mtr.add_suppression("is marked as crashed");
call mtr.add_suppression("Checking");
SET NAMES utf8;
set sql_mode="";
install soname 'type_mysql_json';
show create table tempty;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.tempty` FORCE" or dump/reload to fix it!
show create table mysql_json_test;
@@ -103,4 +102,3 @@ Total_Number_of_Tests Succesful_Tests String_is_valid_JSON
drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
uninstall soname 'type_mysql_json';

View file

@@ -1,10 +1,7 @@
-- source include/mysql_upgrade_preparation.inc
-- source include/have_working_dns.inc
-- source include/have_innodb.inc
if (!$TYPE_MYSQL_JSON_SO) {
skip Need MYSQL_JSON plugin;
}
-- source include/have_type_mysql_json.inc
call mtr.add_suppression("Table rebuild required");
call mtr.add_suppression("is marked as crashed");
@@ -28,8 +25,6 @@ SET NAMES utf8;
set sql_mode="";
install soname 'type_mysql_json';
--error ER_TABLE_NEEDS_REBUILD
show create table tempty;
--error ER_TABLE_NEEDS_REBUILD
@@ -63,5 +58,4 @@ drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
uninstall soname 'type_mysql_json';
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info

View file

@@ -681,6 +681,9 @@ The following specify which files/extra groups are read (specified before remain
max_connections*5 or max_connections + table_cache*2
(whichever is larger) number of file descriptors
(Automatically configured unless set explicitly)
--optimizer-max-sel-arg-weight=#
The maximum weight of the SEL_ARG graph. Set to 0 for no
limit
--optimizer-prune-level=#
Controls the heuristic(s) applied during query
optimization to prune less-promising partial plans from
@@ -1637,6 +1640,7 @@ old-alter-table DEFAULT
old-mode
old-passwords FALSE
old-style-user-limits FALSE
optimizer-max-sel-arg-weight 32000
optimizer-prune-level 1
optimizer-search-depth 62
optimizer-selectivity-sampling-limit 100
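
The new optimizer_max_sel_arg_weight option is exercised by the range test further below: with four IN lists of 20 values each, a full range tree would describe 20*20*20*20 = 160,000 four-component ranges (the test's own comment makes this calculation), and presumably because that exceeds the default cap of 32000, the expected trace output shows only three-component ranges, kp4 having been cut away. A usage sketch, based only on the help text above and the plain SET statements used in that test:

# lower the cap for one session to force earlier pruning (the test uses 20 and 120)
SET SESSION optimizer_max_sel_arg_weight = 20;
# 0 disables the limit, per the help text
SET SESSION optimizer_max_sel_arg_weight = 0;
# return to the default (32000 in the listing above)
SET SESSION optimizer_max_sel_arg_weight = DEFAULT;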

View file

@@ -35,3 +35,193 @@ json_detailed(JSON_EXTRACT(trace, '$**.ranges'))
]
set optimizer_trace=@tmp_21958;
drop table t2;
#
# MDEV-9750: Quick memory exhaustion with 'extended_keys=on'...
#
create table t1 (
kp1 int,
kp2 int,
kp3 int,
kp4 int,
key key1(kp1, kp2, kp3,kp4)
);
insert into t1 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show variables like 'optimizer_max_sel_arg_weight';
Variable_name Value
optimizer_max_sel_arg_weight 32000
set @tmp_9750=@@optimizer_trace;
set optimizer_trace=1;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp2 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp3 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp4 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 20 NULL 3 Using where; Using index
set @json= (select json_detailed(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
from information_schema.optimizer_trace);
# This will show 3-component ranges.
# The ranges were produced, but the optimizer has cut away kp4
# to keep the number of ranges at manageable level:
select left(@json, 500);
left(@json, 500)
[
[
{
"index": "key1",
"ranges":
[
"(1,1,1) <= (kp1,kp2,kp3) <= (1,1,1)",
"(1,1,2) <= (kp1,kp2,kp3) <= (1,1,2)",
"(1,1,3) <= (kp1,kp2,kp3) <= (1,1,3)",
"(1,1,4) <= (kp1,kp2,kp3) <= (1,1,4)",
"(1,1,5) <= (kp1,kp2,kp3) <= (1,1,5)",
"(1,1,6) <= (kp1,kp2,kp3) <= (1,1,6)",
"(1,1,7) <= (kp1,kp2,kp3) <= (1,1,7)",
"
## Repeat the above with low max_weight:
set @tmp9750_weight=@@optimizer_max_sel_arg_weight;
set optimizer_max_sel_arg_weight=20;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10) and
kp2 in (1,2,3,4,5,6,7,8,9,10) and
kp3 in (1,2,3,4,5,6,7,8,9,10) and
kp4 in (1,2,3,4,5,6,7,8,9,10)
;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 20 NULL 3 Using where; Using index
set @trace= (select trace from information_schema.optimizer_trace);
set @json= json_detailed(json_extract(@trace, '$**.range_scan_alternatives'));
select left(@json, 500);
left(@json, 500)
[
[
{
"index": "key1",
"ranges":
[
"(1) <= (kp1) <= (1)",
"(2) <= (kp1) <= (2)",
"(3) <= (kp1) <= (3)",
"(4) <= (kp1) <= (4)",
"(5) <= (kp1) <= (5)",
"(6) <= (kp1) <= (6)",
"(7) <= (kp1) <= (7)",
"(8) <= (kp1) <= (8)",
"(9) <= (kp1) <= (9)",
"(10) <= (kp1) <= (10)"
set @json= json_detailed(json_extract(@trace, '$**.setup_range_conditions'));
select left(@json, 2500);
left(@json, 2500)
[
[
{
"sel_arg_weight_heuristic":
{
"key1_field": "kp1",
"key2_field": "kp2",
"key1_weight": 10,
"key2_weight": 10
}
},
{
"sel_arg_weight_heuristic":
{
"key1_field": "kp1",
"key2_field": "kp3",
"key1_weight": 10,
"key2_weight": 10
}
},
{
"sel_arg_weight_heuristic":
{
"key1_field": "kp1",
"key2_field": "kp4",
"key1_weight": 10,
"key2_weight": 10
}
}
]
]
## Repeat the above with a bit higher max_weight:
set @tmp9750_weight=@@optimizer_max_sel_arg_weight;
set optimizer_max_sel_arg_weight=120;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10) and
kp2 in (1,2,3,4,5,6,7,8,9,10) and
kp3 in (1,2,3,4,5,6,7,8,9,10) and
kp4 in (1,2,3,4,5,6,7,8,9,10)
;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 20 NULL 3 Using where; Using index
set @json= (select json_detailed(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
from information_schema.optimizer_trace);
select left(@json, 1500);
left(@json, 1500)
[
[
{
"index": "key1",
"ranges":
[
"(1,1) <= (kp1,kp2) <= (1,1)",
"(1,2) <= (kp1,kp2) <= (1,2)",
"(1,3) <= (kp1,kp2) <= (1,3)",
"(1,4) <= (kp1,kp2) <= (1,4)",
"(1,5) <= (kp1,kp2) <= (1,5)",
"(1,6) <= (kp1,kp2) <= (1,6)",
"(1,7) <= (kp1,kp2) <= (1,7)",
"(1,8) <= (kp1,kp2) <= (1,8)",
"(1,9) <= (kp1,kp2) <= (1,9)",
"(1,10) <= (kp1,kp2) <= (1,10)",
"(2,1) <= (kp1,kp2) <= (2,1)",
"(2,2) <= (kp1,kp2) <= (2,2)",
"(2,3) <= (kp1,kp2) <= (2,3)",
"(2,4) <= (kp1,kp2) <= (2,4)",
"(2,5) <= (kp1,kp2) <= (2,5)",
"(2,6) <= (kp1,kp2) <= (2,6)",
"(2,7) <= (kp1,kp2) <= (2,7)",
"(2,8) <= (kp1,kp2) <= (2,8)",
"(2,9) <= (kp1,kp2) <= (2,9)",
"(2,10) <= (kp1,kp2) <= (2,10)",
"(3,1) <= (kp1,kp2) <= (3,1)",
"(3,2) <= (kp1,kp2) <= (3,2)",
"(3,3) <= (kp1,kp2) <= (3,3)",
"(3,4) <= (kp1,kp2) <= (3,4)",
"(3,5) <= (kp1,kp2) <= (3,5)",
"(3,6) <= (kp1,kp2) <= (3,6)",
"(3,7) <= (kp1,kp2) <= (3,7)",
"(3,8) <= (kp1,kp2) <= (3,8)",
"(3,9) <= (kp1,kp2) <= (3,9)",
"(3,10) <= (kp1,kp2
set optimizer_max_sel_arg_weight= @tmp9750_weight;
set optimizer_trace=@tmp_9750;
drop table t1;
#
# MDEV-24739: Assertion `root->weight >= ...' failed in SEL_ARG::tree_delete
#
SELECT *
FROM mysql.help_relation
WHERE NOT (help_topic_id != 8 AND help_keyword_id != 0 OR help_keyword_id = 2 OR help_topic_id < 1900);
help_topic_id help_keyword_id
SELECT *
FROM mysql.help_relation ignore index (help_topic_id)
WHERE (help_topic_id = 8 OR help_keyword_id = 0) AND help_keyword_id != 2 AND help_topic_id >= 1900;
help_topic_id help_keyword_id

View file

@@ -31,3 +31,82 @@ from information_schema.optimizer_trace;
set optimizer_trace=@tmp_21958;
drop table t2;
--echo #
--echo # MDEV-9750: Quick memory exhaustion with 'extended_keys=on'...
--echo #
create table t1 (
kp1 int,
kp2 int,
kp3 int,
kp4 int,
key key1(kp1, kp2, kp3,kp4)
);
insert into t1 values (1,1,1,1),(2,2,2,2),(3,3,3,3);
analyze table t1;
show variables like 'optimizer_max_sel_arg_weight';
# 20 * 20 * 20 *20 = 400*400 = 160,000 ranges
set @tmp_9750=@@optimizer_trace;
set optimizer_trace=1;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp2 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp3 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20) and
kp4 in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
;
set @json= (select json_detailed(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
from information_schema.optimizer_trace);
--echo # This will show 3-component ranges.
--echo # The ranges were produced, but the optimizer has cut away kp4
--echo # to keep the number of ranges at manageable level:
select left(@json, 500);
--echo ## Repeat the above with low max_weight:
set @tmp9750_weight=@@optimizer_max_sel_arg_weight;
set optimizer_max_sel_arg_weight=20;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10) and
kp2 in (1,2,3,4,5,6,7,8,9,10) and
kp3 in (1,2,3,4,5,6,7,8,9,10) and
kp4 in (1,2,3,4,5,6,7,8,9,10)
;
set @trace= (select trace from information_schema.optimizer_trace);
set @json= json_detailed(json_extract(@trace, '$**.range_scan_alternatives'));
select left(@json, 500);
set @json= json_detailed(json_extract(@trace, '$**.setup_range_conditions'));
select left(@json, 2500);
--echo ## Repeat the above with a bit higher max_weight:
set @tmp9750_weight=@@optimizer_max_sel_arg_weight;
set optimizer_max_sel_arg_weight=120;
explain select * from t1 where
kp1 in (1,2,3,4,5,6,7,8,9,10) and
kp2 in (1,2,3,4,5,6,7,8,9,10) and
kp3 in (1,2,3,4,5,6,7,8,9,10) and
kp4 in (1,2,3,4,5,6,7,8,9,10)
;
set @json= (select json_detailed(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
from information_schema.optimizer_trace);
select left(@json, 1500);
set optimizer_max_sel_arg_weight= @tmp9750_weight;
set optimizer_trace=@tmp_9750;
drop table t1;
--echo #
--echo # MDEV-24739: Assertion `root->weight >= ...' failed in SEL_ARG::tree_delete
--echo #
SELECT *
FROM mysql.help_relation
WHERE NOT (help_topic_id != 8 AND help_keyword_id != 0 OR help_keyword_id = 2 OR help_topic_id < 1900);
SELECT *
FROM mysql.help_relation ignore index (help_topic_id)
WHERE (help_topic_id = 8 OR help_keyword_id = 0) AND help_keyword_id != 2 AND help_topic_id >= 1900;

View file

@@ -31,7 +31,7 @@ Field Type Null Key Default Extra
GROUP_ID int(6) NO 0
POSITION int(6) NO 0
PRIORITY int(1) NO 0
CONNECTION_ID bigint(19) unsigned NO 0
CONNECTION_ID bigint(19) unsigned YES NULL
QUEUEING_TIME_MICROSECONDS bigint(19) NO 0
DESC INFORMATION_SCHEMA.THREAD_POOL_STATS;
Field Type Null Key Default Extra

View file

@@ -0,0 +1,96 @@
SHOW VARIABLES LIKE 'innodb_encrypt%';
Variable_name Value
innodb_encrypt_log ON
innodb_encrypt_tables ON
innodb_encrypt_temporary_tables OFF
innodb_encryption_rotate_key_age 1
innodb_encryption_rotation_iops 100
innodb_encryption_threads 1
SET GLOBAL innodb_encrypt_tables = ON;
CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
f3 int not null, index(f1), index idx_1(f2),
index(f2, f3)) ENGINE=InnoDB;
# Wait max 10 min for key encryption threads to encrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
CREATE TABLE t2 (f1 int not null)engine=innodb;
# restart: --debug=d,ib_log_checkpoint_avoid
connect con1,localhost,root,,,;
begin;
insert into t2 values(1);
connection default;
set global innodb_encrypt_tables = OFF;
# Wait max 10 min for key encryption threads to decrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
test/t2
alter table t1 drop index idx_1;
set global innodb_encrypt_tables = ON;
# Wait max 10 min for key encryption threads to encrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
test/t2
disconnect con1;
# restart: --debug=d,ib_log_checkpoint_avoid
drop table t1, t2;
CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
f3 int not null, index(f1), index idx_1(f2),
index(f2, f3)) ENGINE=InnoDB;
# Wait max 10 min for key encryption threads to encrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
CREATE TABLE t2 (f1 int not null)engine=innodb;
# restart: --debug=d,ib_log_checkpoint_avoid
connect con1,localhost,root,,,;
begin;
insert into t2 values(1);
connection default;
set global innodb_encrypt_tables = OFF;
# Wait max 10 min for key encryption threads to decrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
test/t2
alter table t1 drop index idx_1;
disconnect con1;
# restart: --debug=d,ib_log_checkpoint_avoid
connect con1,localhost,root,,,;
begin;
insert into t2 values(1);
connection default;
set global innodb_encrypt_tables = ON;
# Wait max 10 min for key encryption threads to encrypt all spaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
NAME
innodb_system
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
test/t1
test/t2
disconnect con1;
drop table t2, t1;

View file

@ -0,0 +1,4 @@
--innodb-encrypt-tables
--innodb-encrypt-log
--innodb-encryption-threads=1
--innodb-tablespaces-encryption

View file

@@ -0,0 +1,117 @@
--source include/have_innodb.inc
--source include/have_example_key_management_plugin.inc
--source include/have_debug.inc
--source include/not_embedded.inc
SHOW VARIABLES LIKE 'innodb_encrypt%';
SET GLOBAL innodb_encrypt_tables = ON;
CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
f3 int not null, index(f1), index idx_1(f2),
index(f2, f3)) ENGINE=InnoDB;
--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
CREATE TABLE t2 (f1 int not null)engine=innodb;
let $restart_parameters="--debug=d,ib_log_checkpoint_avoid";
--source include/restart_mysqld.inc
# Stop the purge
connect(con1,localhost,root,,,);
begin;
insert into t2 values(1);
connection default;
--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
set global innodb_encrypt_tables = OFF;
--echo # Wait max 10 min for key encryption threads to decrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
# Free the index `idx_1`
alter table t1 drop index idx_1;
set global innodb_encrypt_tables = ON;
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
disconnect con1;
let $shutdown_timeout=0;
--source include/restart_mysqld.inc
drop table t1, t2;
#
#
CREATE TABLE t1(f1 BIGINT PRIMARY KEY, f2 int not null,
f3 int not null, index(f1), index idx_1(f2),
index(f2, f3)) ENGINE=InnoDB;
--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
CREATE TABLE t2 (f1 int not null)engine=innodb;
--source include/restart_mysqld.inc
# Stop the purge
connect(con1,localhost,root,,,);
begin;
insert into t2 values(1);
connection default;
--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
set global innodb_encrypt_tables = OFF;
--echo # Wait max 10 min for key encryption threads to decrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
# Free the index `idx_1`
alter table t1 drop index idx_1;
disconnect con1;
--source include/restart_mysqld.inc
# Stop the purge
connect(con1,localhost,root,,,);
begin;
insert into t2 values(1);
connection default;
set global innodb_encrypt_tables = ON;
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
--sorted_result
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
disconnect con1;
drop table t2, t1;
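
All of the wait blocks above poll the same metadata, and the begin/insert on con1 is, per its own comment, only there to stop the purge across the restarts. As a sketch, the state the test waits for can be inspected by hand with the same view and predicates:

SELECT NAME, MIN_KEY_VERSION
FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION;
# MIN_KEY_VERSION <> 0 : the encryption threads have encrypted the tablespace
# MIN_KEY_VERSION = 0  : the tablespace has been decrypted (after SET GLOBAL innodb_encrypt_tables = OFF)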

View file

@@ -14,7 +14,7 @@ SHOW VARIABLES LIKE 'innodb_encrypt%';
SET GLOBAL innodb_encrypt_tables = ON;
--let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces + 1 from information_schema.tables where engine = 'InnoDB'`
--echo # Wait max 10 min for key encryption threads to encrypt all spaces
--let $wait_timeout= 600

View file

@@ -35,6 +35,7 @@ galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade
galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key
galera_toi_ddl_nonconflicting : MDEV-21518 galera.galera_toi_ddl_nonconflicting
galera_toi_truncate : MDEV-22996 Hang on galera_toi_truncate test case
galera_trigger : MDEV-24048 galera.galera_trigger MTR fails: Result content mismatch
galera_var_node_address : MDEV-20485 Galera test failure
galera_var_notify_cmd : MDEV-21905 Galera test galera_var_notify_cmd causes hang
galera_var_reject_queries : assertion in inline_mysql_socket_send

View file

@@ -10,8 +10,8 @@
#
##############################################################################
GCF-354 : MDEV-24010 galera_3nodes.GCF-354 MTR fails : WSREP has not yet prepared node for application use
galera_gtid_2_cluster : MDEV-23775 Galera test failure on galera_3nodes.galera_gtid_2_cluster
galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(}
galera_slave_options_do :MDEV-8798
galera_slave_options_ignore : MDEV-8798
galera_vote_rejoin_mysqldump : MDEV-24481: galera_3nodes.galera_vote_rejoin_mysqldump MTR failed: mysql_shutdown failed

View file

@@ -6,13 +6,14 @@ connection node_1;
connection node_2;
connection node_3;
connection node_2;
SET wsrep_on=OFF;
SET SESSION wsrep_on=OFF;
DROP SCHEMA test;
connection node_3;
SET SESSION wsrep_on=OFF;
CREATE TABLE test.t1 (f1 INTEGER) engine=innodb;
CREATE TABLE test.t1 (f1 INTEGER NOT NULL PRIMARY KEY) engine=innodb;
connection node_1;
CREATE TABLE test.t1 (f1 INTEGER) engine=innodb;
CREATE TABLE test.t1 (f1 INTEGER NOT NULL PRIMARY KEY) engine=innodb;
INSERT INTO test.t1 values (1);
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
wsrep_cluster_status Primary

View file

@@ -14,19 +14,20 @@
# 1. Create different inconsistencies on nodes 2 and 3
#
--connection node_2
SET wsrep_on=OFF;
SET SESSION wsrep_on=OFF;
DROP SCHEMA test;
--connection node_3
SET SESSION wsrep_on=OFF;
CREATE TABLE test.t1 (f1 INTEGER) engine=innodb;
CREATE TABLE test.t1 (f1 INTEGER NOT NULL PRIMARY KEY) engine=innodb;
#
# 2. The following should generate different errors on nodes 2 and 3 and
# trigger voting with 3 different votes. node_1 should remain alone
# in the cluster.
#
--connection node_1
CREATE TABLE test.t1 (f1 INTEGER) engine=innodb;
CREATE TABLE test.t1 (f1 INTEGER NOT NULL PRIMARY KEY) engine=innodb;
INSERT INTO test.t1 values (1);
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
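
Operationally, "node_1 should remain alone in the cluster" corresponds to the two checks these files already perform: the cluster size seen from node_1 drops to 1 after the vote, while its status stays Primary. A restatement of both checks in one place, as a sketch only:

--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
SHOW STATUS LIKE 'wsrep_cluster_status';
# expected: Primary, with nodes 2 and 3 no longer counted after the conflicting-vote round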

View file

@@ -11,5 +11,7 @@
##############################################################################
GCF-1060 : MDEV-20848 galera_sr.GCF_1060
GCF-585 : MDEV-24698 galera_sr.GCF-585 MTR failed with SIGABRT: no such a transition REPLICATING -> APPLYING
galera_sr_shutdown_master : MDEV-23612: galera_sr.galera_sr_shutdown_master MTR failed: WSREP_SST: [ERROR] Possible timeout in receving first data from donor in gtid stage

View file

@@ -121,5 +121,19 @@ SELECT index_name, stat_name, stat_description
FROM mysql.innodb_index_stats
WHERE database_name = 'test' AND table_name = 't';
index_name stat_name stat_description
# MDEV-24564 FIXME: Do reload statistics after the above ALTER TABLE!
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
GEN_CLUST_INDEX size Number of pages in the index
idxb n_diff_pfx01 b
idxb n_diff_pfx02 b,DB_ROW_ID
idxb n_leaf_pages Number of leaf pages in the index
idxb size Number of pages in the index
vidxe n_diff_pfx01 e
vidxe n_diff_pfx02 e,DB_ROW_ID
vidxe n_leaf_pages Number of leaf pages in the index
vidxe size Number of pages in the index
vidxf n_diff_pfx01 f
vidxf n_diff_pfx02 f,DB_ROW_ID
vidxf n_leaf_pages Number of leaf pages in the index
vidxf size Number of pages in the index
DROP TABLE t;

View file

@@ -52,6 +52,5 @@ ALTER TABLE t DROP INDEX vidxcd;
SELECT index_name, stat_name, stat_description
FROM mysql.innodb_index_stats
WHERE database_name = 'test' AND table_name = 't';
-- echo # MDEV-24564 FIXME: Do reload statistics after the above ALTER TABLE!
DROP TABLE t;

View file

@@ -11,4 +11,3 @@
##############################################################################
create-index-debug : MDEV-13680 InnoDB may crash when btr_page_alloc() fails
innodb_wl6326_big : MDEV-24661: no instrumentation since MDEV-24142

View file

@@ -8,3 +8,11 @@ PARTITION BY RANGE(a)
PARTITION pb VALUES LESS THAN (4));
ERROR HY000: Partitioned tables do not support FOREIGN KEY
DROP TABLE t1;
#
# MDEV-24754 Server crash in
# ha_partition_inplace_ctx::~ha_partition_inplace_ctx
#
CREATE TABLE t1 (id INT PRIMARY KEY, a INT, va INT AS (a) VIRTUAL)
ENGINE=InnoDB PARTITION BY HASH(id) PARTITIONS 2;
ALTER TABLE t1 ADD b INT, ALGORITHM=INSTANT;
DROP TABLE t1;

View file

@@ -1,449 +0,0 @@
CREATE SCHEMA my_schema;
USE my_schema;
CREATE FUNCTION f_thread_id (i INT) RETURNS CHAR(4) DETERMINISTIC
RETURN CONCAT(LPAD(CAST(i AS CHAR),3,'_'),'_') ;
SELECT CONCAT('->', f_thread_id( 1), '<-');
CONCAT('->', f_thread_id( 1), '<-')
->__1_<-
SELECT CONCAT('->', f_thread_id(12), '<-');
CONCAT('->', f_thread_id(12), '<-')
->_12_<-
SET @extra_int = 1;
SET @extra_string = f_thread_id(@extra_int);
SELECT @extra_int , @extra_string;
@extra_int @extra_string
1 __1_
CREATE FUNCTION f_col_int1 (i INT) RETURNS INT(20) DETERMINISTIC
RETURN i * 1000 + @extra_int ;
SELECT f_col_int1(my_col) AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
1001
12001
123001
1234001
12345001
CREATE FUNCTION f_col_int2 (i INT) RETURNS INT(20) DETERMINISTIC
RETURN @extra_int * 10000000 + i ;
SELECT f_col_int2(my_col) AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
10000001
10000012
10000123
10001234
10012345
CREATE FUNCTION f_col_int3 (i INT) RETURNS INT(20) DETERMINISTIC
RETURN @extra_int ;
SELECT f_col_int3(my_col) AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
1
1
1
1
1
CREATE FUNCTION f_col_blob (i INT) RETURNS BLOB DETERMINISTIC
RETURN RPAD(@extra_string,(@@innodb_page_size / 2 ) + 1,'a');
SELECT CONCAT('->', SUBSTR(f_col_blob(my_col) FROM 1 FOR 10),
'<-.....->', SUBSTR(f_col_blob(my_col) FROM -10 FOR 10), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
->__1_aaaaaa<-.....->aaaaaaaaaa<-
->__1_aaaaaa<-.....->aaaaaaaaaa<-
->__1_aaaaaa<-.....->aaaaaaaaaa<-
->__1_aaaaaa<-.....->aaaaaaaaaa<-
->__1_aaaaaa<-.....->aaaaaaaaaa<-
CREATE FUNCTION f_col_char0 (i INT) RETURNS CHAR(255) DETERMINISTIC
RETURN LPAD(CAST(i AS CHAR),255,' ');
SELECT CONCAT('->', f_col_char0(my_col), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
-> 1<-
-> 12<-
-> 123<-
-> 1234<-
-> 12345<-
CREATE FUNCTION f_col_char1 (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',
LPAD(SUBSTR(CAST(i AS CHAR),1,(LENGTH(CAST(i AS CHAR)) DIV 2)),10,' '),
@extra_string,
RPAD(SUBSTR(CAST(i AS CHAR), -((LENGTH(CAST(i AS CHAR)) + 1) DIV 2)),10,' '),
'E') ;
SELECT CONCAT('->', f_col_char1(my_col), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
->B __1_1 E<-
->B 1__1_2 E<-
->B 1__1_23 E<-
->B 12__1_34 E<-
->B 12__1_345 E<-
CREATE FUNCTION f_col_char2 (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',
RPAD(SUBSTR(CAST(i AS CHAR),1,(LENGTH(CAST(i AS CHAR)) DIV 2)),10,' '),
@extra_string,
LPAD(SUBSTR(CAST(i AS CHAR), -((LENGTH(CAST(i AS CHAR)) + 1) DIV 2)),10,' '),
'E');
SELECT CONCAT('->', f_col_char2(my_col), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
->B __1_ 1E<-
->B1 __1_ 2E<-
->B1 __1_ 23E<-
->B12 __1_ 34E<-
->B12 __1_ 345E<-
CREATE FUNCTION f_col_char3 (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',@extra_string,LPAD(CAST(i AS CHAR),20,' '),'E');
SELECT CONCAT('->', f_col_char3(my_col), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
->B__1_ 1E<-
->B__1_ 12E<-
->B__1_ 123E<-
->B__1_ 1234E<-
->B__1_ 12345E<-
CREATE FUNCTION f_col_char4 (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',RPAD(CAST(i AS CHAR),20,' '),@extra_string,'E');
SELECT CONCAT('->', f_col_char4(my_col), '<-') AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
my_result
->B1 __1_E<-
->B12 __1_E<-
->B123 __1_E<-
->B1234 __1_E<-
->B12345 __1_E<-
CREATE TABLE my_metrics LIKE information_schema.innodb_metrics;
ALTER TABLE my_metrics ADD COLUMN phase ENUM('after', 'before'),
DROP COLUMN SUBSYSTEM, DROP COLUMN TYPE, DROP COLUMN COMMENT,
ADD PRIMARY KEY (NAME,phase);
CREATE TABLE t1 (
col_int0 BIGINT,
col_int1 BIGINT,
col_int2 BIGINT,
col_int3 BIGINT,
col_blob BLOB,
col_char0 VARCHAR(255),
col_char1 VARCHAR(30),
col_char2 VARCHAR(30),
col_char3 VARCHAR(30),
col_char4 VARCHAR(30)
) ENGINE = InnoDB;
ALTER TABLE t1 ADD UNIQUE KEY uidx_col_int0 (col_int0),
ADD UNIQUE KEY uidx1 (col_int1, col_char0),
ADD UNIQUE KEY uidx2 (col_int2, col_char0, col_int1),
ADD UNIQUE KEY uidx3 (col_int3, col_int2, col_char0),
ADD UNIQUE KEY uidx4 (col_char1, col_char0),
ADD UNIQUE KEY uidx5 (col_char2, col_char0, col_char1),
ADD UNIQUE KEY uidx6 (col_char3, col_char2, col_char0),
ADD UNIQUE KEY uidx7 (col_int1, col_int2, col_int3, col_char4,
col_char1, col_char2, col_char3, col_char0),
ADD KEY idx8 (col_blob(10), col_char4);
CREATE PROCEDURE proc_fill_t1 (max_row_count INT, load_unit INT)
BEGIN
DECLARE my_count INTEGER DEFAULT 0;
DECLARE max_load_count INTEGER DEFAULT 0;
DROP TABLE IF EXISTS t0;
CREATE TEMPORARY TABLE t0 (col_int0 BIGINT, PRIMARY KEY(col_int0));
WHILE (my_count < load_unit ) DO
SET my_count = my_count + 1;
INSERT INTO t0 SET col_int0 = my_count;
END WHILE;
SET max_load_count = (SELECT (max_row_count DIV load_unit) + 1 );
SELECT COUNT(col_int0) INTO @val FROM t1;
SET my_count = 0;
REPEAT
INSERT INTO t1 (col_int0, col_int1, col_int2, col_int3, col_blob,
col_char0, col_char1, col_char2,col_char3,col_char4)
SELECT col_int0 + @val,
f_col_int1(col_int0 + @val),
f_col_int2(col_int0 + @val),
f_col_int3(col_int0 + @val),
f_col_blob(col_int0 + @val),
f_col_char0(col_int0 + @val),
f_col_char1(col_int0 + @val),
f_col_char2(col_int0 + @val),
f_col_char3(col_int0 + @val),
f_col_char4(col_int0 + @val)
FROM t0;
COMMIT;
SELECT MAX(col_int0) INTO @val FROM t1;
SET my_count = my_count + 1;
UNTIL( my_count > max_load_count OR @val >= max_row_count )
END REPEAT;
DROP TEMPORARY TABLE t0;
END|
CREATE PROCEDURE proc_dml (max_duration INT, t1_stripe_half INT)
BEGIN
DECLARE aux INTEGER DEFAULT 0;
DECLARE start_time INT;
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION, SQLWARNING, NOT FOUND BEGIN END;
SET @extra_int = CONNECTION_ID();
SET @extra_string = f_thread_id(@extra_int);
SELECT ROUND(MAX(col_int0) / 2 ) INTO @t1_half FROM t1;
# The user lock 'Blocker' should be already set by some other session S1.
# S1 starts the race by releasing that lock.
# Wait till the lock is released and the lock can be obtained.
# In order to prevent endless waiting in case of non foreseen problems
# limit the timespan to 30 seconds.
SELECT GET_LOCK('Blocker', 30) INTO @aux;
# Release the lock immediate so that the other "runner" sessions start too.
SELECT RELEASE_LOCK('Blocker') INTO @aux;
SET start_time = UNIX_TIMESTAMP();
WHILE (UNIX_TIMESTAMP() - start_time < max_duration) DO
SET @aux = @t1_half - t1_stripe_half + ROUND(RAND() * t1_stripe_half * 2);
UPDATE t1 SET
col_int1 = f_col_int1(col_int0),
col_int2 = f_col_int2(col_int0),
col_int3 = f_col_int3(col_int0),
col_blob = f_col_blob(col_int0),
col_char0 = f_col_char0(col_int0),
col_char1 = f_col_char1(col_int0),
col_char2 = f_col_char2(col_int0),
col_char3 = f_col_char3(col_int0),
col_char4 = f_col_char4(col_int0)
WHERE col_int0 = @aux;
COMMIT;
END WHILE;
END|
SET GLOBAL innodb_monitor_disable = "innodb_rwlock_sx_%";
SET @pre_reset_ts = NOW();
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
SET @pre_enable_ts = NOW();
SET GLOBAL innodb_monitor_enable = "innodb_rwlock_sx_%";
SET @pre_collect_ts = NOW();
DELETE FROM my_metrics;
INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'before'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
# TC-01 There are exact three entries "innodb_rwlock_sx_%" with the
# with the name which follow in innodb_metrics.
# pass
SELECT COUNT(*) INTO @sx_count FROM my_metrics;
# TC-02 Counting is now enabled. ALL = @sx_count entries show that.
# pass
# TC-03 @pre_reset_ts < TIME_RESET. ALL = @sx_count entries show that.
# pass
# TC-04 @pre_enable_ts < TIME_ENABLED. ALL = @sx_count entries show that.
# pass
# TC-05 TIME_RESET < TIME_ENABLED AND TIME_ENABLED < @pre_collect_ts
# AND TIME_ELAPSED > 0. ALL = @sx_count entries show that.
# pass
# TC-06 COUNT_RESET = MAX_COUNT_RESET. ALL = @sx_count entries show that.
# pass
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
SHOW ENGINE INNODB STATUS;
DELETE FROM my_metrics;
INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'before'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
SET @extra_string = '__0_';
SET @extra_int = 0;
# TC-07 One session inserts some significant amount of rows into t1.
# The system MUST survive that.
SET @max_row_count = <max_row_count>;
SET @load_unit = <load_unit>;
SET @start_time = UNIX_TIMESTAMP();
SET AUTOCOMMIT = OFF;
CALL proc_fill_t1 (@max_row_count, @load_unit);
# pass
SET AUTOCOMMIT = ON;
SELECT col_int0 INTO @t1_half FROM t1
WHERE col_int0 >= (@val DIV 2) ORDER BY col_int0 LIMIT 1;
SHOW ENGINE INNODB STATUS;
SELECT col_int0, col_int1, col_int2, col_int3,
CONCAT('->', SUBSTR(col_blob FROM 1 FOR 10),
'<-.....->', SUBSTR(col_blob FROM -10 FOR 10), '<-') AS col_blobx,
CONCAT('->',col_char0,'<-') AS col_char0x,
CONCAT('->',col_char1,'<-') AS col_char1x,
CONCAT('->',col_char2,'<-') AS col_char2x,
CONCAT('->',col_char3,'<-') AS col_char3x,
CONCAT('->',col_char4,'<-') AS col_char4x
FROM t1 WHERE col_int0 between 98 AND 102;
col_int0 98
col_int1 98000
col_int2 98
col_int3 0
col_blobx ->__0_aaaaaa<-.....->aaaaaaaaaa<-
col_char0x -> 98<-
col_char1x ->B 9__0_8 E<-
col_char2x ->B9 __0_ 8E<-
col_char3x ->B__0_ 98E<-
col_char4x ->B98 __0_E<-
col_int0 99
col_int1 99000
col_int2 99
col_int3 0
col_blobx ->__0_aaaaaa<-.....->aaaaaaaaaa<-
col_char0x -> 99<-
col_char1x ->B 9__0_9 E<-
col_char2x ->B9 __0_ 9E<-
col_char3x ->B__0_ 99E<-
col_char4x ->B99 __0_E<-
col_int0 100
col_int1 100000
col_int2 100
col_int3 0
col_blobx ->__0_aaaaaa<-.....->aaaaaaaaaa<-
col_char0x -> 100<-
col_char1x ->B 1__0_00 E<-
col_char2x ->B1 __0_ 00E<-
col_char3x ->B__0_ 100E<-
col_char4x ->B100 __0_E<-
col_int0 101
col_int1 101000
col_int2 101
col_int3 0
col_blobx ->__0_aaaaaa<-.....->aaaaaaaaaa<-
col_char0x -> 101<-
col_char1x ->B 1__0_01 E<-
col_char2x ->B1 __0_ 01E<-
col_char3x ->B__0_ 101E<-
col_char4x ->B101 __0_E<-
col_int0 102
col_int1 102000
col_int2 102
col_int3 0
col_blobx ->__0_aaaaaa<-.....->aaaaaaaaaa<-
col_char0x -> 102<-
col_char1x ->B 1__0_02 E<-
col_char2x ->B1 __0_ 02E<-
col_char3x ->B__0_ 102E<-
col_char4x ->B102 __0_E<-
# TC-11 Several concurrent sessions perform updates in t1 like mad.
# The system MUST survive this.
# Printing of statements is partially suppressed.
SET @start_time = UNIX_TIMESTAMP();
SELECT 1 FROM t1 WHERE col_int0 = @t1_half FOR UPDATE;
1
1
SELECT GET_LOCK('Blocker', 1000) ;
GET_LOCK('Blocker', 1000)
1
RELEASE_LOCK('Blocker')
1
# pass
SHOW ENGINE INNODB STATUS;
# TC-13 One session performs ALTER TABLE t1 ADD KEY ... on the fat table t1.
# The system MUST survive this.
SET @start_time = UNIX_TIMESTAMP();
ALTER TABLE t1 ADD KEY idx_col_char4_col_char0 (col_char4,col_char0);
SHOW ENGINE INNODB STATUS;
# pass
# TC-15 One session performs a fat update on the fat table t1.
# The system MUST survive this.
SET @start_time = UNIX_TIMESTAMP();
SET @extra_int = 13;
SET @extra_string = f_thread_id(@extra_int);
UPDATE t1 SET
col_int1 = f_col_int1(col_int0), col_int2 = f_col_int2(col_int0),
col_int3 = f_col_int3(col_int0), col_blob = f_col_blob(col_int0),
col_char0 = f_col_char0(col_int0), col_char1 = f_col_char1(col_int0),
col_char2 = f_col_char2(col_int0), col_char3 = f_col_char3(col_int0),
col_char4 = f_col_char4(col_int0)
WHERE col_int0 BETWEEN @t1_half - 2500 AND @t1_half + 2500;
COMMIT;
SHOW ENGINE INNODB STATUS;
# pass
INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'after'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
# TC-16 The following activities happend after reset in innodb_metrics
# - Insert some significant amount of rows into t1.
# - Several concurrent users perform excessive updates in t1.
# - ALTER TABLE ... ADD KEY <sufficient big enough structure>
# - One UPDATE statement modifying a huge slice of t1.
# Any of them causes heavy use of SX lock and therefore COUNT_RESET
# must have grown for ALL = @sx_count entries.
# pass
# TC-09 Heavy activity after reset.
# COUNT_RESET = MAX_COUNT_RESET for ALL = @sx_count entries
# needs to stay valid though he counters will have grown.
# pass
DELETE FROM my_metrics;
INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'before'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'after'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
# TC-08 There was a reset. COUNT_RESET = MAX_COUNT_RESET for ALL
# = @sx_count entries.
# pass
# TC-17 We had heavy activity causing big counters and after that a reset.
# Reset causes COUNT > COUNT_RESET AND MAX_COUNT > MAX_COUNT_RESET
# for ALL @sx_count entries.
# pass
# TC-18 We had some reset but this must not decrease COUNT or MAX_COUNT
# after.COUNT >= before.COUNT AND
# after.MAX_COUNT >= before.MAX_COUNT for ALL @sx_count entries.
# pass
# TC-19 We had some reset after heavy activity and this must cause
# after.COUNT_RESET < before.COUNT_RESET
# AND after.MAX_COUNT_RESET < before.MAX_COUNT_RESET AND
# for ALL @sx_count entries.
# pass
connection con10;
disconnect con10;
connection con9;
disconnect con9;
connection con8;
disconnect con8;
connection con7;
disconnect con7;
connection con6;
disconnect con6;
connection con5;
disconnect con5;
connection con4;
disconnect con4;
connection con3;
disconnect con3;
connection con2;
disconnect con2;
connection con1;
disconnect con1;
connection default;
USE test;
DROP SCHEMA my_schema;
SET GLOBAL innodb_monitor_disable = all;
SET GLOBAL innodb_monitor_reset_all = all;
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
SET GLOBAL innodb_monitor_reset = default;
SET GLOBAL innodb_monitor_reset_all = default;
SET GLOBAL innodb_monitor_disable = "innodb_rwlock_sx_%";
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";

View file

@ -13,3 +13,12 @@ PARTITION BY RANGE(a)
PARTITION pb VALUES LESS THAN (4));
DROP TABLE t1;
--echo #
--echo # MDEV-24754 Server crash in
--echo # ha_partition_inplace_ctx::~ha_partition_inplace_ctx
--echo #
CREATE TABLE t1 (id INT PRIMARY KEY, a INT, va INT AS (a) VIRTUAL)
ENGINE=InnoDB PARTITION BY HASH(id) PARTITIONS 2;
ALTER TABLE t1 ADD b INT, ALGORITHM=INSTANT;
DROP TABLE t1;

View file

@ -1,5 +1,6 @@
# This test is for MDEV-24612 fix
--source include/have_innodb.inc
--source include/not_embedded.inc
call mtr.add_suppression("Creating system tablespace with existing redo log file is not recommended.");
call mtr.add_suppression("InnoDB: Database creation was aborted");

View file

@ -1,720 +0,0 @@
# This is a script for MTR with hybrid use.
# a) As regression test
# Mostly some brute force attempt to stress the internal sx locks of
# InnoDB which were introduced by WL#6326+WL#6363.
#    The file with expected results fits this variant.
# The impact on code coverage is quite good.
# b) As testbed for attempts to extend or improve the RQG test wl6326_sql.yy.
# The MTR based test uses
# - a table t1 with the same layout
# - the same stored functions
# - the same stored procedure proc_fill_t1 for inserting a configurable
# amount of records into t1
#    as the RQG test wl6326_sql.yy does.
# Feel free to modify parameters like $max_row_count, $max_con,
# $high_load_duration or switch debugging on (let $test_debug= 1).
# But please be aware that MTR will most probably report that the test
# failed because it got a difference from the expected results.
# Reasons:
# - In general: The file with expected results fits a) only.
# - The actual results might depend on $max_row_count.
# - Additional result sets might be printed.
#
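# A possible workflow for such experiments (an assumption, not something this
# test needs): run the test once through mysql-test-run.pl with --record,
#   ./mtr --record <name of this test>
# so that MTR writes a fresh expected-results file matching the modified
# parameters, and treat that recorded file as a local throw-away baseline.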
# WL#6326 is about the sx locks (InnoDB feature only).
--source include/have_innodb.inc
# Runtime properties:
# Notebook i5 dual core with HT, MySQL binaries compiled with debug,
# max_row_count=10000 rows
# vardir on tmpfs : ~ 375
# vardir on disk : ~ 546
--source include/big_test.inc
# Possibly related to MDEV-16678, the test seems to deterministically fail on
# non-debug builds. innodb_force_recovery=2 (disabling the purge of history)
# would seem to help a little.
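# A minimal sketch (an assumption for local experiments only, kept commented
# out and not sourced by this test) of how that setting could be tried via
# the standard MTR restart helper:
#   let $restart_parameters = --innodb-force-recovery=2;
#   --source include/restart_mysqld.inc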
--source include/have_debug.inc
# We go with "--send" and "--reap" and that fails with the embedded server.
--source include/not_embedded.inc
# It's intentional not to take the risk that a run with valgrind times out.
--source include/not_valgrind.inc
# FIXME:
# Increase the code coverage provided by the current test by
# trying "InnoDB Tablespace Monitor" as soon as some bug is fixed
# or wait till the deprecated "InnoDB Tablespace Monitor" is
# removed.
# Setup of some parameters
# ------------------------
# Number of records within every chunk to be added to t1.
let $load_unit= 10000;
#
# Rough number of records in t1 to achieve.
# We add chunks of $load_unit rows till the actual number
# of rows in the table t1 exceeds $max_row_count.
# let $max_row_count= 1000000;
# let $max_row_count= 300000;
# let $max_row_count= 100000;
# let $max_row_count= 30000;
let $max_row_count= 10000; # ~ 322s on tmpfs (NB)
#
# Determine which variant to run.
let $test_debug= 0;
#
# Number of concurrent sessions to be used in the high load test.
let $max_con= 10;
# Duration of the high load test in seconds.
let $high_load_duration= 60;
# Putting all objects into the SCHEMA my_schema makes the final cleanup easier.
# We then simply run DROP SCHEMA my_schema.
CREATE SCHEMA my_schema;
USE my_schema;
CREATE FUNCTION f_thread_id (i INT) RETURNS CHAR(4) DETERMINISTIC
RETURN CONCAT(LPAD(CAST(i AS CHAR),3,'_'),'_') ;
SELECT CONCAT('->', f_thread_id( 1), '<-');
SELECT CONCAT('->', f_thread_id(12), '<-');
# Definition of parameters used in functions.
# We use a "1" here in order to make the impact on the results of the functions
# clearly visible.
SET @extra_int = 1;
SET @extra_string = f_thread_id(@extra_int);
SELECT @extra_int , @extra_string;
# The different functions are used later when filling t1 and also during
# RQG testing. They serve to generate differences between the column values
# of different rows in different areas of the column.
# Fictional example:
# row 1 col_int0=1 colx='1abcdefgh' coly='abcd1efgh' colz='abcdefgh1'
# row 2 col_int0=2 colx='2abcdefgh' coly='abcd2efgh' colz='abcdefgh2'
# The function f_<pattern> is for the column with the name <pattern>.
# There is a function
# - for every column except col_int0
# - even if the SQL for generating the value is simple.
# The reason for this is the architecture of the RQG test.
let $part= AS my_result
FROM (SELECT 1 AS my_col UNION SELECT 12 UNION SELECT 123
UNION SELECT 1234 UNION SELECT 12345) AS tx;
let $function_name= f_col_int1;
eval CREATE FUNCTION $function_name (i INT) RETURNS INT(20) DETERMINISTIC
RETURN i * 1000 + @extra_int ;
eval SELECT $function_name(my_col) $part;
let $function_name= f_col_int2;
eval CREATE FUNCTION $function_name (i INT) RETURNS INT(20) DETERMINISTIC
RETURN @extra_int * 10000000 + i ;
eval SELECT $function_name(my_col) $part;
let $function_name= f_col_int3;
eval CREATE FUNCTION $function_name (i INT) RETURNS INT(20) DETERMINISTIC
RETURN @extra_int ;
eval SELECT $function_name(my_col) $part;
let $function_name= f_col_blob;
eval CREATE FUNCTION $function_name (i INT) RETURNS BLOB DETERMINISTIC
RETURN RPAD(@extra_string,(@@innodb_page_size / 2 ) + 1,'a');
eval SELECT CONCAT('->', SUBSTR($function_name(my_col) FROM 1 FOR 10),
'<-.....->', SUBSTR($function_name(my_col) FROM -10 FOR 10), '<-') $part;
let $function_name= f_col_char0;
eval CREATE FUNCTION $function_name (i INT) RETURNS CHAR(255) DETERMINISTIC
RETURN LPAD(CAST(i AS CHAR),255,' ');
eval SELECT CONCAT('->', $function_name(my_col), '<-') $part;
let $function_name= f_col_char1;
eval CREATE FUNCTION $function_name (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',
LPAD(SUBSTR(CAST(i AS CHAR),1,(LENGTH(CAST(i AS CHAR)) DIV 2)),10,' '),
@extra_string,
RPAD(SUBSTR(CAST(i AS CHAR), -((LENGTH(CAST(i AS CHAR)) + 1) DIV 2)),10,' '),
'E') ;
eval SELECT CONCAT('->', $function_name(my_col), '<-') $part;
let $function_name= f_col_char2;
eval CREATE FUNCTION $function_name (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',
RPAD(SUBSTR(CAST(i AS CHAR),1,(LENGTH(CAST(i AS CHAR)) DIV 2)),10,' '),
@extra_string,
LPAD(SUBSTR(CAST(i AS CHAR), -((LENGTH(CAST(i AS CHAR)) + 1) DIV 2)),10,' '),
'E');
eval SELECT CONCAT('->', $function_name(my_col), '<-') $part;
let $function_name= f_col_char3;
eval CREATE FUNCTION $function_name (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',@extra_string,LPAD(CAST(i AS CHAR),20,' '),'E');
eval SELECT CONCAT('->', $function_name(my_col), '<-') $part;
let $function_name= f_col_char4;
eval CREATE FUNCTION $function_name (i INT) RETURNS CHAR(26) DETERMINISTIC
RETURN
CONCAT('B',RPAD(CAST(i AS CHAR),20,' '),@extra_string,'E');
eval SELECT CONCAT('->', $function_name(my_col), '<-') $part;
# Auxiliary table for figuring out the impact of scenarios on
# information_schema.innodb_metrics content.
CREATE TABLE my_metrics LIKE information_schema.innodb_metrics;
ALTER TABLE my_metrics ADD COLUMN phase ENUM('after', 'before'),
DROP COLUMN SUBSYSTEM, DROP COLUMN TYPE, DROP COLUMN COMMENT,
ADD PRIMARY KEY (NAME,phase);
let $empty_my_metrics= DELETE FROM my_metrics;
let $before_my_metrics= INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'before'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
let $after_my_metrics= INSERT INTO my_metrics
SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET,
ENABLED, 'after'
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%';
let $print_metrics= SELECT NAME, COUNT, MAX_COUNT, MIN_COUNT, AVG_COUNT,
COUNT_RESET, MAX_COUNT_RESET, MIN_COUNT_RESET, AVG_COUNT_RESET,
TIME_ENABLED, TIME_DISABLED, TIME_ELAPSED, TIME_RESET, ENABLED
FROM information_schema.innodb_metrics
WHERE NAME LIKE 'innodb_rwlock_sx_%'
ORDER BY NAME;
# The main table for testing.
CREATE TABLE t1 (
col_int0 BIGINT,
col_int1 BIGINT,
col_int2 BIGINT,
col_int3 BIGINT,
col_blob BLOB,
col_char0 VARCHAR(255),
col_char1 VARCHAR(30),
col_char2 VARCHAR(30),
col_char3 VARCHAR(30),
col_char4 VARCHAR(30)
) ENGINE = InnoDB;
# Use many indexes, most of them of significant size, in order to cause
# some heavy use of sx locks during data generation.
ALTER TABLE t1 ADD UNIQUE KEY uidx_col_int0 (col_int0),
ADD UNIQUE KEY uidx1 (col_int1, col_char0),
ADD UNIQUE KEY uidx2 (col_int2, col_char0, col_int1),
ADD UNIQUE KEY uidx3 (col_int3, col_int2, col_char0),
ADD UNIQUE KEY uidx4 (col_char1, col_char0),
ADD UNIQUE KEY uidx5 (col_char2, col_char0, col_char1),
ADD UNIQUE KEY uidx6 (col_char3, col_char2, col_char0),
ADD UNIQUE KEY uidx7 (col_int1, col_int2, col_int3, col_char4,
col_char1, col_char2, col_char3, col_char0),
ADD KEY idx8 (col_blob(10), col_char4);
delimiter |;
CREATE PROCEDURE proc_fill_t1 (max_row_count INT, load_unit INT)
BEGIN
DECLARE my_count INTEGER DEFAULT 0;
DECLARE max_load_count INTEGER DEFAULT 0;
DROP TABLE IF EXISTS t0;
CREATE TEMPORARY TABLE t0 (col_int0 BIGINT, PRIMARY KEY(col_int0));
WHILE (my_count < load_unit ) DO
SET my_count = my_count + 1;
INSERT INTO t0 SET col_int0 = my_count;
END WHILE;
SET max_load_count = (SELECT (max_row_count DIV load_unit) + 1 );
SELECT COUNT(col_int0) INTO @val FROM t1;
SET my_count = 0;
REPEAT
INSERT INTO t1 (col_int0, col_int1, col_int2, col_int3, col_blob,
col_char0, col_char1, col_char2,col_char3,col_char4)
SELECT col_int0 + @val,
f_col_int1(col_int0 + @val),
f_col_int2(col_int0 + @val),
f_col_int3(col_int0 + @val),
f_col_blob(col_int0 + @val),
f_col_char0(col_int0 + @val),
f_col_char1(col_int0 + @val),
f_col_char2(col_int0 + @val),
f_col_char3(col_int0 + @val),
f_col_char4(col_int0 + @val)
FROM t0;
COMMIT;
SELECT MAX(col_int0) INTO @val FROM t1;
SET my_count = my_count + 1;
UNTIL( my_count > max_load_count OR @val >= max_row_count )
END REPEAT;
DROP TEMPORARY TABLE t0;
END|
delimiter ;|
delimiter |;
CREATE PROCEDURE proc_dml (max_duration INT, t1_stripe_half INT)
BEGIN
DECLARE aux INTEGER DEFAULT 0;
DECLARE start_time INT;
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION, SQLWARNING, NOT FOUND BEGIN END;
SET @extra_int = CONNECTION_ID();
SET @extra_string = f_thread_id(@extra_int);
SELECT ROUND(MAX(col_int0) / 2 ) INTO @t1_half FROM t1;
# The user lock 'Blocker' should already be set by some other session S1.
# S1 starts the race by releasing that lock.
# Wait till the lock is released and the lock can be obtained.
# In order to prevent endless waiting in case of unforeseen problems,
# limit the timespan to 30 seconds.
SELECT GET_LOCK('Blocker', 30) INTO @aux;
# Release the lock immediately so that the other "runner" sessions start too.
SELECT RELEASE_LOCK('Blocker') INTO @aux;
SET start_time = UNIX_TIMESTAMP();
WHILE (UNIX_TIMESTAMP() - start_time < max_duration) DO
SET @aux = @t1_half - t1_stripe_half + ROUND(RAND() * t1_stripe_half * 2);
UPDATE t1 SET
col_int1 = f_col_int1(col_int0),
col_int2 = f_col_int2(col_int0),
col_int3 = f_col_int3(col_int0),
col_blob = f_col_blob(col_int0),
col_char0 = f_col_char0(col_int0),
col_char1 = f_col_char1(col_int0),
col_char2 = f_col_char2(col_int0),
col_char3 = f_col_char3(col_int0),
col_char4 = f_col_char4(col_int0)
WHERE col_int0 = @aux;
COMMIT;
END WHILE;
END|
delimiter ;|
SET GLOBAL innodb_monitor_disable = "innodb_rwlock_sx_%";
SET @pre_reset_ts = NOW();
--sleep 1.1
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
SET @pre_enable_ts = NOW();
--sleep 1.1
SET GLOBAL innodb_monitor_enable = "innodb_rwlock_sx_%";
--sleep 1.1
SET @pre_collect_ts = NOW();
eval $empty_my_metrics;
eval $before_my_metrics;
--echo # TC-01 There are exactly three entries "innodb_rwlock_sx_%" with the
--echo #       names which follow in innodb_metrics.
let $check_statement=
SELECT COUNT(*) <> 3 FROM my_metrics
WHERE NAME IN ('innodb_rwlock_sx_spin_waits',
'innodb_rwlock_sx_spin_rounds',
'innodb_rwlock_sx_os_waits');
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
SELECT COUNT(*) INTO @sx_count FROM my_metrics;
--echo # TC-02 Counting is now enabled. ALL = @sx_count entries show that.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE ENABLED;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME, ENABLED FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
--echo # TC-03 @pre_reset_ts < TIME_RESET. ALL = @sx_count entries show that.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE @pre_reset_ts < TIME_RESET;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME, @pre_reset_ts, TIME_RESET FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
--echo # TC-04 @pre_enable_ts < TIME_ENABLED. ALL = @sx_count entries show that.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE @pre_enable_ts < TIME_ENABLED;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME, @pre_enable_ts, TIME_ENABLED FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
--echo # TC-05 TIME_RESET < TIME_ENABLED AND TIME_ENABLED < @pre_collect_ts
--echo # AND TIME_ELAPSED > 0. ALL = @sx_count entries show that.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE TIME_RESET < TIME_ENABLED AND TIME_ENABLED < @pre_collect_ts
AND TIME_ELAPSED > 0;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME, @pre_collect_ts, TIME_RESET, TIME_ENABLED, TIME_ELAPSED
FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
--echo # TC-06 COUNT_RESET = MAX_COUNT_RESET. ALL = @sx_count entries show that.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE COUNT_RESET = MAX_COUNT_RESET;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT NAME, COUNT_RESET, MAX_COUNT_RESET FROM my_metrics
ORDER BY NAME;
exit;
}
--echo # pass
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
--disable_result_log
if($test_debug)
{
--enable_result_log
eval $print_metrics;
}
SHOW ENGINE INNODB STATUS;
--enable_result_log
eval $empty_my_metrics;
eval $before_my_metrics;
# These values (the "0") help to identify later if some record is in its
# initial state or already modified.
SET @extra_string = '__0_';
SET @extra_int = 0;
--echo # TC-07 One session inserts some significant amount of rows into t1.
--echo # The system MUST survive that.
--replace_result $max_row_count <max_row_count>
eval SET @max_row_count = $max_row_count;
--replace_result $load_unit <load_unit>
eval SET @load_unit = $load_unit;
SET @start_time = UNIX_TIMESTAMP();
SET AUTOCOMMIT = OFF;
CALL proc_fill_t1 (@max_row_count, @load_unit);
--echo # pass
SET AUTOCOMMIT = ON;
SELECT col_int0 INTO @t1_half FROM t1
WHERE col_int0 >= (@val DIV 2) ORDER BY col_int0 LIMIT 1;
--disable_result_log
if($test_debug)
{
--enable_result_log
SELECT COUNT(*) AS table_row_count,
UNIX_TIMESTAMP() - @start_time AS fill_run_time
FROM t1;
eval $print_metrics;
}
SHOW ENGINE INNODB STATUS;
--enable_result_log
# Show that the value distribution is according to the plan.
--vertical_results
SELECT col_int0, col_int1, col_int2, col_int3,
CONCAT('->', SUBSTR(col_blob FROM 1 FOR 10),
'<-.....->', SUBSTR(col_blob FROM -10 FOR 10), '<-') AS col_blobx,
CONCAT('->',col_char0,'<-') AS col_char0x,
CONCAT('->',col_char1,'<-') AS col_char1x,
CONCAT('->',col_char2,'<-') AS col_char2x,
CONCAT('->',col_char3,'<-') AS col_char3x,
CONCAT('->',col_char4,'<-') AS col_char4x
FROM t1 WHERE col_int0 between 98 AND 102;
--horizontal_results
# For experiments/interest only. Please do not remove that.
if (0)
{
ANALYZE TABLE t1;
SELECT n_rows, clustered_index_size, sum_of_other_index_sizes
FROM mysql.innodb_table_stats;
# SELECT * FROM mysql.innodb_index_stats;
# idx_col_int3_int0 n_diff_pfx01 1 col_int3
# idx_col_int3_int0 n_diff_pfx02 10000 col_int3,col_int0
# idx_col_int3_int0 n_diff_pfx03 10000 col_int3,col_int0,DB_ROW_ID
# idx_col_int3_int0 n_leaf_pages 19 Number of leaf pages in the index
# idx_col_int3_int0 size 20 Number of pages in the index
--vertical_results
SELECT t1.index_name, t1.stat_value AS idx_pages, t2.stat_value AS idx_leaf_pages,
(t1.stat_value - t2.stat_value - 1) / t1.stat_value AS sx_page_ratio
FROM mysql.innodb_index_stats t1, mysql.innodb_index_stats t2
WHERE t1.index_name = t2.index_name
AND t1.stat_name = 'size' AND t2.stat_name = 'n_leaf_pages'
ORDER BY t1.index_name;
--horizontal_results
}
--echo # TC-11 Several concurrent sessions perform updates in t1 like mad.
--echo # The system MUST survive this.
--echo # Printing of statements is partially suppressed.
SET @start_time = UNIX_TIMESTAMP();
SELECT 1 FROM t1 WHERE col_int0 = @t1_half FOR UPDATE;
SELECT GET_LOCK('Blocker', 1000) ;
--disable_query_log
let $num= $max_con;
while ($num)
{
--connect (con$num,localhost,root,,)
USE my_schema;
# The second parameter of the procedure is the size of the affected stripe / 2.
# A smaller stripe causes some smaller counter growth but most probably
# also more stress around locking in general.
# Example # (nnnn) = half stripe size
# NAME | COUNT_RESET (5000) | COUNT_RESET (100)
# -----------------------------+--------------------+----------------
# innodb_rwlock_sx_os_waits | 1412 | 486
# innodb_rwlock_sx_spin_rounds | 44061 | 17031
# innodb_rwlock_sx_spin_waits | 996 | 515
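# A hedged example (not executed here): to try the narrower stripe from the
# table above, the call below could be replaced by something like
#   eval CALL proc_dml($high_load_duration, 100);
# where 100 is the half stripe size.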
--send
eval CALL proc_dml($high_load_duration,@t1_half);
dec $num;
}
--connection default
SELECT RELEASE_LOCK('Blocker') ;
--sleep 3
COMMIT;
let $num= $max_con;
while ($num)
{
--connection con$num
--reap
dec $num;
}
--echo # pass
--connection default
--enable_query_log
# let $wait_timeout= 181;
# --source include/wait_condition.inc
# eval $after_my_metrics;
--disable_result_log
if($test_debug)
{
--enable_result_log
SELECT UNIX_TIMESTAMP() - @start_time AS update_battle_run_time;
eval $print_metrics;
}
SHOW ENGINE INNODB STATUS;
--enable_result_log
--echo # TC-13 One session performs ALTER TABLE t1 ADD KEY ... on the fat table t1.
--echo # The system MUST survive this.
SET @start_time = UNIX_TIMESTAMP();
ALTER TABLE t1 ADD KEY idx_col_char4_col_char0 (col_char4,col_char0);
--disable_result_log
if($test_debug)
{
--enable_result_log
SELECT UNIX_TIMESTAMP() - @start_time AS add_key_run_time;
eval $print_metrics;
}
SHOW ENGINE INNODB STATUS;
--enable_result_log
--echo # pass
--echo # TC-15 One session performs a fat update on the fat table t1.
--echo # The system MUST survive this.
SET @start_time = UNIX_TIMESTAMP();
SET @extra_int = 13;
SET @extra_string = f_thread_id(@extra_int);
eval UPDATE t1 SET
col_int1 = f_col_int1(col_int0), col_int2 = f_col_int2(col_int0),
col_int3 = f_col_int3(col_int0), col_blob = f_col_blob(col_int0),
col_char0 = f_col_char0(col_int0), col_char1 = f_col_char1(col_int0),
col_char2 = f_col_char2(col_int0), col_char3 = f_col_char3(col_int0),
col_char4 = f_col_char4(col_int0)
WHERE col_int0 BETWEEN @t1_half - 2500 AND @t1_half + 2500;
COMMIT;
--disable_result_log
if($test_debug)
{
--enable_result_log
SELECT UNIX_TIMESTAMP() - @start_time AS total_update_run_time;
eval $print_metrics;
}
SHOW ENGINE INNODB STATUS;
--enable_result_log
--echo # pass
# Basically every one of the big activities causes some counter growth.
# But because of
# - the architecture of InnoDB (certain things happen asynchronously)
# - the actual test configuration (server/InnoDB options)
# - conditions like parallel (./mtr --parallel=auto?) load on the testing box
# this might not be fulfilled for every single big activity unless
# we go with huge waits or similar.
# Observation:
# - non-debug binaries: expectation frequently not fulfilled
# - debug binaries:     expectation rarely not fulfilled
#
let $wait_timeout= 121;
let $wait_condition=
SELECT COUNT(*) = @sx_count
FROM information_schema.innodb_metrics t_after
JOIN my_metrics t_before
ON t_after.COUNT_RESET > t_before.COUNT_RESET AND t_after.NAME = t_before.NAME;
--source include/wait_condition.inc
eval $after_my_metrics;
--echo # TC-16 The following activities happened after reset in innodb_metrics
--echo # - Insert some significant amount of rows into t1.
--echo # - Several concurrent users perform excessive updates in t1.
--echo #        - ALTER TABLE ... ADD KEY <sufficiently big structure>
--echo # - One UPDATE statement modifying a huge slice of t1.
--echo # Any of them causes heavy use of SX lock and therefore COUNT_RESET
--echo # must have grown for ALL = @sx_count entries.
# The former testcases TC-10 and TC-12 had to be made part of this testcase
# because their results were unstable.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics t_after JOIN my_metrics t_before
ON t_after.COUNT_RESET > t_before.COUNT_RESET AND t_after.NAME = t_before.NAME
WHERE t_after.phase = 'after' AND t_before.phase = 'before';
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
--echo # TC-09 Heavy activity after reset.
--echo # COUNT_RESET = MAX_COUNT_RESET for ALL = @sx_count entries
--echo #       needs to stay valid though the counters will have grown.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE phase = 'after' AND COUNT_RESET = MAX_COUNT_RESET;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
eval $empty_my_metrics;
eval $before_my_metrics;
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";
eval $after_my_metrics;
--echo # TC-08 There was a reset. COUNT_RESET = MAX_COUNT_RESET for ALL
--echo # = @sx_count entries.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE phase = 'before' AND COUNT_RESET = MAX_COUNT_RESET;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
--echo # TC-17 We had heavy activity causing big counters and after that a reset.
--echo # Reset causes COUNT > COUNT_RESET AND MAX_COUNT > MAX_COUNT_RESET
--echo # for ALL @sx_count entries.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics
WHERE phase = 'after'
AND COUNT > COUNT_RESET
AND MAX_COUNT > MAX_COUNT_RESET;
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
--echo # TC-18 We had some reset but this must not decrease COUNT or MAX_COUNT
--echo # after.COUNT >= before.COUNT AND
--echo # after.MAX_COUNT >= before.MAX_COUNT for ALL @sx_count entries.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics t_after JOIN my_metrics t_before
ON t_after.COUNT >= t_before.COUNT AND t_after.MAX_COUNT >= t_before.MAX_COUNT
AND t_after.NAME = t_before.NAME
WHERE t_after.phase = 'after' AND t_before.phase = 'before';
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
--echo # TC-19 We had some reset after heavy activity and this must cause
--echo # after.COUNT_RESET < before.COUNT_RESET
--echo #            AND after.MAX_COUNT_RESET < before.MAX_COUNT_RESET
--echo # for ALL @sx_count entries.
let $check_statement=
SELECT COUNT(*) <> @sx_count FROM my_metrics t_after JOIN my_metrics t_before
ON t_after.COUNT_RESET < t_before.COUNT_RESET
AND t_after.MAX_COUNT_RESET < t_before.MAX_COUNT_RESET
AND t_after.NAME = t_before.NAME
WHERE t_after.phase = 'after' AND t_before.phase = 'before';
if(`$check_statement`)
{
--echo # fail
eval $check_statement;
SELECT * FROM my_metrics
ORDER BY NAME, phase;
exit;
}
--echo # pass
# Cleanup
let $num= $max_con;
while ($num)
{
--connection con$num
--disconnect con$num
--source include/wait_until_disconnected.inc
dec $num;
}
--connection default
USE test;
DROP SCHEMA my_schema;
SET GLOBAL innodb_monitor_disable = all;
SET GLOBAL innodb_monitor_reset_all = all;
--disable_warnings
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
SET GLOBAL innodb_monitor_reset = default;
SET GLOBAL innodb_monitor_reset_all = default;
--enable_warnings
SET GLOBAL innodb_monitor_disable = "innodb_rwlock_sx_%";
SET GLOBAL innodb_monitor_reset = "innodb_rwlock_sx_%";

View file

@ -713,7 +713,20 @@
VARIABLE_COMMENT If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 or autoset then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of file descriptors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -2235,7 +2235,7 @@
@@ -2235,10 +2235,10 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The maximum weight of the SEL_ARG graph. Set to 0 for no limit
NUMERIC_MIN_VALUE 0
-NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2245,7 +2245,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_PRUNE_LEVEL
VARIABLE_SCOPE SESSION
@ -722,7 +735,7 @@
VARIABLE_COMMENT Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1
@@ -2245,7 +2245,7 @@
@@ -2255,7 +2255,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
@ -731,7 +744,7 @@
VARIABLE_COMMENT Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Values smaller than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 62
@@ -2255,7 +2255,7 @@
@@ -2265,7 +2265,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SELECTIVITY_SAMPLING_LIMIT
VARIABLE_SCOPE SESSION
@ -740,7 +753,7 @@
VARIABLE_COMMENT Controls number of record samples to check condition selectivity
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 4294967295
@@ -2285,17 +2285,17 @@
@@ -2295,17 +2295,17 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_TRACE_MAX_MEM_SIZE
VARIABLE_SCOPE SESSION
@ -761,7 +774,7 @@
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 5
@@ -2315,7 +2315,7 @@
@@ -2325,7 +2325,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME PERFORMANCE_SCHEMA_ACCOUNTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -770,7 +783,7 @@
VARIABLE_COMMENT Maximum number of instrumented user@host accounts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2325,7 +2325,7 @@
@@ -2335,7 +2335,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_DIGESTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -779,7 +792,7 @@
VARIABLE_COMMENT Size of the statement digest. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 200
@@ -2335,7 +2335,7 @@
@@ -2345,7 +2345,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STAGES_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -788,7 +801,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STAGES_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2345,7 +2345,7 @@
@@ -2355,7 +2355,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STAGES_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -806,7 +819,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STATEMENTS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2365,7 +2365,7 @@
@@ -2375,7 +2375,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STATEMENTS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -815,7 +828,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2375,7 +2375,7 @@
@@ -2385,7 +2395,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_TRANSACTIONS_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -824,7 +837,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_TRANSACTIONS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2385,7 +2385,7 @@
@@ -2395,7 +2395,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_TRANSACTIONS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -833,7 +846,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_TRANSACTIONS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2395,7 +2395,7 @@
@@ -2405,7 +2405,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -842,7 +855,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_WAITS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2405,7 +2405,7 @@
@@ -2415,7 +2415,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -851,7 +864,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_WAITS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2415,7 +2415,7 @@
@@ -2425,7 +2425,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_HOSTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -860,7 +873,7 @@
VARIABLE_COMMENT Maximum number of instrumented hosts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2425,7 +2425,7 @@
@@ -2435,7 +2435,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_COND_CLASSES
VARIABLE_SCOPE GLOBAL
@ -869,7 +882,7 @@
VARIABLE_COMMENT Maximum number of condition instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2435,7 +2435,7 @@
@@ -2445,7 +2445,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_COND_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -878,7 +891,7 @@
VARIABLE_COMMENT Maximum number of instrumented condition objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2445,7 +2445,7 @@
@@ -2455,7 +2455,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_DIGEST_LENGTH
VARIABLE_SCOPE GLOBAL
@ -887,7 +900,7 @@
VARIABLE_COMMENT Maximum length considered for digest text, when stored in performance_schema tables.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2455,7 +2455,7 @@
@@ -2465,7 +2465,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_CLASSES
VARIABLE_SCOPE GLOBAL
@ -896,7 +909,7 @@
VARIABLE_COMMENT Maximum number of file instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2465,7 +2465,7 @@
@@ -2475,7 +2475,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_HANDLES
VARIABLE_SCOPE GLOBAL
@ -905,7 +918,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented files.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2475,7 +2475,7 @@
@@ -2485,7 +2485,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -914,7 +927,7 @@
VARIABLE_COMMENT Maximum number of instrumented files. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2485,7 +2485,7 @@
@@ -2495,7 +2495,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_INDEX_STAT
VARIABLE_SCOPE GLOBAL
@ -923,7 +936,7 @@
VARIABLE_COMMENT Maximum number of index statistics for instrumented tables. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2495,7 +2495,7 @@
@@ -2505,7 +2505,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MEMORY_CLASSES
VARIABLE_SCOPE GLOBAL
@ -932,7 +945,7 @@
VARIABLE_COMMENT Maximum number of memory pool instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1024
@@ -2505,7 +2505,7 @@
@@ -2515,7 +2515,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_METADATA_LOCKS
VARIABLE_SCOPE GLOBAL
@ -941,7 +954,7 @@
VARIABLE_COMMENT Maximum number of metadata locks. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2515,7 +2515,7 @@
@@ -2525,7 +2525,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MUTEX_CLASSES
VARIABLE_SCOPE GLOBAL
@ -950,7 +963,7 @@
VARIABLE_COMMENT Maximum number of mutex instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2525,7 +2525,7 @@
@@ -2535,7 +2535,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MUTEX_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -959,7 +972,7 @@
VARIABLE_COMMENT Maximum number of instrumented MUTEX objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2535,7 +2535,7 @@
@@ -2545,7 +2545,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_PREPARED_STATEMENTS_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -968,7 +981,7 @@
VARIABLE_COMMENT Maximum number of instrumented prepared statements. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2545,7 +2545,7 @@
@@ -2555,7 +2555,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_PROGRAM_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -977,7 +990,7 @@
VARIABLE_COMMENT Maximum number of instrumented programs. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2555,7 +2555,7 @@
@@ -2565,7 +2565,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_RWLOCK_CLASSES
VARIABLE_SCOPE GLOBAL
@ -986,7 +999,7 @@
VARIABLE_COMMENT Maximum number of rwlock instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2565,7 +2565,7 @@
@@ -2575,7 +2575,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_RWLOCK_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -995,7 +1008,7 @@
VARIABLE_COMMENT Maximum number of instrumented RWLOCK objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2575,7 +2575,7 @@
@@ -2585,7 +2585,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SOCKET_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1004,7 +1017,7 @@
VARIABLE_COMMENT Maximum number of socket instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2585,7 +2585,7 @@
@@ -2595,7 +2595,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SOCKET_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1013,7 +1026,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented sockets. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2595,7 +2595,7 @@
@@ -2605,7 +2605,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SQL_TEXT_LENGTH
VARIABLE_SCOPE GLOBAL
@ -1022,7 +1035,7 @@
VARIABLE_COMMENT Maximum length of displayed sql text.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2605,7 +2605,7 @@
@@ -2615,7 +2615,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STAGE_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1031,7 +1044,7 @@
VARIABLE_COMMENT Maximum number of stage instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2615,7 +2615,7 @@
@@ -2625,7 +2625,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1040,7 +1053,7 @@
VARIABLE_COMMENT Maximum number of statement instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2625,7 +2625,7 @@
@@ -2635,7 +2635,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_STACK
VARIABLE_SCOPE GLOBAL
@ -1049,7 +1062,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_CURRENT.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 256
@@ -2635,7 +2635,7 @@
@@ -2645,7 +2645,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_HANDLES
VARIABLE_SCOPE GLOBAL
@ -1058,7 +1071,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2645,7 +2645,7 @@
@@ -2655,7 +2655,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1067,7 +1080,7 @@
VARIABLE_COMMENT Maximum number of instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2655,7 +2655,7 @@
@@ -2665,7 +2665,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_LOCK_STAT
VARIABLE_SCOPE GLOBAL
@ -1076,7 +1089,7 @@
VARIABLE_COMMENT Maximum number of lock statistics for instrumented tables. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2665,7 +2665,7 @@
@@ -2675,7 +2675,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_THREAD_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1085,7 +1098,7 @@
VARIABLE_COMMENT Maximum number of thread instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2675,7 +2675,7 @@
@@ -2685,7 +2685,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_THREAD_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1094,7 +1107,7 @@
VARIABLE_COMMENT Maximum number of instrumented threads. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2685,7 +2685,7 @@
@@ -2695,7 +2695,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SESSION_CONNECT_ATTRS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1103,7 +1116,7 @@
VARIABLE_COMMENT Size of session attribute string buffer per thread. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2695,7 +2695,7 @@
@@ -2705,7 +2705,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SETUP_ACTORS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1112,7 +1125,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_ACTORS.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2705,7 +2705,7 @@
@@ -2715,7 +2715,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SETUP_OBJECTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1121,7 +1134,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_OBJECTS.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2715,7 +2715,7 @@
@@ -2725,7 +2725,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_USERS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1130,7 +1143,7 @@
VARIABLE_COMMENT Maximum number of instrumented users. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2765,7 +2765,7 @@
@@ -2775,7 +2775,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PRELOAD_BUFFER_SIZE
VARIABLE_SCOPE SESSION
@ -1139,7 +1152,7 @@
VARIABLE_COMMENT The size of the buffer that is allocated when preloading indexes
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
@@ -2785,7 +2785,7 @@
@@ -2795,7 +2795,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME PROFILING_HISTORY_SIZE
VARIABLE_SCOPE SESSION
@ -1148,7 +1161,7 @@
VARIABLE_COMMENT Number of statements about which profiling information is maintained. If set to 0, no profiles are stored. See SHOW PROFILES.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
@@ -2795,7 +2795,7 @@
@@ -2805,7 +2805,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PROGRESS_REPORT_TIME
VARIABLE_SCOPE SESSION
@ -1157,7 +1170,7 @@
VARIABLE_COMMENT Seconds between sending progress reports to the client for time-consuming statements. Set to 0 to disable progress reporting.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -2855,7 +2855,7 @@
@@ -2865,7 +2865,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME QUERY_ALLOC_BLOCK_SIZE
VARIABLE_SCOPE SESSION
@ -1166,7 +1179,7 @@
VARIABLE_COMMENT Allocation block size for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
@@ -2865,7 +2865,7 @@
@@ -2875,7 +2875,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME QUERY_CACHE_LIMIT
VARIABLE_SCOPE GLOBAL
@ -1175,7 +1188,7 @@
VARIABLE_COMMENT Don't cache results that are bigger than this
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -2875,7 +2875,7 @@
@@ -2885,7 +2885,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME QUERY_CACHE_MIN_RES_UNIT
VARIABLE_SCOPE GLOBAL
@ -1184,7 +1197,7 @@
VARIABLE_COMMENT The minimum size for blocks allocated by the query cache
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -2888,7 +2888,7 @@
@@ -2898,7 +2898,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The memory allocated to store results from old queries
NUMERIC_MIN_VALUE 0
@ -1193,7 +1206,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2925,7 +2925,7 @@
@@ -2935,7 +2935,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME QUERY_PREALLOC_SIZE
VARIABLE_SCOPE SESSION
@ -1202,7 +1215,7 @@
VARIABLE_COMMENT Persistent buffer for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
@@ -2938,7 +2938,7 @@
@@ -2948,7 +2948,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@ -1211,7 +1224,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2948,14 +2948,14 @@
@@ -2958,14 +2958,14 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@ -1228,7 +1241,7 @@
VARIABLE_COMMENT Allocation block size for storing ranges during optimization
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 4294967295
@@ -2965,7 +2965,7 @@
@@ -2975,7 +2975,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME READ_BUFFER_SIZE
VARIABLE_SCOPE SESSION
@ -1237,7 +1250,7 @@
VARIABLE_COMMENT Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
@@ -2985,7 +2985,7 @@
@@ -2995,7 +2995,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME READ_RND_BUFFER_SIZE
VARIABLE_SCOPE SESSION
@ -1246,7 +1259,7 @@
VARIABLE_COMMENT When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 2147483647
@@ -2905,10 +2905,10 @@
@@ -2915,10 +2915,10 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME ROWID_MERGE_BUFF_SIZE
VARIABLE_SCOPE SESSION
@ -1259,7 +1272,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2945,7 +2945,7 @@
@@ -2955,7 +2955,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SERVER_ID
VARIABLE_SCOPE SESSION
@ -1268,7 +1281,7 @@
VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
@@ -3015,7 +3015,7 @@
@@ -3025,7 +3025,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME SLAVE_MAX_ALLOWED_PACKET
VARIABLE_SCOPE GLOBAL
@ -1277,7 +1290,7 @@
VARIABLE_COMMENT The maximum packet length to sent successfully from the master to slave.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
@@ -3025,7 +3025,7 @@
@@ -3035,7 +3035,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLOW_LAUNCH_TIME
VARIABLE_SCOPE GLOBAL
@ -1286,7 +1299,7 @@
VARIABLE_COMMENT If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
@@ -3068,7 +3068,7 @@
@@ -3078,7 +3078,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Each thread that needs to do a sort allocates a buffer of this size
NUMERIC_MIN_VALUE 1024
@ -1295,7 +1308,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3275,7 +3275,7 @@
@@ -3285,7 +3285,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME STORED_PROGRAM_CACHE
VARIABLE_SCOPE GLOBAL
@ -1304,7 +1317,7 @@
VARIABLE_COMMENT The soft upper limit for number of cached stored routines for one connection.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 524288
@@ -3355,7 +3355,7 @@
@@ -3365,7 +3365,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME TABLE_DEFINITION_CACHE
VARIABLE_SCOPE GLOBAL
@ -1313,7 +1326,7 @@
VARIABLE_COMMENT The number of cached table definitions
NUMERIC_MIN_VALUE 400
NUMERIC_MAX_VALUE 2097152
@@ -3365,7 +3365,7 @@
@@ -3375,7 +3375,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TABLE_OPEN_CACHE
VARIABLE_SCOPE GLOBAL
@ -1322,7 +1335,7 @@
VARIABLE_COMMENT The number of cached open tables
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 1048576
@@ -3425,7 +3425,7 @@
@@ -3435,7 +3435,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME THREAD_CACHE_SIZE
VARIABLE_SCOPE GLOBAL
@ -1331,7 +1344,7 @@
VARIABLE_COMMENT How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
@@ -3508,7 +3508,7 @@
@@ -3518,7 +3518,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Max size for data for an internal temporary on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 1024
@ -1340,7 +1353,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3518,7 +3518,7 @@
@@ -3528,7 +3528,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table. Same as tmp_table_size.
NUMERIC_MIN_VALUE 0
@ -1349,7 +1362,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3528,14 +3528,14 @@
@@ -3538,14 +3538,14 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Alias for tmp_memory_table_size. If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 0
@ -1366,7 +1379,7 @@
VARIABLE_COMMENT Allocation block size for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
@@ -3545,7 +3545,7 @@
@@ -3555,7 +3555,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TRANSACTION_PREALLOC_SIZE
VARIABLE_SCOPE SESSION
@ -1375,7 +1388,7 @@
VARIABLE_COMMENT Persistent buffer for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
@@ -3685,7 +3685,7 @@
@@ -3695,7 +3695,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME WAIT_TIMEOUT
VARIABLE_SCOPE SESSION
@ -1384,7 +1397,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on a connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
@@ -3712,7 +3712,7 @@
@@ -3722,7 +3722,7 @@
VARIABLE_NAME LOG_TC_SIZE
GLOBAL_VALUE_ORIGIN AUTO
VARIABLE_SCOPE GLOBAL

View file

@ -2233,6 +2233,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The maximum weight of the SEL_ARG graph. Set to 0 for no limit
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 18446744073709551615
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_PRUNE_LEVEL
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED

View file

@ -713,7 +713,20 @@
VARIABLE_COMMENT If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 or autoset then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of file descriptors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -2395,7 +2395,7 @@
@@ -2395,10 +2395,10 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The maximum weight of the SEL_ARG graph. Set to 0 for no limit
NUMERIC_MIN_VALUE 0
-NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_MAX_VALUE 4294967295
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -2405,7 +2405,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_PRUNE_LEVEL
VARIABLE_SCOPE SESSION
@ -722,7 +735,7 @@
VARIABLE_COMMENT Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1
@@ -2405,7 +2405,7 @@
@@ -2415,7 +2415,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
@ -731,7 +744,7 @@
VARIABLE_COMMENT Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Values smaller than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 62
@@ -2415,7 +2415,7 @@
@@ -2425,7 +2425,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SELECTIVITY_SAMPLING_LIMIT
VARIABLE_SCOPE SESSION
@ -740,7 +753,7 @@
VARIABLE_COMMENT Controls number of record samples to check condition selectivity
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 4294967295
@@ -2445,17 +2445,17 @@
@@ -2455,17 +2455,17 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_TRACE_MAX_MEM_SIZE
VARIABLE_SCOPE SESSION
@ -761,7 +774,7 @@
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 5
@@ -2475,7 +2475,7 @@
@@ -2485,7 +2485,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME PERFORMANCE_SCHEMA_ACCOUNTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -770,7 +783,7 @@
VARIABLE_COMMENT Maximum number of instrumented user@host accounts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2485,7 +2485,7 @@
@@ -2495,7 +2495,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_DIGESTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -779,7 +792,7 @@
VARIABLE_COMMENT Size of the statement digest. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 200
@@ -2495,7 +2495,7 @@
@@ -2505,7 +2505,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STAGES_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -788,7 +801,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STAGES_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2505,7 +2505,7 @@
@@ -2515,7 +2515,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STAGES_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -797,7 +810,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STAGES_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2515,7 +2515,7 @@
@@ -2525,7 +2525,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STATEMENTS_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -806,7 +819,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STATEMENTS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2525,7 +2525,7 @@
@@ -2535,7 +2535,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_STATEMENTS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -815,7 +828,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2535,7 +2535,7 @@
@@ -2545,7 +2545,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_TRANSACTIONS_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -824,7 +837,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_TRANSACTIONS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2545,7 +2545,7 @@
@@ -2555,7 +2555,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_TRANSACTIONS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -833,7 +846,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_TRANSACTIONS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2555,7 +2555,7 @@
@@ -2565,7 +2565,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_LONG_SIZE
VARIABLE_SCOPE GLOBAL
@ -842,7 +855,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_WAITS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2565,7 +2565,7 @@
@@ -2575,7 +2575,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_EVENTS_WAITS_HISTORY_SIZE
VARIABLE_SCOPE GLOBAL
@ -851,7 +864,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_WAITS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2575,7 +2575,7 @@
@@ -2585,7 +2585,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_HOSTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -860,7 +873,7 @@
VARIABLE_COMMENT Maximum number of instrumented hosts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2585,7 +2585,7 @@
@@ -2595,7 +2595,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_COND_CLASSES
VARIABLE_SCOPE GLOBAL
@ -869,7 +882,7 @@
VARIABLE_COMMENT Maximum number of condition instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2595,7 +2595,7 @@
@@ -2605,7 +2605,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_COND_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -878,7 +891,7 @@
VARIABLE_COMMENT Maximum number of instrumented condition objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2605,7 +2605,7 @@
@@ -2615,7 +2615,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_DIGEST_LENGTH
VARIABLE_SCOPE GLOBAL
@ -887,7 +900,7 @@
VARIABLE_COMMENT Maximum length considered for digest text, when stored in performance_schema tables.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2615,7 +2615,7 @@
@@ -2625,7 +2625,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_CLASSES
VARIABLE_SCOPE GLOBAL
@ -896,7 +909,7 @@
VARIABLE_COMMENT Maximum number of file instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2625,7 +2625,7 @@
@@ -2635,7 +2635,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_HANDLES
VARIABLE_SCOPE GLOBAL
@ -905,7 +918,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented files.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2635,7 +2635,7 @@
@@ -2645,7 +2645,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_FILE_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -914,7 +927,7 @@
VARIABLE_COMMENT Maximum number of instrumented files. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2645,7 +2645,7 @@
@@ -2655,7 +2655,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_INDEX_STAT
VARIABLE_SCOPE GLOBAL
@ -923,7 +936,7 @@
VARIABLE_COMMENT Maximum number of index statistics for instrumented tables. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2655,7 +2655,7 @@
@@ -2665,7 +2665,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MEMORY_CLASSES
VARIABLE_SCOPE GLOBAL
@ -932,7 +945,7 @@
VARIABLE_COMMENT Maximum number of memory pool instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1024
@@ -2665,7 +2665,7 @@
@@ -2675,7 +2675,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_METADATA_LOCKS
VARIABLE_SCOPE GLOBAL
@ -941,7 +954,7 @@
VARIABLE_COMMENT Maximum number of metadata locks. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2675,7 +2675,7 @@
@@ -2685,7 +2685,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MUTEX_CLASSES
VARIABLE_SCOPE GLOBAL
@ -950,7 +963,7 @@
VARIABLE_COMMENT Maximum number of mutex instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2685,7 +2685,7 @@
@@ -2695,7 +2695,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_MUTEX_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -959,7 +972,7 @@
VARIABLE_COMMENT Maximum number of instrumented MUTEX objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2695,7 +2695,7 @@
@@ -2705,7 +2705,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_PREPARED_STATEMENTS_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -968,7 +981,7 @@
VARIABLE_COMMENT Maximum number of instrumented prepared statements. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2705,7 +2705,7 @@
@@ -2715,7 +2715,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_PROGRAM_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -977,7 +990,7 @@
VARIABLE_COMMENT Maximum number of instrumented programs. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2715,7 +2715,7 @@
@@ -2725,7 +2725,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_RWLOCK_CLASSES
VARIABLE_SCOPE GLOBAL
@ -986,7 +999,7 @@
VARIABLE_COMMENT Maximum number of rwlock instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2725,7 +2725,7 @@
@@ -2735,7 +2735,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_RWLOCK_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -995,7 +1008,7 @@
VARIABLE_COMMENT Maximum number of instrumented RWLOCK objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
@@ -2735,7 +2735,7 @@
@@ -2745,7 +2745,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SOCKET_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1004,7 +1017,7 @@
VARIABLE_COMMENT Maximum number of socket instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2745,7 +2745,7 @@
@@ -2755,7 +2755,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SOCKET_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1013,7 +1026,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented sockets. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2755,7 +2755,7 @@
@@ -2765,7 +2765,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_SQL_TEXT_LENGTH
VARIABLE_SCOPE GLOBAL
@ -1022,7 +1035,7 @@
VARIABLE_COMMENT Maximum length of displayed sql text.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
@@ -2765,7 +2765,7 @@
@@ -2775,7 +2775,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STAGE_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1031,7 +1044,7 @@
VARIABLE_COMMENT Maximum number of stage instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2775,7 +2775,7 @@
@@ -2785,7 +2785,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1040,7 +1053,7 @@
VARIABLE_COMMENT Maximum number of statement instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2785,7 +2785,7 @@
@@ -2795,7 +2795,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_STACK
VARIABLE_SCOPE GLOBAL
@ -1049,7 +1062,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_CURRENT.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 256
@@ -2795,7 +2795,7 @@
@@ -2805,7 +2805,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_HANDLES
VARIABLE_SCOPE GLOBAL
@ -1058,7 +1071,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2805,7 +2805,7 @@
@@ -2815,7 +2815,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1067,7 +1080,7 @@
VARIABLE_COMMENT Maximum number of instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2815,7 +2815,7 @@
@@ -2825,7 +2825,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_TABLE_LOCK_STAT
VARIABLE_SCOPE GLOBAL
@ -1076,7 +1089,7 @@
VARIABLE_COMMENT Maximum number of lock statistics for instrumented tables. Use 0 to disable, -1 for automated scaling.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2825,7 +2825,7 @@
@@ -2835,7 +2835,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_THREAD_CLASSES
VARIABLE_SCOPE GLOBAL
@ -1085,7 +1098,7 @@
VARIABLE_COMMENT Maximum number of thread instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
@@ -2835,7 +2835,7 @@
@@ -2845,7 +2845,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_THREAD_INSTANCES
VARIABLE_SCOPE GLOBAL
@ -1094,7 +1107,7 @@
VARIABLE_COMMENT Maximum number of instrumented threads. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2845,7 +2845,7 @@
@@ -2855,7 +2855,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SESSION_CONNECT_ATTRS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1103,7 +1116,7 @@
VARIABLE_COMMENT Size of session attribute string buffer per thread. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2855,7 +2855,7 @@
@@ -2865,7 +2865,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SETUP_ACTORS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1112,7 +1125,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_ACTORS.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
@@ -2865,7 +2865,7 @@
@@ -2875,7 +2875,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_SETUP_OBJECTS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1121,7 +1134,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_OBJECTS.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2875,7 +2875,7 @@
@@ -2885,7 +2885,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_USERS_SIZE
VARIABLE_SCOPE GLOBAL
@ -1130,7 +1143,7 @@
VARIABLE_COMMENT Maximum number of instrumented users. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
@@ -2925,7 +2925,7 @@
@@ -2935,7 +2935,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PRELOAD_BUFFER_SIZE
VARIABLE_SCOPE SESSION
@ -1139,7 +1152,7 @@
VARIABLE_COMMENT The size of the buffer that is allocated when preloading indexes
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
@@ -2945,7 +2945,7 @@
@@ -2955,7 +2955,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME PROFILING_HISTORY_SIZE
VARIABLE_SCOPE SESSION
@ -1148,7 +1161,7 @@
VARIABLE_COMMENT Number of statements about which profiling information is maintained. If set to 0, no profiles are stored. See SHOW PROFILES.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
@@ -2955,7 +2955,7 @@
@@ -2965,7 +2965,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PROGRESS_REPORT_TIME
VARIABLE_SCOPE SESSION
@ -1157,7 +1170,7 @@
VARIABLE_COMMENT Seconds between sending progress reports to the client for time-consuming statements. Set to 0 to disable progress reporting.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -3015,7 +3015,7 @@
@@ -3025,7 +3025,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME QUERY_ALLOC_BLOCK_SIZE
VARIABLE_SCOPE SESSION
@ -1166,7 +1179,7 @@
VARIABLE_COMMENT Allocation block size for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
@@ -3025,7 +3025,7 @@
@@ -3035,7 +3035,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME QUERY_CACHE_LIMIT
VARIABLE_SCOPE GLOBAL
@ -1175,7 +1188,7 @@
VARIABLE_COMMENT Don't cache results that are bigger than this
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -3035,7 +3035,7 @@
@@ -3045,7 +3045,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME QUERY_CACHE_MIN_RES_UNIT
VARIABLE_SCOPE GLOBAL
@ -1184,7 +1197,7 @@
VARIABLE_COMMENT The minimum size for blocks allocated by the query cache
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -3048,7 +3048,7 @@
@@ -3058,7 +3058,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The memory allocated to store results from old queries
NUMERIC_MIN_VALUE 0
@ -1193,7 +1206,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3085,7 +3085,7 @@
@@ -3095,7 +3095,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME QUERY_PREALLOC_SIZE
VARIABLE_SCOPE SESSION
@ -1202,7 +1215,7 @@
VARIABLE_COMMENT Persistent buffer for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
@@ -3098,7 +3098,7 @@
@@ -3108,7 +3108,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@ -1211,7 +1224,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3108,14 +3108,14 @@
@@ -3118,14 +3118,14 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@ -1228,7 +1241,7 @@
VARIABLE_COMMENT Allocation block size for storing ranges during optimization
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 4294967295
@@ -3128,14 +3128,14 @@
@@ -3138,14 +3138,14 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Maximum speed(KB/s) to read binlog from master (0 = no limit)
NUMERIC_MIN_VALUE 0
@ -1245,7 +1258,7 @@
VARIABLE_COMMENT Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
@@ -3155,7 +3155,7 @@
@@ -3165,7 +3165,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME READ_RND_BUFFER_SIZE
VARIABLE_SCOPE SESSION
@ -1254,7 +1267,7 @@
VARIABLE_COMMENT When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 2147483647
@@ -3365,10 +3365,10 @@
@@ -3375,10 +3375,10 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME ROWID_MERGE_BUFF_SIZE
VARIABLE_SCOPE SESSION
@ -1267,7 +1280,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3385,20 +3385,20 @@
@@ -3395,20 +3395,20 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME RPL_SEMI_SYNC_MASTER_TIMEOUT
VARIABLE_SCOPE GLOBAL
@ -1292,7 +1305,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3455,10 +3455,10 @@
@@ -3465,10 +3465,10 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME RPL_SEMI_SYNC_SLAVE_TRACE_LEVEL
VARIABLE_SCOPE GLOBAL
@ -1305,7 +1318,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -3495,7 +3495,7 @@
@@ -3505,7 +3505,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SERVER_ID
VARIABLE_SCOPE SESSION
@ -1314,7 +1327,7 @@
VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
@@ -3635,7 +3635,7 @@
@@ -3645,7 +3645,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_DOMAIN_PARALLEL_THREADS
VARIABLE_SCOPE GLOBAL
@ -1323,7 +1336,7 @@
VARIABLE_COMMENT Maximum number of parallel threads to use on slave for events in a single replication domain. When using multiple domains, this can be used to limit a single domain from grabbing all threads and thus stalling other domains. The default of 0 means to allow a domain to grab as many threads as it wants, up to the value of slave_parallel_threads.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
@@ -3665,7 +3665,7 @@
@@ -3675,7 +3675,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_MAX_ALLOWED_PACKET
VARIABLE_SCOPE GLOBAL
@ -1332,7 +1345,7 @@
VARIABLE_COMMENT The maximum packet length to sent successfully from the master to slave.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
@@ -3685,7 +3685,7 @@
@@ -3695,7 +3695,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_PARALLEL_MAX_QUEUED
VARIABLE_SCOPE GLOBAL
@ -1341,7 +1354,7 @@
VARIABLE_COMMENT Limit on how much memory SQL threads should use per parallel replication thread when reading ahead in the relay log looking for opportunities for parallel replication. Only used when --slave-parallel-threads > 0.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 2147483647
@@ -3705,7 +3705,7 @@
@@ -3715,7 +3715,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME SLAVE_PARALLEL_THREADS
VARIABLE_SCOPE GLOBAL
@ -1350,7 +1363,7 @@
VARIABLE_COMMENT If non-zero, number of threads to spawn to apply in parallel events on the slave that were group-committed on the master or were logged with GTID in different replication domains. Note that these threads are in addition to the IO and SQL threads, which are always created by a replication slave
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
@@ -3715,7 +3715,7 @@
@@ -3725,7 +3725,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_PARALLEL_WORKERS
VARIABLE_SCOPE GLOBAL
@ -1359,7 +1372,7 @@
VARIABLE_COMMENT Alias for slave_parallel_threads
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
@@ -3755,7 +3755,7 @@
@@ -3765,7 +3765,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME SLAVE_TRANSACTION_RETRIES
VARIABLE_SCOPE GLOBAL
@ -1368,7 +1381,7 @@
VARIABLE_COMMENT Number of times the slave SQL thread will retry a transaction in case it failed with a deadlock, elapsed lock wait timeout or listed in slave_transaction_retry_errors, before giving up and stopping
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
@@ -3775,7 +3775,7 @@
@@ -3785,7 +3785,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_TRANSACTION_RETRY_INTERVAL
VARIABLE_SCOPE GLOBAL
@ -1377,7 +1390,7 @@
VARIABLE_COMMENT Interval of the slave SQL thread will retry a transaction in case it failed with a deadlock or elapsed lock wait timeout or listed in slave_transaction_retry_errors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 3600
@@ -3795,7 +3795,7 @@
@@ -3805,7 +3805,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLOW_LAUNCH_TIME
VARIABLE_SCOPE GLOBAL
@ -1386,7 +1399,7 @@
VARIABLE_COMMENT If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
@@ -3838,7 +3838,7 @@
@@ -3848,7 +3848,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Each thread that needs to do a sort allocates a buffer of this size
NUMERIC_MIN_VALUE 1024
@ -1395,7 +1408,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -4065,7 +4065,7 @@
@@ -4075,7 +4075,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME STORED_PROGRAM_CACHE
VARIABLE_SCOPE GLOBAL
@ -1404,7 +1417,7 @@
VARIABLE_COMMENT The soft upper limit for number of cached stored routines for one connection.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 524288
@@ -4165,7 +4165,7 @@
@@ -4175,7 +4175,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME TABLE_DEFINITION_CACHE
VARIABLE_SCOPE GLOBAL
@ -1413,7 +1426,7 @@
VARIABLE_COMMENT The number of cached table definitions
NUMERIC_MIN_VALUE 400
NUMERIC_MAX_VALUE 2097152
@@ -4175,7 +4175,7 @@
@@ -4185,7 +4185,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TABLE_OPEN_CACHE
VARIABLE_SCOPE GLOBAL
@ -1422,7 +1435,7 @@
VARIABLE_COMMENT The number of cached open tables
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 1048576
@@ -4235,7 +4235,7 @@
@@ -4245,7 +4245,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME THREAD_CACHE_SIZE
VARIABLE_SCOPE GLOBAL
@ -1431,7 +1444,7 @@
VARIABLE_COMMENT How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
@@ -4408,7 +4408,7 @@
@@ -4418,7 +4418,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Max size for data for an internal temporary on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 1024
@ -1440,7 +1453,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -4418,7 +4418,7 @@
@@ -4428,7 +4428,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table. Same as tmp_table_size.
NUMERIC_MIN_VALUE 0
@ -1449,7 +1462,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
@@ -4428,14 +4428,14 @@
@@ -4438,14 +4438,14 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Alias for tmp_memory_table_size. If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 0
@ -1466,7 +1479,7 @@
VARIABLE_COMMENT Allocation block size for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
@@ -4445,7 +4445,7 @@
@@ -4455,7 +4455,7 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TRANSACTION_PREALLOC_SIZE
VARIABLE_SCOPE SESSION
@ -1475,7 +1488,7 @@
VARIABLE_COMMENT Persistent buffer for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
@@ -4585,7 +4585,7 @@
@@ -4595,7 +4595,7 @@
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME WAIT_TIMEOUT
VARIABLE_SCOPE SESSION
@ -1484,7 +1497,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on a connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
@@ -4612,7 +4612,7 @@
@@ -4622,7 +4622,7 @@
VARIABLE_NAME LOG_TC_SIZE
GLOBAL_VALUE_ORIGIN AUTO
VARIABLE_SCOPE GLOBAL

View file

@ -2393,6 +2393,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The maximum weight of the SEL_ARG graph. Set to 0 for no limit
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 18446744073709551615
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_PRUNE_LEVEL
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED

View file

@ -4,6 +4,29 @@
#if defined(HAVE_ARMV8_CRC)
#if defined(__APPLE__)
#include <sys/sysctl.h>
static int pmull_supported;
int crc32_aarch64_available(void)
{
int ret;
size_t len = sizeof(ret);
if (sysctlbyname("hw.optional.armv8_crc32", &ret, &len, NULL, 0) == -1)
return 0;
return ret;
}
const char *crc32c_aarch64_available(void)
{
if (crc32_aarch64_available() == 0)
return NULL;
pmull_supported = 1;
return "Using ARMv8 crc32 + pmull instructions";
}
#else
#include <sys/auxv.h>
#if defined(__FreeBSD__)
static unsigned long getauxval(unsigned int key)
@ -50,6 +73,7 @@ const char *crc32c_aarch64_available(void)
return "Using ARMv8 crc32 instructions";
}
#endif /* __APPLE__ */
#endif /* HAVE_ARMV8_CRC */
#ifndef HAVE_ARMV8_CRC_CRYPTO_INTRINSICS
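On macOS/arm64 the added __APPLE__ branch reduces to a single sysctlbyname() probe of hw.optional.armv8_crc32. A minimal standalone sketch of that probe (illustrative only; the real patch feeds the result into crc32c_aarch64_available() and the pmull path, which is not reproduced here):

#include <cstdio>
#include <cstddef>
#ifdef __APPLE__
# include <sys/sysctl.h>
#endif

/* Returns 1 when the kernel reports ARMv8 CRC32 instructions, 0 otherwise. */
static int probe_armv8_crc32(void)
{
#ifdef __APPLE__
  int ret= 0;
  size_t len= sizeof(ret);
  if (sysctlbyname("hw.optional.armv8_crc32", &ret, &len, NULL, 0) == -1)
    return 0;                /* key missing: assume the extension is absent */
  return ret;
#else
  return 0;   /* non-Apple builds use getauxval(AT_HWCAP), as in the patch */
#endif
}

int main(void)
{
  std::printf("ARMv8 crc32 %savailable\n", probe_armv8_crc32() ? "" : "not ");
  return 0;
}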

View file

@ -92,6 +92,61 @@ static handler *partition_create_handler(handlerton *hton,
static uint partition_flags();
static alter_table_operations alter_table_flags(alter_table_operations flags);
int ha_partition::notify_tabledef_changed(LEX_CSTRING *db,
LEX_CSTRING *org_table_name,
LEX_CUSTRING *frm,
LEX_CUSTRING *version)
{
char from_buff[FN_REFLEN + 1], from_lc_buff[FN_REFLEN + 1];
const char *from_path, *name_buffer_ptr, *from;
int res= 0;
handler **file= m_file;
DBUG_ENTER("ha_partition::notify_tabledef_changed");
from= table->s->normalized_path.str;
/* setup m_name_buffer_ptr */
if (read_par_file(table->s->normalized_path.str))
DBUG_RETURN(1);
from_path= get_canonical_filename(*file, from, from_lc_buff);
name_buffer_ptr= m_name_buffer_ptr;
do
{
LEX_CSTRING table_name;
const char *table_name_ptr;
if (create_partition_name(from_buff, sizeof(from_buff),
from_path, name_buffer_ptr,
NORMAL_PART_NAME, FALSE))
res=1;
table_name_ptr= from_buff + dirname_length(from_buff);
lex_string_set3(&table_name, table_name_ptr, strlen(table_name_ptr));
if (((*file)->ht)->notify_tabledef_changed((*file)->ht, db, &table_name,
frm, version, *file))
res=1;
name_buffer_ptr= strend(name_buffer_ptr) + 1;
} while (*(++file));
DBUG_RETURN(res);
}
static int
partition_notify_tabledef_changed(handlerton *,
LEX_CSTRING *db,
LEX_CSTRING *table,
LEX_CUSTRING *frm,
LEX_CUSTRING *version,
handler *file)
{
DBUG_ENTER("partition_notify_tabledef_changed");
DBUG_RETURN(static_cast<ha_partition*>
(file)->notify_tabledef_changed(db, table, frm, version));
}
/*
If frm_error() is called then we will use this to to find out what file
extensions exist for the storage engine. This is also used by the default
@ -149,7 +204,9 @@ static int partition_initialize(void *p)
partition_hton->db_type= DB_TYPE_PARTITION_DB;
partition_hton->create= partition_create_handler;
partition_hton->partition_flags= partition_flags;
partition_hton->notify_tabledef_changed= partition_notify_tabledef_changed;
partition_hton->alter_table_flags= alter_table_flags;
partition_hton->flags= HTON_NOT_USER_SELECTABLE |
HTON_HIDDEN |
@ -211,25 +268,6 @@ static handler *partition_create_handler(handlerton *hton,
return file;
}
/*
HA_CAN_PARTITION:
Used by storage engines that can handle partitioning without this
partition handler
(Partition, NDB)
HA_CAN_UPDATE_PARTITION_KEY:
Set if the handler can update fields that are part of the partition
function.
HA_CAN_PARTITION_UNIQUE:
Set if the handler can handle unique indexes where the fields of the
unique key are not part of the fields of the partition function. Thus
a unique key can be set on all fields.
HA_USE_AUTO_PARTITION
Set if the handler sets all tables to be partitioned by default.
*/
static uint partition_flags()
{
return HA_CAN_PARTITION;

View file

@ -3,7 +3,7 @@
/*
Copyright (c) 2005, 2012, Oracle and/or its affiliates.
Copyright (c) 2009, 2020, MariaDB Corporation.
Copyright (c) 2009, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -1617,6 +1617,9 @@ public:
return part_recs;
}
int notify_tabledef_changed(LEX_CSTRING *db, LEX_CSTRING *table,
LEX_CUSTRING *frm, LEX_CUSTRING *version);
friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
friend int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2);
bool can_convert_string(

View file

@ -2,7 +2,7 @@
#define HANDLER_INCLUDED
/*
Copyright (c) 2000, 2019, Oracle and/or its affiliates.
Copyright (c) 2009, 2020, MariaDB
Copyright (c) 2009, 2021, MariaDB
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@ -1681,7 +1681,8 @@ struct handlerton
*/
int (*notify_tabledef_changed)(handlerton *hton, LEX_CSTRING *db,
LEX_CSTRING *table_name, LEX_CUSTRING *frm,
LEX_CUSTRING *org_tabledef_version);
LEX_CUSTRING *org_tabledef_version,
handler *file);
/*
System Versioning

View file

@ -398,6 +398,11 @@ static SEL_ARG *key_or(RANGE_OPT_PARAM *param,
static SEL_ARG *key_and(RANGE_OPT_PARAM *param,
SEL_ARG *key1, SEL_ARG *key2,
uint clone_flag);
static SEL_ARG *key_or_with_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *key1, SEL_ARG *key2);
static SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *key1, SEL_ARG *key2,
uint clone_flag);
static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1);
bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key,
SEL_ARG *key_tree, uchar *min_key,uint min_key_flag,
@ -409,6 +414,13 @@ static bool null_part_in_key(KEY_PART *key_part, const uchar *key,
uint length);
static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
static
SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *sel_arg);
static
bool sel_arg_and_weight_heuristic(RANGE_OPT_PARAM *param, SEL_ARG *key1,
SEL_ARG *key2);
#include "opt_range_mrr.cc"
static bool sel_trees_have_common_keys(SEL_TREE *tree1, SEL_TREE *tree2,
@ -706,7 +718,8 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
SEL_ARG *key1= (*or_tree)->keys[key_no];
SEL_ARG *key2= tree->keys[key_no];
key2->incr_refs();
if ((result->keys[key_no]= key_or(param, key1, key2)))
if ((result->keys[key_no]= key_or_with_limit(param, key_no, key1,
key2)))
{
result_keys.set_bit(key_no);
@ -1872,9 +1885,13 @@ SEL_ARG::SEL_ARG(SEL_ARG &arg) :Sql_alloc()
next_key_part=arg.next_key_part;
max_part_no= arg.max_part_no;
use_count=1; elements=1;
weight=1;
next= 0;
if (next_key_part)
{
++next_key_part->use_count;
weight += next_key_part->weight;
}
}
@ -1891,7 +1908,7 @@ SEL_ARG::SEL_ARG(Field *f,const uchar *min_value_arg,
:min_flag(0), max_flag(0), maybe_flag(0), maybe_null(f->real_maybe_null()),
elements(1), use_count(1), field(f), min_value((uchar*) min_value_arg),
max_value((uchar*) max_value_arg), next(0),prev(0),
next_key_part(0), color(BLACK), type(KEY_RANGE)
next_key_part(0), color(BLACK), type(KEY_RANGE), weight(1)
{
left=right= &null_element;
max_part_no= 1;
@ -1903,7 +1920,7 @@ SEL_ARG::SEL_ARG(Field *field_,uint8 part_,
:min_flag(min_flag_),max_flag(max_flag_),maybe_flag(maybe_flag_),
part(part_),maybe_null(field_->real_maybe_null()), elements(1),use_count(1),
field(field_), min_value(min_value_), max_value(max_value_),
next(0),prev(0),next_key_part(0),color(BLACK),type(KEY_RANGE)
next(0),prev(0),next_key_part(0),color(BLACK),type(KEY_RANGE), weight(1)
{
max_part_no= part+1;
left=right= &null_element;
@ -2062,6 +2079,7 @@ SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent,
tmp->color= color;
tmp->elements= this->elements;
tmp->max_part_no= max_part_no;
tmp->weight= weight;
return tmp;
}
@ -5447,7 +5465,7 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
if ((*tree)->keys[key_idx])
(*tree)->keys[key_idx]->incr_refs();
if (((*changed_tree)->keys[key_idx]=
key_or(param, key, (*tree)->keys[key_idx])))
key_or_with_limit(param, key_idx, key, (*tree)->keys[key_idx])))
(*changed_tree)->keys_map.set_bit(key_idx);
*tree= NULL;
removed_cnt++;
@ -9018,6 +9036,19 @@ SEL_ARG *Field::stored_field_make_mm_leaf_exact(RANGE_OPT_PARAM *param,
** KEY_RANGE: Condition uses a key
******************************************************************************/
/*
Update weights for SEL_ARG graph that is connected only via next_key_part
(and not left/right) links
*/
static uint update_weight_for_single_arg(SEL_ARG *arg)
{
if (arg->next_key_part)
return (arg->weight= 1 + update_weight_for_single_arg(arg->next_key_part));
else
return (arg->weight= 1);
}
/*
Add a new key test to a key when scanning through all keys
This will never be called for same key parts.
@ -9050,6 +9081,8 @@ sel_add(SEL_ARG *key1,SEL_ARG *key2)
}
}
*key_link=key1 ? key1 : key2;
update_weight_for_single_arg(root);
return root;
}
@ -9116,7 +9149,8 @@ int and_range_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree1, SEL_TREE *tree2,
key2->incr_refs();
}
SEL_ARG *key;
if ((result->keys[key_no]= key =key_and(param, key1, key2, flag)))
if ((result->keys[key_no]= key= key_and_with_limit(param, key_no,
key1, key2, flag)))
{
if (key && key->type == SEL_ARG::IMPOSSIBLE)
{
@ -9678,7 +9712,7 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
key1->incr_refs();
key2->incr_refs();
}
if ((result->keys[key_no]= key_or(param, key1, key2)))
if ((result->keys[key_no]= key_or_with_limit(param, key_no, key1, key2)))
result->keys_map.set_bit(key_no);
}
result->type= tree1->type;
@ -9752,6 +9786,9 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
SEL_ARG *next;
ulong use_count=key1->use_count;
if (sel_arg_and_weight_heuristic(param, key1, key2))
return key1;
if (key1->elements != 1)
{
key2->use_count+=key1->elements-1; //psergey: why we don't count that key1 has n-k-p?
@ -9764,6 +9801,8 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
key1->right= key1->left= &null_element;
key1->next= key1->prev= 0;
}
uint new_weight= 0;
for (next=key1->first(); next ; next=next->next)
{
if (next->next_key_part)
@ -9775,17 +9814,22 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
continue;
}
next->next_key_part=tmp;
new_weight += 1 + tmp->weight;
if (use_count)
next->increment_use_count(use_count);
if (param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS)
break;
}
else
{
new_weight += 1 + key2->weight;
next->next_key_part=key2;
}
}
if (!key1)
return &null_element; // Impossible ranges
key1->use_count++;
key1->weight= new_weight;
key1->max_part_no= MY_MAX(key2->max_part_no, key2->part+1);
return key1;
}
@ -9821,6 +9865,10 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
clone_flag=swap_clone_flag(clone_flag);
}
// key1->part < key2->part
if (sel_arg_and_weight_heuristic(param, key1, key2))
return key1;
key1->use_count--;
if (key1->use_count > 0)
if (!(key1= key1->clone_tree(param)))
@ -9851,6 +9899,9 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
{ // Both are maybe key
key1->next_key_part=key_and(param, key1->next_key_part,
key2->next_key_part, clone_flag);
key1->weight= 1 + (key1->next_key_part? key1->next_key_part->weight : 0);
if (key1->next_key_part &&
key1->next_key_part->type == SEL_ARG::IMPOSSIBLE)
return key1;
@ -9901,6 +9952,9 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
if (!new_arg)
return &null_element; // End of memory
new_arg->next_key_part=next;
if (new_arg->next_key_part)
new_arg->weight += new_arg->next_key_part->weight;
if (!new_tree)
{
new_tree=new_arg;
@ -9939,6 +9993,85 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1)
return 0;
}
#ifndef DBUG_OFF
/*
Verify SEL_TREE's weight.
Recompute the weight and compare
*/
uint SEL_ARG::verify_weight()
{
uint computed_weight= 0;
SEL_ARG *first_arg= first();
if (first_arg)
{
for (SEL_ARG *arg= first_arg; arg; arg= arg->next)
{
computed_weight++;
if (arg->next_key_part)
computed_weight+= arg->next_key_part->verify_weight();
}
}
else
{
// first()=NULL means this is a special kind of SEL_ARG, e.g.
// SEL_ARG with type=MAYBE_KEY
computed_weight= 1;
if (next_key_part)
computed_weight += next_key_part->verify_weight();
}
if (computed_weight != weight)
{
sql_print_error("SEL_ARG weight mismatch: computed %u have %u\n",
computed_weight, weight);
DBUG_ASSERT(computed_weight == weight); // Fail an assertion
}
return computed_weight;
}
#endif
static
SEL_ARG *key_or_with_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *key1, SEL_ARG *key2)
{
#ifndef DBUG_OFF
if (key1)
key1->verify_weight();
if (key2)
key2->verify_weight();
#endif
SEL_ARG *res= key_or(param, key1, key2);
res= enforce_sel_arg_weight_limit(param, keyno, res);
#ifndef DBUG_OFF
if (res)
res->verify_weight();
#endif
return res;
}
static
SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
{
#ifndef DBUG_OFF
if (key1)
key1->verify_weight();
if (key2)
key2->verify_weight();
#endif
SEL_ARG *res= key_and(param, key1, key2, clone_flag);
res= enforce_sel_arg_weight_limit(param, keyno, res);
#ifndef DBUG_OFF
if (res)
res->verify_weight();
#endif
return res;
}
/**
Combine two range expression under a common OR. On a logical level, the
@ -10595,6 +10728,19 @@ end:
}
key1->use_count++;
/* Re-compute the result tree's weight. */
{
uint new_weight= 0;
const SEL_ARG *sl;
for (sl= key1->first(); sl ; sl= sl->next)
{
new_weight++;
if (sl->next_key_part)
new_weight += sl->next_key_part->weight;
}
key1->weight= new_weight;
}
key1->max_part_no= max_part_no;
return key1;
}
@ -10632,6 +10778,160 @@ static bool eq_tree(SEL_ARG* a,SEL_ARG *b)
}
/*
Compute the MAX(key part) in this SEL_ARG graph.
*/
uint SEL_ARG::get_max_key_part() const
{
const SEL_ARG *cur;
uint max_part= part;
for (cur= first(); cur ; cur=cur->next)
{
if (cur->next_key_part)
{
uint mp= cur->next_key_part->get_max_key_part();
max_part= MY_MAX(part, mp);
}
}
return max_part;
}
/*
Remove the SEL_ARG graph elements which have part > max_part.
@detail
Also update weight for the graph and any modified subgraphs.
*/
void prune_sel_arg_graph(SEL_ARG *sel_arg, uint max_part)
{
SEL_ARG *cur;
DBUG_ASSERT(max_part >= sel_arg->part);
for (cur= sel_arg->first(); cur ; cur=cur->next)
{
if (cur->next_key_part)
{
if (cur->next_key_part->part > max_part)
{
// Remove cur->next_key_part.
sel_arg->weight -= cur->next_key_part->weight;
cur->next_key_part= NULL;
}
else
{
uint old_weight= cur->next_key_part->weight;
prune_sel_arg_graph(cur->next_key_part, max_part);
sel_arg->weight -= (old_weight - cur->next_key_part->weight);
}
}
}
}
/*
@brief
Make sure the passed SEL_ARG graph's weight is below SEL_ARG::MAX_WEIGHT,
by cutting off branches if necessary.
@detail
@see declaration of SEL_ARG::weight for definition of weight.
This function attempts to reduce the graph's weight by cutting off
SEL_ARG::next_key_part connections if necessary.
We start with maximum used keypart and then remove one keypart after
another until the graph's weight is within the limit.
@seealso
sel_arg_and_weight_heuristic();
@return
tree pointer The tree after processing,
NULL If it was not possible to reduce the weight of the tree below the
limit.
*/
SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
SEL_ARG *sel_arg)
{
if (!sel_arg || sel_arg->type != SEL_ARG::KEY_RANGE ||
!param->thd->variables.optimizer_max_sel_arg_weight)
return sel_arg;
Field *field= sel_arg->field;
uint weight1= sel_arg->weight;
while (1)
{
if (likely(sel_arg->weight <= param->thd->variables.
optimizer_max_sel_arg_weight))
break;
uint max_part= sel_arg->get_max_key_part();
if (max_part == sel_arg->part)
{
/*
We don't return NULL right away as we want to have the information
about the changed tree in the optimizer trace.
*/
sel_arg= NULL;
break;
}
max_part--;
prune_sel_arg_graph(sel_arg, max_part);
}
uint weight2= sel_arg? sel_arg->weight : 0;
if (weight2 != weight1)
{
Json_writer_object wrapper(param->thd);
Json_writer_object obj(param->thd, "enforce_sel_arg_weight_limit");
if (param->using_real_indexes)
obj.add("index", param->table->key_info[param->real_keynr[keyno]].name);
else
obj.add("pseudo_index", field->field_name);
obj.add("old_weight", (longlong)weight1);
obj.add("new_weight", (longlong)weight2);
}
return sel_arg;
}
/*
@detail
Do not combine the trees if their total weight is likely to exceed the
MAX_WEIGHT.
(It is possible that key1 has next_key_part that has empty overlap with
key2. In this case, the combined tree will have a smaller weight than we
predict. We assume this is rare.)
*/
static
bool sel_arg_and_weight_heuristic(RANGE_OPT_PARAM *param, SEL_ARG *key1,
SEL_ARG *key2)
{
DBUG_ASSERT(key1->part < key2->part);
ulong max_weight= param->thd->variables.optimizer_max_sel_arg_weight;
if (max_weight && key1->weight + key1->elements*key2->weight > max_weight)
{
Json_writer_object wrapper(param->thd);
Json_writer_object obj(param->thd, "sel_arg_weight_heuristic");
obj.add("key1_field", key1->field->field_name);
obj.add("key2_field", key2->field->field_name);
obj.add("key1_weight", (longlong)key1->weight);
obj.add("key2_weight", (longlong)key2->weight);
return true; // Discard key2
}
return false;
}
SEL_ARG *
SEL_ARG::insert(SEL_ARG *key)
{
@ -10670,6 +10970,13 @@ SEL_ARG::insert(SEL_ARG *key)
SEL_ARG *root=rb_insert(key); // rebalance tree
root->use_count=this->use_count; // copy root info
root->elements= this->elements+1;
/*
The new weight is:
old root's weight
+1 for the weight of the added element
+ next_key_part's weight of the added element
*/
root->weight = weight + 1 + (key->next_key_part? key->next_key_part->weight: 0);
root->maybe_flag=this->maybe_flag;
return root;
}
@ -10727,6 +11034,17 @@ SEL_ARG::tree_delete(SEL_ARG *key)
root=this;
this->parent= 0;
/*
Compute the weight the tree will have after the element is removed.
We remove the element itself (weight=1)
and the sub-graph connected to its next_key_part.
*/
uint new_weight= root->weight - (1 + (key->next_key_part?
key->next_key_part->weight : 0));
DBUG_ASSERT(root->weight >= (1 + (key->next_key_part ?
key->next_key_part->weight : 0)));
/* Unlink from list */
if (key->prev)
key->prev->next=key->next;
@ -10778,6 +11096,7 @@ SEL_ARG::tree_delete(SEL_ARG *key)
test_rb_tree(root,root->parent);
root->use_count=this->use_count; // Fix root counters
root->weight= new_weight;
root->elements=this->elements-1;
root->maybe_flag=this->maybe_flag;
DBUG_RETURN(root);
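Taken together, the opt_range.cc changes above keep SEL_ARG::weight up to date incrementally and cut the graph back when it exceeds optimizer_max_sel_arg_weight: enforce_sel_arg_weight_limit() repeatedly drops the deepest key part via prune_sel_arg_graph() until the weight fits. The following self-contained toy models that loop on a simplified node type (one shared range list per key part, no red-black tree; names and numbers are illustrative, not the server's):

#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

// One key part's list of ranges; entry i of `next` is the continuation of
// range i on the following key part (a stand-in for SEL_ARG::next_key_part).
struct Graph
{
  unsigned part;                            // key part number, 1-based
  std::vector<std::shared_ptr<Graph>> next; // one entry per range, may be null
};

// Weight as defined for SEL_ARG: count each range plus the weight of its
// continuation, so shared continuations are counted once per reference.
static unsigned weight(const Graph &g)
{
  unsigned w= 0;
  for (const auto &n : g.next)
    w+= 1 + (n ? weight(*n) : 0);
  return w;
}

static unsigned max_key_part(const Graph &g)
{
  unsigned mp= g.part;
  for (const auto &n : g.next)
    if (n)
      mp= std::max(mp, max_key_part(*n));
  return mp;
}

// Drop every continuation describing key parts deeper than max_part,
// mirroring prune_sel_arg_graph().
static void prune(Graph &g, unsigned max_part)
{
  for (auto &n : g.next)
  {
    if (!n)
      continue;
    if (n->part > max_part)
      n= nullptr;
    else
      prune(*n, max_part);
  }
}

// Mirror enforce_sel_arg_weight_limit(): shave off the deepest key part
// until the weight fits; give up when only one key part is left.
static bool enforce_limit(Graph &g, unsigned limit)
{
  while (weight(g) > limit)
  {
    unsigned mp= max_key_part(g);
    if (mp == g.part)
      return false;
    prune(g, mp - 1);
  }
  return true;
}

int main()
{
  // kp1 has 3 ranges, each chained to a kp2 list of 4 ranges, each of those
  // chained to a kp3 list of 2 ranges: weight = 3 + 3*(4 + 4*2) = 39.
  auto kp3= std::make_shared<Graph>(Graph{3, {nullptr, nullptr}});
  auto kp2= std::make_shared<Graph>(Graph{2, {kp3, kp3, kp3, kp3}});
  Graph kp1{1, {kp2, kp2, kp2}};

  std::printf("weight before: %u\n", weight(kp1)); // 39
  enforce_limit(kp1, 20);                          // drops kp3 continuations
  std::printf("weight after:  %u\n", weight(kp1)); // 3 + 3*4 = 15
  return 0;
}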

View file

@ -223,6 +223,50 @@ class RANGE_OPT_PARAM;
We avoid consuming too much memory by setting a limit on the number of
SEL_ARG object we can construct during one range analysis invocation.
5. SEL_ARG GRAPH WEIGHT
A SEL_ARG graph has a property we call weight, and we define it as follows:
<definition>
If the SEL_ARG graph does not have any node with multiple incoming
next_key_part edges, then its weight is the number of SEL_ARG objects used.
If there is a node with multiple incoming next_key_part edges, clone that
node, (and the nodes connected to it via prev/next links) and redirect one
of the incoming next_key_part edges to the clone.
Continue with cloning until we get a graph that has no nodes with multiple
incoming next_key_part edges. Then, the number of SEL_ARG objects in the
graph is the weight of the original graph.
</definition>
Example:
       kp1     $     kp2      $       kp3
               $              $
 |  +-------+  $              $
 \->| kp1=2 |--$--------------$-+
    +-------+  $              $ |  +--------+
        |      $              $ ==>| kp3=11 |
    +-------+  $              $ |  +--------+
    | kp1>3 |--$--------------$-+       |
    +-------+  $              $    +--------+
               $              $    | kp3=14 |
               $              $    +--------+
               $              $         |
               $              $    +--------+
               $              $    | kp3=14 |
               $              $    +--------+
Here, the weight is 2 + 2*3=8.
The rationale behind using this definition of weight is:
- it has the same order-of-magnitude as the number of ranges that the
SEL_ARG graph is describing,
- it is a lot easier to compute than computing the number of ranges,
- it can be updated incrementally when performing AND/OR operations on
parts of the graph.
*/
class SEL_ARG :public Sql_alloc
@ -236,6 +280,9 @@ public:
/*
The ordinal number the least significant component encountered in
the ranges of the SEL_ARG tree (the first component has number 1)
Note: this number is currently not precise, it is an upper bound.
@seealso SEL_ARG::get_max_key_part()
*/
uint16 max_part_no;
/*
@ -263,6 +310,17 @@ public:
enum leaf_color { BLACK,RED } color;
enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type;
/*
For R-B root nodes only: the graph weight, as defined above in the
SEL_ARG GRAPH WEIGHT section.
*/
uint weight;
enum { MAX_WEIGHT = 32000 };
#ifndef DBUG_OFF
uint verify_weight();
#endif
/* See RANGE_OPT_PARAM::alloced_sel_args */
enum { MAX_SEL_ARGS = 16000 };
SEL_ARG() {}
@ -273,7 +331,7 @@ public:
SEL_ARG(enum Type type_arg)
:min_flag(0), max_part_no(0) /* first key part means 1. 0 mean 'no parts'*/,
elements(1),use_count(1),left(0),right(0),
next_key_part(0), color(BLACK), type(type_arg)
next_key_part(0), color(BLACK), type(type_arg), weight(1)
{}
/**
returns true if a range predicate is equal. Use all_same()
@ -287,6 +345,9 @@ public:
return true;
return cmp_min_to_min(arg) == 0 && cmp_max_to_max(arg) == 0;
}
uint get_max_key_part() const;
/**
returns true if all the predicates in the keypart tree are equal
*/
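To make the SEL_ARG GRAPH WEIGHT definition added above concrete, the figure's number can be reproduced with a few lines of illustrative C++ (toy structure, not the real SEL_ARG): the two kp1 ranges share one kp3 chain of three ranges, and counting that chain once per incoming edge gives 2 + 2*3 = 8.

#include <cstdio>
#include <vector>

struct Chain;                         // ranges of one deeper key part

struct Range
{
  const char *label;                  // e.g. "kp1=2"
  const Chain *next_key_part;         // shared continuation, or nullptr
};

struct Chain
{
  std::vector<Range> ranges;
};

// Number of SEL_ARG objects after conceptually cloning every continuation
// once per incoming next_key_part edge.
static unsigned weight(const Chain &c)
{
  unsigned w= 0;
  for (const Range &r : c.ranges)
    w+= 1 + (r.next_key_part ? weight(*r.next_key_part) : 0);
  return w;
}

int main()
{
  Chain kp3{{{"kp3=11", nullptr}, {"kp3=14", nullptr}, {"kp3=14", nullptr}}};
  Chain kp1{{{"kp1=2", &kp3}, {"kp1>3", &kp3}}};  // both ranges share kp3
  std::printf("weight = %u\n", weight(kp1));      // prints 8
  return 0;
}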

View file

@ -815,6 +815,7 @@ typedef struct system_variables
uint column_compression_threshold;
uint column_compression_zlib_level;
uint in_subquery_conversion_threshold;
ulong optimizer_max_sel_arg_weight;
ulonglong max_rowid_filter_size;
vers_asof_timestamp_t vers_asof_timestamp;

View file

@ -4549,6 +4549,63 @@ void JOIN::cleanup_item_list(List<Item> &items) const
}
/**
@brief
Look for provision of the select_handler interface by a foreign engine
@param thd The thread handler
@details
The function checks that this is an upper level select and if so looks
through its tables searching for one whose handlerton owns a
create_select call-back function. If the call of this function returns
a select_handler interface object then the server will push the select
query into this engine.
This is a responsibility of the create_select call-back function to
check whether the engine can execute the query.
@retval the found select_handler if the search is successful
0 otherwise
*/
select_handler *find_select_handler(THD *thd,
SELECT_LEX* select_lex)
{
if (select_lex->next_select())
return 0;
if (select_lex->master_unit()->outer_select())
return 0;
TABLE_LIST *tbl= nullptr;
// For SQLCOM_INSERT_SELECT the server takes TABLE_LIST
// from thd->lex->query_tables and skips its first table
// b/c it is the target table for the INSERT..SELECT.
if (thd->lex->sql_command != SQLCOM_INSERT_SELECT)
{
tbl= select_lex->join->tables_list;
}
else if (thd->lex->query_tables &&
thd->lex->query_tables->next_global)
{
tbl= thd->lex->query_tables->next_global;
}
else
return 0;
for (;tbl; tbl= tbl->next_global)
{
if (!tbl->table)
continue;
handlerton *ht= tbl->table->file->partition_ht();
if (!ht->create_select)
continue;
select_handler *sh= ht->create_select(thd, select_lex);
return sh;
}
return 0;
}
/**
An entry point to single-unit select (a select without UNION).
@ -4653,7 +4710,7 @@ mysql_select(THD *thd, TABLE_LIST *tables, List<Item> &fields, COND *conds,
}
/* Look for a table owned by an engine with the select_handler interface */
select_lex->pushdown_select= select_lex->find_select_handler(thd);
select_lex->pushdown_select= find_select_handler(thd, select_lex);
if ((err= join->optimize()))
{
@ -29040,46 +29097,6 @@ void JOIN_TAB::partial_cleanup()
free_cache(&read_record);
}
/**
@brief
Look for provision of the select_handler interface by a foreign engine
@param thd The thread handler
@details
The function checks that this is an upper level select and if so looks
through its tables searching for one whose handlerton owns a
create_select call-back function. If the call of this function returns
a select_handler interface object then the server will push the select
query into this engine.
This is a responsibility of the create_select call-back function to
check whether the engine can execute the query.
@retval the found select_handler if the search is successful
0 otherwise
*/
select_handler *SELECT_LEX::find_select_handler(THD *thd)
{
if (next_select())
return 0;
if (master_unit()->outer_select())
return 0;
for (TABLE_LIST *tbl= join->tables_list; tbl; tbl= tbl->next_global)
{
if (!tbl->table)
continue;
handlerton *ht= tbl->table->file->partition_ht();
if (!ht->create_select)
continue;
select_handler *sh= ht->create_select(thd, this);
return sh;
}
return 0;
}
/**
@brief
Construct not null conditions for provingly not nullable fields
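The relocated find_select_handler() above implements a small policy: only for a top-level, non-UNION select, walk the statement's tables (skipping the INSERT target for INSERT ... SELECT) and let the first engine that exposes a create_select callback decide whether to take the query. A stripped-down model of that walk (hypothetical types, not the server's TABLE_LIST/handlerton structures):

#include <cstdio>
#include <functional>
#include <vector>

struct PushdownHandler { const char *engine; };

struct Tbl
{
  const char *name;
  // Engine callback; empty when the engine has no pushdown support, and it
  // may return nullptr when it declines this particular query.
  std::function<PushdownHandler *()> create_select;
};

static PushdownHandler *find_pushdown(const std::vector<Tbl> &tables,
                                      bool insert_select)
{
  // For INSERT ... SELECT the first table is the INSERT target and is not
  // part of the SELECT, so the scan starts at the second entry.
  for (size_t i= insert_select ? 1 : 0; i < tables.size(); i++)
  {
    if (!tables[i].create_select)
      continue;                        // engine cannot push down anything
    return tables[i].create_select();  // first capable engine decides
  }
  return nullptr;
}

int main()
{
  std::vector<Tbl> tables{
    {"t_innodb", {}},                                             // no pushdown
    {"t_foreign", []{ return new PushdownHandler{"foreign"}; }}}; // offers one
  PushdownHandler *sh= find_pushdown(tables, /*insert_select=*/false);
  std::printf("pushdown engine: %s\n", sh ? sh->engine : "none");
  delete sh;
  return 0;
}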

View file

@ -7934,7 +7934,6 @@ static bool mysql_inplace_alter_table(THD *thd,
Alter_info *alter_info= ha_alter_info->alter_info;
bool reopen_tables= false;
bool res;
handlerton *hton;
const enum_alter_inplace_result inplace_supported=
ha_alter_info->inplace_supported;
@ -8145,20 +8144,22 @@ static bool mysql_inplace_alter_table(THD *thd,
/* Notify the engine that the table definition has changed */
hton= table->file->partition_ht();
if (hton->notify_tabledef_changed)
if (table->file->partition_ht()->notify_tabledef_changed)
{
char db_buff[FN_REFLEN], table_buff[FN_REFLEN];
handlerton *hton= table->file->ht;
LEX_CSTRING tmp_db, tmp_table;
tmp_db.str= db_buff;
tmp_table.str= table_buff;
tmp_db.str= db_buff;
tmp_table.str= table_buff;
tmp_db.length= tablename_to_filename(table_list->db.str,
db_buff, sizeof(db_buff));
tmp_table.length= tablename_to_filename(table_list->table_name.str,
table_buff, sizeof(table_buff));
if ((hton->notify_tabledef_changed)(hton, &tmp_db, &tmp_table,
table->s->frm_image,
&table->s->tabledef_version))
&table->s->tabledef_version,
table->file))
{
my_error(HA_ERR_INCOMPATIBLE_DEFINITION, MYF(0));
DBUG_RETURN(true);

View file

@ -6711,6 +6711,12 @@ static Sys_var_uint Sys_in_subquery_conversion_threshold(
SESSION_VAR(in_subquery_conversion_threshold), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD), BLOCK_SIZE(1));
static Sys_var_ulong Sys_optimizer_max_sel_arg_weight(
"optimizer_max_sel_arg_weight",
"The maximum weight of the SEL_ARG graph. Set to 0 for no limit",
SESSION_VAR(optimizer_max_sel_arg_weight), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, ULONG_MAX), DEFAULT(SEL_ARG::MAX_WEIGHT), BLOCK_SIZE(1));
static Sys_var_enum Sys_secure_timestamp(
"secure_timestamp", "Restricts direct setting of a session "
"timestamp. Possible levels are: YES - timestamp cannot deviate from "

View file

@ -92,7 +92,7 @@ static ST_FIELD_INFO queues_field_info[] =
Column("GROUP_ID", SLong(6), NOT_NULL),
Column("POSITION", SLong(6), NOT_NULL),
Column("PRIORITY", SLong(1), NOT_NULL),
Column("CONNECTION_ID", ULonglong(19), NOT_NULL),
Column("CONNECTION_ID", ULonglong(19), NULLABLE),
Column("QUEUEING_TIME_MICROSECONDS", SLonglong(19), NOT_NULL),
CEnd()
};
@ -130,7 +130,8 @@ static int queues_fill_table(THD* thd, TABLE_LIST* tables, COND*)
/* PRIORITY */
table->field[2]->store(prio, true);
/* CONNECTION_ID */
table->field[3]->store(c->thd->thread_id, true);
if (c->thd)
table->field[3]->store(c->thd->thread_id, true);
/* QUEUEING_TIME */
table->field[4]->store(now - c->enqueue_time, true);

View file

@ -381,9 +381,7 @@ void wsrep_register_for_group_commit(THD *thd)
void wsrep_unregister_from_group_commit(THD *thd)
{
DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_ordered_commit||
// ordered_commit() failure results in s_aborting state
thd->wsrep_trx().state() == wsrep::transaction::s_aborting);
DBUG_ASSERT(thd->wsrep_trx().ordered());
wait_for_commit *wfc= thd->wait_for_commit_ptr;
if (wfc)

View file

@ -335,6 +335,10 @@ buf_dump(
continue;
}
if (bpage->status == buf_page_t::FREED) {
continue;
}
dump[j++] = id;
}

View file

@ -1717,6 +1717,11 @@ fil_crypt_get_page_throttle(
return NULL;
}
if (fseg_page_is_free(space, state->offset)) {
/* page is already freed */
return NULL;
}
state->crypt_stat.pages_read_from_disk++;
const ulonglong start = my_interval_timer();
@ -1819,6 +1824,9 @@ fil_crypt_rotate_page(
some dummy pages will be allocated, with 0 in
the FIL_PAGE_TYPE. Those pages should be
skipped from key rotation forever. */
} else if (block->page.status == buf_page_t::FREED) {
/* Do not modify freed pages to avoid an assertion
failure on recovery.*/
} else if (fil_crypt_needs_rotation(
crypt_data,
kv,

View file

@ -2631,9 +2631,9 @@ fseg_free_extent(
fsp_free_extent(space, page, mtr);
for (ulint i = 0; i < FSP_EXTENT_SIZE; i++) {
for (uint32_t i = 0; i < FSP_EXTENT_SIZE; i++) {
if (!xdes_is_free(descr, i)) {
buf_page_free(space, first_page_in_extent + 1, mtr);
buf_page_free(space, first_page_in_extent + i, mtr);
}
}
}
@ -2700,10 +2700,11 @@ fseg_free_step(
DBUG_RETURN(true);
}
fseg_free_page_low(
inode, iblock, space,
fseg_get_nth_frag_page_no(inode, n),
mtr);
page_no_t page_no = fseg_get_nth_frag_page_no(inode, n);
fseg_free_page_low(inode, iblock, space, page_no, mtr);
buf_page_free(space, page_no, mtr);
n = fseg_find_last_used_frag_page_slot(inode);
@ -2765,6 +2766,7 @@ fseg_free_step_not_header(
}
fseg_free_page_low(inode, iblock, space, page_no, mtr);
buf_page_free(space, page_no, mtr);
return false;
}
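For clarity, the fseg_free_extent() change above is an index fix: the still-used pages of the extent live at first_page_in_extent + i for each non-free descriptor slot i, while the old code always passed + 1. A toy version of the corrected loop (plain arrays instead of XDES descriptors; EXTENT_SIZE is an arbitrary stand-in for FSP_EXTENT_SIZE):

#include <cstdio>

enum { EXTENT_SIZE= 8 };

// Release every page the descriptor still marks as used, addressing each one
// by its own offset i (the bug was using the constant 1).
static void free_used_pages(unsigned first_page_in_extent,
                            const bool (&is_free)[EXTENT_SIZE])
{
  for (unsigned i= 0; i < EXTENT_SIZE; i++)
    if (!is_free[i])
      std::printf("free page %u\n", first_page_in_extent + i);
}

int main()
{
  const bool is_free[EXTENT_SIZE]= {true, false, true, true,
                                    false, true, true, false};
  free_used_pages(1024, is_free);   // frees pages 1025, 1028 and 1031
  return 0;
}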

View file

@ -1357,6 +1357,30 @@ innobase_show_status(
stat_print_fn* stat_print,
enum ha_stat_type stat_type);
/** After ALTER TABLE, recompute statistics. */
inline void ha_innobase::reload_statistics()
{
if (dict_table_t *table= m_prebuilt ? m_prebuilt->table : nullptr)
{
if (table->is_readable())
dict_stats_init(table);
else
table->stat_initialized= 1;
}
}
/** After ALTER TABLE, recompute statistics. */
static int innodb_notify_tabledef_changed(handlerton *,
LEX_CSTRING *, LEX_CSTRING *,
LEX_CUSTRING *, LEX_CUSTRING *,
handler *handler)
{
DBUG_ENTER("innodb_notify_tabledef_changed");
if (handler)
static_cast<ha_innobase*>(handler)->reload_statistics();
DBUG_RETURN(0);
}
/****************************************************************//**
Parse and enable InnoDB monitor counters during server startup.
User can enable monitor counters/groups by specifying
@ -3591,6 +3615,7 @@ static int innodb_init(void* p)
innobase_hton->flush_logs = innobase_flush_logs;
innobase_hton->show_status = innobase_show_status;
innobase_hton->notify_tabledef_changed= innodb_notify_tabledef_changed;
innobase_hton->flags =
HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS
| HTON_NATIVE_SYS_VERSIONING | HTON_WSREP_REPLICATION;
@ -15672,6 +15697,7 @@ innobase_show_status(
/* Success */
return(false);
}
/*********************************************************************//**
Returns number of THR_LOCK locks used for one instance of InnoDB table.
InnoDB no longer relies on THR_LOCK locks so 0 value is returned.
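The handler * argument added to notify_tabledef_changed (see the handlerton change earlier in this commit) is what lets InnoDB reach per-table state here: innodb_notify_tabledef_changed() casts it to ha_innobase and calls reload_statistics(). A minimal model of that dispatch (invented names, not the storage-engine API; the toy uses dynamic_cast where the server can safely static_cast):

#include <cstdio>

// Toy base class standing in for `handler`.
struct Handler
{
  virtual ~Handler() {}
};

// Toy InnoDB-like handler with per-table state to refresh.
struct InnoHandler : Handler
{
  void reload_statistics() { std::printf("statistics reloaded\n"); }
};

// Engine callback: generic code only holds a Handler*, but the engine knows
// the concrete type and can reach its own per-table data through it.
static int notify_tabledef_changed(Handler *file)
{
  if (InnoHandler *h= dynamic_cast<InnoHandler *>(file))
    h->reload_statistics();
  return 0;
}

int main()
{
  InnoHandler h;
  return notify_tabledef_changed(&h);
}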

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2020, MariaDB Corporation.
Copyright (c) 2013, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -211,6 +211,8 @@ public:
int check(THD* thd, HA_CHECK_OPT* check_opt) override;
char* update_table_comment(const char* comment) override;
inline void reload_statistics();
char* get_foreign_key_create_info() override;
int get_foreign_key_list(THD *thd,

View file

@ -11258,12 +11258,18 @@ foreign_fail:
&& m_prebuilt->table->n_v_cols
&& ha_alter_info->handler_flags & ALTER_STORED_COLUMN_ORDER)) {
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
ut_ad(ctx0->prebuilt == m_prebuilt);
trx_commit_for_mysql(m_prebuilt->trx);
m_prebuilt->table = innobase_reload_table(m_user_thd,
m_prebuilt->table,
table->s->table_name,
*ctx0);
for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx;
pctx++) {
auto ctx= static_cast<ha_innobase_inplace_ctx*>(*pctx);
ctx->prebuilt->table = innobase_reload_table(
m_user_thd, ctx->prebuilt->table,
table->s->table_name, *ctx);
innobase_copy_frm_flags_from_table_share(
ctx->prebuilt->table, altered_table->s);
}
row_mysql_unlock_data_dictionary(trx);
trx->free();

View file

@ -2272,12 +2272,13 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
ut_d(lsn_t recv_start_lsn = 0);
const lsn_t init_lsn = init ? init->lsn : 0;
bool skipped_after_init = false;
for (const log_rec_t* recv : p->second.log) {
const log_phys_t* l = static_cast<const log_phys_t*>(recv);
ut_ad(l->lsn);
ut_ad(end_lsn <= l->lsn);
end_lsn = l->lsn;
ut_ad(end_lsn <= log_sys.log.scanned_lsn);
ut_ad(l->lsn <= log_sys.log.scanned_lsn);
ut_ad(l->start_lsn);
ut_ad(recv_start_lsn <= l->start_lsn);
@ -2290,6 +2291,8 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
block->page.id().space(),
block->page.id().page_no(),
l->start_lsn, page_lsn));
skipped_after_init = true;
end_lsn = l->lsn;
continue;
}
@ -2299,9 +2302,24 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
block->page.id().space(),
block->page.id().page_no(),
l->start_lsn, init_lsn));
skipped_after_init = false;
end_lsn = l->lsn;
continue;
}
/* There is no need to check LSN for just initialized pages. */
if (skipped_after_init) {
skipped_after_init = false;
ut_ad(end_lsn == page_lsn);
if (end_lsn != page_lsn)
ib::warn()
<< "The last skipped log record LSN "
<< end_lsn
<< " is not equal to page LSN "
<< page_lsn;
}
end_lsn = l->lsn;
if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) {
ib::info() << "apply " << l->start_lsn

View file

@ -1,4 +1,4 @@
/* Copyright (C) 2019, 2020 MariaDB Corporation Ab
/* Copyright (C) 2019, 2021 MariaDB Corporation Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -887,10 +887,11 @@ int ha_s3::discover_check_version()
Update the .frm file in S3
*/
static int s3_notify_tabledef_changed(handlerton *hton __attribute__((unused)),
static int s3_notify_tabledef_changed(handlerton *,
LEX_CSTRING *db, LEX_CSTRING *table,
LEX_CUSTRING *frm,
LEX_CUSTRING *org_tabledef_version)
LEX_CUSTRING *org_tabledef_version,
handler *)
{
char aws_path[AWS_PATH_LENGTH];
S3_INFO s3_info;
@ -898,6 +899,9 @@ static int s3_notify_tabledef_changed(handlerton *hton __attribute__((unused)),
int error= 0;
DBUG_ENTER("s3_notify_tabledef_changed");
if (strstr(table->str, "#P#"))
DBUG_RETURN(0); // Ignore partitions
if (s3_info_init(&s3_info))
DBUG_RETURN(0);
if (!(s3_client= s3_open_connection(&s3_info)))
@ -916,7 +920,7 @@ static int s3_notify_tabledef_changed(handlerton *hton __attribute__((unused)),
NullS);
if (s3_put_object(s3_client, s3_info.bucket.str, aws_path, (uchar*) frm->str,
frm->length, 0))
frm->length, 0))
error= 2;
err: