Merge 10.4 into 10.5

Marko Mäkelä 2023-01-13 10:18:30 +02:00
commit 73ecab3d26
81 changed files with 1810 additions and 602 deletions

View file

@ -20,3 +20,21 @@ select (select max(m2.ut) from t1 m2 where m1.id <> 0) from t1 m1;
2001-01-01 00:00:00.200000
2001-01-01 00:00:00.200000
drop table t1;
#
# MDEV-30345 DML does not find rows it is supposed to
#
CREATE TABLE t1 (f timestamp);
INSERT INTO t1 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12');
CREATE TABLE t2 (f timestamp);
INSERT INTO t2 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12');
SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
f
2022-01-01 00:00:00
2022-12-12 12:12:12
DELETE FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
f
DROP TABLE t1,t2;
#
# End of 10.4 tests
#

View file

@ -20,3 +20,21 @@ select * from t1;
select (select max(m2.ut) from t1 m2 where m1.id <> 0) from t1 m1;
drop table t1;
--echo #
--echo # MDEV-30345 DML does not find rows it is supposed to
--echo #
CREATE TABLE t1 (f timestamp);
INSERT INTO t1 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12');
CREATE TABLE t2 (f timestamp);
INSERT INTO t2 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12');
SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
DELETE FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ;
DROP TABLE t1,t2;
--echo #
--echo # End of 10.4 tests
--echo #

View file

@ -2031,6 +2031,17 @@ Warnings:
Warning 1280 Name 'foo' ignored for PRIMARY key.
DROP TABLE t1;
#
# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
#
create table t1(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
@@sql_mode like '%strict_all_tables%'
0
create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1;
ERROR 22007: Truncated incorrect DECIMAL value: '#'
drop table t1;
#
# End of 10.3 tests
#
#

View file

@ -1892,6 +1892,19 @@ create table t1 (c int(10) unsigned) engine=memory transactional=0;
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
DROP TABLE t1;
--echo #
--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
--echo #
create table t1(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
--error ER_TRUNCATED_WRONG_VALUE
create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1;
drop table t1;
--echo #
--echo # End of 10.3 tests
--echo #

View file

@ -3463,5 +3463,26 @@ SELECT 1 FROM t1 GROUP BY DEFAULT(pk);
1
DROP TABLE t1;
#
# MDEV-29890 Update with inner join false row count result
#
create table t1 (a int not null);
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='t1';
column_name column_default has_default is_nullable
a NULL 0 NO
create or replace view v1 as select * from t1;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
column_name column_default has_default is_nullable
a NULL 0 NO
create or replace view v1 as select * from t1 group by a;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
column_name column_default has_default is_nullable
a NULL 0 NO
create or replace view v1 as select * from t1 group by a with rollup;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
column_name column_default has_default is_nullable
a NULL 1 YES
drop view v1;
drop table t1;
#
# End of 10.4 test
#

View file

@ -2169,6 +2169,20 @@ INSERT INTO t1 VALUES (),();
SELECT 1 FROM t1 GROUP BY DEFAULT(pk);
DROP TABLE t1;
--echo #
--echo # MDEV-29890 Update with inner join false row count result
--echo #
create table t1 (a int not null);
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='t1';
create or replace view v1 as select * from t1;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
create or replace view v1 as select * from t1 group by a;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
create or replace view v1 as select * from t1 group by a with rollup;
select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1';
drop view v1;
drop table t1;
--echo #
--echo # End of 10.4 test
--echo #

View file

@ -1,4 +1,3 @@
drop table if exists t1,t2,t3,t4,t5,t6;
set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';
@ -2567,5 +2566,15 @@ stddev_samp(i) stddev_pop(i) stddev(i) std(i)
drop view v1;
drop table t1;
#
# MDEV-29988: Major performance regression with 10.6.11
#
create table t1 (a varchar(10) charset utf8mb4, b int, c int);
insert t1 values (1,2,3),(4,5,6),(1,7,8);
select concat(a,":",group_concat(b)) from t1 group by a;
concat(a,":",group_concat(b))
1:2,7
4:5
drop table t1;
#
# End of 10.3 tests
#

View file

@ -2,10 +2,6 @@
# simple test of all group functions
#
--disable_warnings
drop table if exists t1,t2,t3,t4,t5,t6;
--enable_warnings
set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';
@ -1801,6 +1797,14 @@ select * from v1;
drop view v1;
drop table t1;
--echo #
--echo # MDEV-29988: Major performance regression with 10.6.11
--echo #
create table t1 (a varchar(10) charset utf8mb4, b int, c int);
insert t1 values (1,2,3),(4,5,6),(1,7,8);
select concat(a,":",group_concat(b)) from t1 group by a;
drop table t1;
--echo #
--echo # End of 10.3 tests
--echo #

View file

@ -1291,6 +1291,164 @@ SELECT JSON_OBJECTAGG(a) FROM t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') FROM t1' at line 1
DROP TABLE t1;
#
# MDEV-19160 JSON_DETAILED output unnecessarily verbose
#
create table t200 (a text);
insert into t200 values
('{
"steps": [
{
"join_optimization": {
"select_id": 1,
"steps": [
{
"rows_estimation": [
{
"table": "t1",
"range_analysis": {
"table_scan": {
"rows": 1000,
"cost": 2e308
},
"potential_range_indexes": [
{
"index": "a_b",
"usable": true,
"key_parts": ["a", "b"]
}
],
"best_covering_index_scan": {
"index": "a_b",
"cost": 52.195,
"chosen": true
},
"setup_range_conditions": [],
"group_index_range": {
"chosen": false,
"cause": "no group by or distinct"
},
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "a_b",
"ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 1.1752,
"chosen": true
}
],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
},
"analyzing_index_merge_union": [],
"test_one_line_array":["123"]
},
"chosen_range_access_summary": {
"range_access_plan": {
"type": "range_scan",
"index": "a_b",
"rows": 1,
"ranges": ["2 <= a <= 2 AND 4 <= b <= 4"]
},
"rows_for_plan": 1,
"cost_for_plan": 1.1752,
"chosen": true
}
}
},
{
"selectivity_for_indexes": [
{
"index_name": "a_b",
"selectivity_from_index": 0.001
}
],
"selectivity_for_columns": [],
"cond_selectivity": 0.001
}
]
}
]
}
},
{
"join_execution": {
"select_id": 1,
"steps": []
}
}
]
}');
select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a_b",
"ranges":
[
"2 <= a <= 2 AND 4 <= b <= 4",
"123"
],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 1.1752,
"chosen": true
}
],
"analyzing_roworder_intersect":
{
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[],
"test_one_line_array":
["123"]
}
]
select JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a_b",
"ranges":
[
"2 <= a <= 2 AND 4 <= b <= 4",
"123"
],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 1.1752,
"chosen": true
}
],
"analyzing_roworder_intersect":
{
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[],
"test_one_line_array":
["123"]
}
]
select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives'))
[{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], "test_one_line_array": ["123"]}]
drop table t200;
#
# End of 10.4 tests
#
#
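
As a standalone illustration of the formatting behaviour exercised above, here is a minimal sketch (assuming a MariaDB 10.5 client; the table name j1 is illustrative, not from the commit):

create table j1 (doc text);
insert into j1 values ('{"a": [1, 2], "b": {"c": 3}}');
# JSON_DETAILED: multi-line output, but short arrays such as [1, 2]
# now stay on a single line (the MDEV-19160 change shown above)
select json_detailed(doc) from j1;
# JSON_PRETTY produces the same output as JSON_DETAILED here
select json_pretty(doc) from j1;
# JSON_LOOSE: the whole document on one line with minimal whitespace
select json_loose(doc) from j1;
drop table j1;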

View file

@ -816,6 +816,107 @@ SELECT JSON_OBJECTAGG(a) FROM t1;
DROP TABLE t1;
--echo #
--echo # MDEV-19160 JSON_DETAILED output unnecessarily verbose
--echo #
create table t200 (a text);
insert into t200 values
('{
"steps": [
{
"join_optimization": {
"select_id": 1,
"steps": [
{
"rows_estimation": [
{
"table": "t1",
"range_analysis": {
"table_scan": {
"rows": 1000,
"cost": 2e308
},
"potential_range_indexes": [
{
"index": "a_b",
"usable": true,
"key_parts": ["a", "b"]
}
],
"best_covering_index_scan": {
"index": "a_b",
"cost": 52.195,
"chosen": true
},
"setup_range_conditions": [],
"group_index_range": {
"chosen": false,
"cause": "no group by or distinct"
},
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "a_b",
"ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 1.1752,
"chosen": true
}
],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
},
"analyzing_index_merge_union": [],
"test_one_line_array":["123"]
},
"chosen_range_access_summary": {
"range_access_plan": {
"type": "range_scan",
"index": "a_b",
"rows": 1,
"ranges": ["2 <= a <= 2 AND 4 <= b <= 4"]
},
"rows_for_plan": 1,
"cost_for_plan": 1.1752,
"chosen": true
}
}
},
{
"selectivity_for_indexes": [
{
"index_name": "a_b",
"selectivity_from_index": 0.001
}
],
"selectivity_for_columns": [],
"cond_selectivity": 0.001
}
]
}
]
}
},
{
"join_execution": {
"select_id": 1,
"steps": []
}
}
]
}');
select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
select JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200;
drop table t200;
--echo #
--echo # End of 10.4 tests
--echo #

View file

@ -1042,4 +1042,17 @@ select * from t1;
a
deallocate prepare stmt;
drop table t1,t2,t3;
#
# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
#
create table t1(c1 varchar(1));
create table t2(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
@@sql_mode like '%strict_all_tables%'
0
insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1;
drop table t1, t2;
#
# End of 10.3 test
#

View file

@ -595,4 +595,21 @@ deallocate prepare stmt;
drop table t1,t2,t3;
--echo #
--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error
--echo #
create table t1(c1 varchar(1));
create table t2(c1 varchar(1));
insert into t1(c1) values('#');
select @@sql_mode like '%strict_all_tables%';
insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1;
drop table t1, t2;
--echo #
--echo # End of 10.3 test
--echo #

View file

@ -1999,3 +1999,55 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`
DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
#
# MDEV-27624: Nested left joins with not_exists optimization
# for most inner left join
#
set @save_join_cache_level= @@join_cache_level;
CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);
CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
set join_cache_level = 0;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
set join_cache_level = 2;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join)
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
DROP TABLE t1, t2, t3;
set join_cache_level= @save_join_cache_level;
# end of 10.3 tests

View file

@ -1419,3 +1419,41 @@ DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
--echo #
--echo # MDEV-27624: Nested left joins with not_exists optimization
--echo # for most inner left join
--echo #
set @save_join_cache_level= @@join_cache_level;
CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);
CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
let $q=
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
set join_cache_level = 0;
eval EXPLAIN $q;
eval $q;
set join_cache_level = 2;
eval EXPLAIN $q;
eval $q;
DROP TABLE t1, t2, t3;
set join_cache_level= @save_join_cache_level;
--echo # end of 10.3 tests

View file

@ -2008,6 +2008,58 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`
DROP TABLE t1,t2,t3;
set join_cache_level= @save_join_cache_level;
set optimizer_switch=@save_optimizer_switch;
#
# MDEV-27624: Nested left joins with not_exists optimization
# for most inner left join
#
set @save_join_cache_level= @@join_cache_level;
CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
CREATE TABLE t2(a INT NOT NULL);
INSERT INTO t2 VALUES (1), (2);
CREATE TABLE t3(a INT not null, b INT);
INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
set join_cache_level = 0;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
set join_cache_level = 2;
EXPLAIN SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join)
SELECT *
FROM t1
LEFT JOIN
( t2 LEFT JOIN t3 ON t2.a = t3.b )
ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
WHERE t1.c = 1 AND t3.a is NULL;
a b c a a b
1 3 1 NULL NULL NULL
DROP TABLE t1, t2, t3;
set join_cache_level= @save_join_cache_level;
# end of 10.3 tests
CREATE TABLE t5 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
CREATE TABLE t6 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
CREATE TABLE t7 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));

View file

@ -1,4 +1,5 @@
drop table if exists t0,t1,t2,t3,t4,t5;
drop view if exists v0,v1,v2,v3;
SET @org_optimizer_switch=@@optimizer_switch;
SET optimizer_switch=ifnull(@optimizer_switch_for_join_outer_test,'outer_join_with_cache=off');
set join_cache_level=1;
@ -2825,5 +2826,89 @@ WHERE t3.pk IN (2);
1
drop view v4;
drop table t1,t2,t3,t4;
#
# MDEV-28602 Wrong result with outer join, merged derived table and view
#
create table t1 (
Election int(10) unsigned NOT NULL
);
insert into t1 (Election) values (1);
create table t2 (
VoteID int(10),
ElectionID int(10),
UserID int(10)
);
insert into t2 (ElectionID, UserID) values (2, 30), (3, 30);
create view v1 as select * from t1
left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T
on T.ElectionID = t1.Election
limit 9;
select * from v1;
Election Voted ElectionID
1 NULL NULL
drop table t1, t2;
drop view v1;
#
# and another contrived example showing a bit of hierarchy
#
create table t10 (a int);
create table t20 (b int);
insert into t10 values (1),(2);
insert into t20 values (1),(3);
create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3;
create table t30 (c int);
insert into t30 values (1),(3);
create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6;
select * from v20 limit 9;
c x u y b
1 X U Y 1
3 NULL NULL NULL NULL
drop view v10, v20;
drop table t10, t20, t30;
#
# More complex testcase
#
create table t2 (b int);
insert into t2 values (3),(7),(1);
create table t3 (c int);
insert into t3 values (3),(1);
create table t1 (a int);
insert into t1 values (1),(2),(7),(1);
select * from
(
select * from
(select 'Z' as z, t1.a from t1) dt1
left join
(select 'Y' as y, t2.b from t2) dt2
left join
(select 'X' as x, t3.c from t3) dt3
on dt2.b=dt3.c
on dt1.a=dt2.b
limit 9
) dt;
z a y b x c
Z 1 Y 1 X 1
Z 2 NULL NULL NULL NULL
Z 7 Y 7 NULL NULL
Z 1 Y 1 X 1
create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3;
create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2;
create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c;
create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9;
select * from v1;
z a y b x c
Z 1 Y 1 X 1
Z 2 NULL NULL NULL NULL
Z 7 Y 7 NULL NULL
Z 1 Y 1 X 1
set statement join_cache_level=0 for
select * from v1;
z a y b x c
Z 1 Y 1 X 1
Z 2 NULL NULL NULL NULL
Z 7 Y 7 NULL NULL
Z 1 Y 1 X 1
drop view v0, v1, v2, v3;
drop table t1, t2, t3;
# end of 10.3 tests
SET optimizer_switch=@org_optimizer_switch;

View file

@ -6,6 +6,7 @@
--disable_warnings
drop table if exists t0,t1,t2,t3,t4,t5;
drop view if exists v0,v1,v2,v3;
--enable_warnings
SET @org_optimizer_switch=@@optimizer_switch;
@ -2341,6 +2342,90 @@ WHERE t3.pk IN (2);
drop view v4;
drop table t1,t2,t3,t4;
--echo #
--echo # MDEV-28602 Wrong result with outer join, merged derived table and view
--echo #
create table t1 (
Election int(10) unsigned NOT NULL
);
insert into t1 (Election) values (1);
create table t2 (
VoteID int(10),
ElectionID int(10),
UserID int(10)
);
insert into t2 (ElectionID, UserID) values (2, 30), (3, 30);
create view v1 as select * from t1
left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T
on T.ElectionID = t1.Election
limit 9;
# The LIMIT clause causes the view to use the merge algorithm as opposed to a temp table
select * from v1;
drop table t1, t2;
drop view v1;
--echo #
--echo # and another contrived example showing a bit of hierarchy
--echo #
create table t10 (a int);
create table t20 (b int);
insert into t10 values (1),(2);
insert into t20 values (1),(3);
create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3;
create table t30 (c int);
insert into t30 values (1),(3);
create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6;
select * from v20 limit 9;
drop view v10, v20;
drop table t10, t20, t30;
--echo #
--echo # More complex testcase
--echo #
create table t2 (b int);
insert into t2 values (3),(7),(1);
create table t3 (c int);
insert into t3 values (3),(1);
create table t1 (a int);
insert into t1 values (1),(2),(7),(1);
select * from
(
select * from
(select 'Z' as z, t1.a from t1) dt1
left join
(select 'Y' as y, t2.b from t2) dt2
left join
(select 'X' as x, t3.c from t3) dt3
on dt2.b=dt3.c
on dt1.a=dt2.b
limit 9
) dt;
## Same as dt3 above
create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3;
## Same as dt2 above
create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2;
## Same as (...) in the "... dt1 left join (...)" above
create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c;
# Same as above select statement
create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9;
select * from v1;
set statement join_cache_level=0 for
select * from v1;
drop view v0, v1, v2, v3;
drop table t1, t2, t3;
--echo # end of 10.3 tests
SET optimizer_switch=@org_optimizer_switch;

View file

@ -6,6 +6,7 @@ set @@join_cache_level=6;
set @optimizer_switch_for_join_outer_test=@@optimizer_switch;
set @join_cache_level_for_join_outer_test=@@join_cache_level;
drop table if exists t0,t1,t2,t3,t4,t5;
drop view if exists v0,v1,v2,v3;
SET @org_optimizer_switch=@@optimizer_switch;
SET optimizer_switch=ifnull(@optimizer_switch_for_join_outer_test,'outer_join_with_cache=off');
set join_cache_level=@join_cache_level_for_join_outer_test;
@ -2832,5 +2833,89 @@ WHERE t3.pk IN (2);
1
drop view v4;
drop table t1,t2,t3,t4;
#
# MDEV-28602 Wrong result with outer join, merged derived table and view
#
create table t1 (
Election int(10) unsigned NOT NULL
);
insert into t1 (Election) values (1);
create table t2 (
VoteID int(10),
ElectionID int(10),
UserID int(10)
);
insert into t2 (ElectionID, UserID) values (2, 30), (3, 30);
create view v1 as select * from t1
left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T
on T.ElectionID = t1.Election
limit 9;
select * from v1;
Election Voted ElectionID
1 NULL NULL
drop table t1, t2;
drop view v1;
#
# and another contrived example showing a bit of hierarchy
#
create table t10 (a int);
create table t20 (b int);
insert into t10 values (1),(2);
insert into t20 values (1),(3);
create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3;
create table t30 (c int);
insert into t30 values (1),(3);
create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6;
select * from v20 limit 9;
c x u y b
1 X U Y 1
3 NULL NULL NULL NULL
drop view v10, v20;
drop table t10, t20, t30;
#
# More complex testcase
#
create table t2 (b int);
insert into t2 values (3),(7),(1);
create table t3 (c int);
insert into t3 values (3),(1);
create table t1 (a int);
insert into t1 values (1),(2),(7),(1);
select * from
(
select * from
(select 'Z' as z, t1.a from t1) dt1
left join
(select 'Y' as y, t2.b from t2) dt2
left join
(select 'X' as x, t3.c from t3) dt3
on dt2.b=dt3.c
on dt1.a=dt2.b
limit 9
) dt;
z a y b x c
Z 1 Y 1 X 1
Z 1 Y 1 X 1
Z 7 Y 7 NULL NULL
Z 2 NULL NULL NULL NULL
create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3;
create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2;
create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c;
create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9;
select * from v1;
z a y b x c
Z 1 Y 1 X 1
Z 1 Y 1 X 1
Z 7 Y 7 NULL NULL
Z 2 NULL NULL NULL NULL
set statement join_cache_level=0 for
select * from v1;
z a y b x c
Z 1 Y 1 X 1
Z 2 NULL NULL NULL NULL
Z 7 Y 7 NULL NULL
Z 1 Y 1 X 1
drop view v0, v1, v2, v3;
drop table t1, t2, t3;
# end of 10.3 tests
SET optimizer_switch=@org_optimizer_switch;

View file

@ -1,4 +1,3 @@
drop table if exists t1,t2;
set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';
@ -696,7 +695,7 @@ CREATE VIEW v1 AS
SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP;
DESC v1;
Field Type Null Key Default Extra
a int(11) YES 0
a int(11) YES NULL
LENGTH(a) int(10) YES NULL
COUNT(*) bigint(21) NO 0
SELECT * FROM v1;
@ -859,7 +858,7 @@ INSERT INTO t1 VALUES (1),(2);
CREATE OR REPLACE VIEW v1 AS SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP;
DESCRIBE v1;
Field Type Null Key Default Extra
a int(11) YES 0
a int(11) YES NULL
LENGTH(a) int(10) YES NULL
COUNT(*) bigint(21) NO 0
DROP VIEW v1;
@ -869,7 +868,7 @@ INSERT INTO t1 VALUES (1),(2);
CREATE OR REPLACE VIEW v1 AS SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP;
DESCRIBE v1;
Field Type Null Key Default Extra
a bigint(20) YES 0
a bigint(20) YES NULL
LENGTH(a) int(10) YES NULL
COUNT(*) bigint(21) NO 0
DROP VIEW v1;
@ -894,8 +893,8 @@ DROP VIEW v1;
CREATE VIEW v1 AS SELECT a, b FROM t1 GROUP BY a,b WITH ROLLUP;
DESC v1;
Field Type Null Key Default Extra
a int(11) YES 0
b int(20) YES 0
a int(11) YES NULL
b int(20) YES NULL
DROP VIEW v1;
DROP TABLE t1;
#

View file

@ -1,7 +1,3 @@
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
set @sav_dpi= @@div_precision_increment;
set div_precision_increment= 5;
show variables like 'div_precision_increment';

View file

@ -7421,17 +7421,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a_b",
"ranges":
[
"(2,4) <= (a,b) <= (2,4)"
],
["(2,4) <= (a,b) <= (2,4)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@ -7445,8 +7441,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
explain select * from t1 where a >= 900 and b between 10 and 20;
@ -7455,17 +7450,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a_b",
"ranges":
[
"(900,10) <= (a,b)"
],
["(900,10) <= (a,b)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@ -7479,8 +7470,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t0,t1;
@ -7492,17 +7482,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "start_date",
"ranges":
[
"(2019-02-10,NULL) < (start_date,end_date)"
],
["(2019-02-10,NULL) < (start_date,end_date)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -7516,8 +7502,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1,one_k;
@ -7537,17 +7522,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a_b_c",
"ranges":
[
"(1) <= (a,b) < (4,50)"
],
["(1) <= (a,b) < (4,50)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -7561,8 +7542,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table ten,t1;
@ -7576,17 +7556,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C) <= (b) <= (\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C)"
],
["(\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C) <= (b) <= (\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7600,8 +7576,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
@ -7610,17 +7585,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(NULL) <= (b) <= (NULL)"
],
["(NULL) <= (b) <= (NULL)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7634,8 +7605,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;
@ -7652,17 +7622,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\x0A) <= (b) <= (ab\x0A)"
],
["(ab\x0A) <= (b) <= (ab\x0A)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7676,8 +7642,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
ALTER TABLE t1 modify column b BINARY(10) AFTER i;
@ -7687,17 +7652,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\x0A\x00\x00\x00\x00\x00\x00\x00) <= (b) <= (ab\x0A\x00\x00\x00\x00\x00\x00\x00)"
],
["(ab\x0A\x00\x00\x00\x00\x00\x00\x00) <= (b) <= (ab\x0A\x00\x00\x00\x00\x00\x00\x00)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7711,8 +7672,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
ALTER TABLE t1 modify column b VARBINARY(10) AFTER i;
@ -7722,17 +7682,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\x0A) <= (b) <= (ab\x0A)"
],
["(ab\x0A) <= (b) <= (ab\x0A)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7746,8 +7702,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;
@ -7760,17 +7715,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\n) <= (b) <= (ab\n)"
],
["(ab\n) <= (b) <= (ab\n)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7784,8 +7735,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;
@ -7801,17 +7751,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\x0A) <= (b) <= (ab\x0A)"
],
["(ab\x0A) <= (b) <= (ab\x0A)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -7825,8 +7771,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;
@ -7840,17 +7785,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "i_b",
"ranges":
[
"(ab\n) <= (b) <= (ab\n)"
],
["(ab\n) <= (b) <= (ab\n)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -7864,8 +7805,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;
@ -7896,17 +7836,13 @@ EXPLAIN
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "start_date",
"ranges":
[
"(2019-02-10,NULL) < (start_date,end_date)"
],
["(2019-02-10,NULL) < (start_date,end_date)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -7920,8 +7856,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1, t0, one_k;
@ -7960,19 +7895,15 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
"plan_prefix":
[
],
[],
"table": "A",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 5.9375,
@ -7992,18 +7923,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost_for_plan": 4.017089844,
"rest_of_plan":
[
{
"plan_prefix":
[
"A"
],
["A"],
"table": "B",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 804.6875,
@ -8025,17 +7952,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
}
]
},
{
"plan_prefix":
[
],
[],
"table": "B",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 804.6875,
@ -8066,19 +7990,15 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
"plan_prefix":
[
],
[],
"table": "A",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 10,
@ -8098,18 +8018,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost_for_plan": 4.017089844,
"rest_of_plan":
[
{
"plan_prefix":
[
"A"
],
["A"],
"table": "B",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "ref",
"index": "b",
@ -8120,7 +8036,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost": 20.00585794,
"chosen": true
},
{
"access_type": "scan",
"resulting_rows": 804.6875,
@ -8143,17 +8058,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
}
]
},
{
"plan_prefix":
[
],
[],
"table": "B",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 804.6875,
@ -8189,17 +8101,13 @@ a
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "a",
"ranges":
[
"() <= (a) <= ()"
],
["() <= (a) <= ()"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -8213,8 +8121,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
DROP TABLE t1;
@ -8236,7 +8143,6 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.semijoin_table_pullout')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.semijoin_table_pullout'))
[
{
"pulled_out_tables":
[
@ -8257,15 +8163,11 @@ kp1 kp2
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
[
[
{
"index": "kp1",
"ranges":
[
"(2,4) <= (kp1,kp2) <= (2)"
],
["(2,4) <= (kp1,kp2) <= (2)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@ -8291,19 +8193,15 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
"plan_prefix":
[
],
[],
"table": "t1",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 10,
@ -8323,18 +8221,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost_for_plan": 4.021972656,
"rest_of_plan":
[
{
"plan_prefix":
[
"t1"
],
["t1"],
"table": "t2",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "ref",
"index": "a",
@ -8345,7 +8239,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost": 20.00585794,
"chosen": true
},
{
"access_type": "scan",
"resulting_rows": 100,
@ -8368,17 +8261,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
}
]
},
{
"plan_prefix":
[
],
[],
"table": "t2",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "scan",
"resulting_rows": 100,
@ -8399,18 +8289,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost_for_plan": 22.21972656,
"rest_of_plan":
[
{
"plan_prefix":
[
"t2"
],
["t2"],
"table": "t1",
"best_access_path":
{
"considered_access_paths":
[
{
"access_type": "ref",
"index": "a",
@ -8421,7 +8307,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"cost": 200.0585794,
"chosen": true
},
{
"access_type": "scan",
"resulting_rows": 10,
@ -8465,24 +8350,17 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns'))
[
[
{
"column_name": "a",
"ranges":
[
"1 <= a <= 5"
],
["1 <= a <= 5"],
"selectivity_from_histogram": 0.046875
},
{
"column_name": "b",
"ranges":
[
"NULL < b <= 5"
],
["NULL < b <= 5"],
"selectivity_from_histogram": 0.046875
}
]
@ -8495,9 +8373,7 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns'))
[
[
{
"column_name": "a",
"ranges":
@ -8517,15 +8393,11 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns'))
[
[
{
"column_name": "b",
"ranges":
[
"10 <= b < 25"
],
["10 <= b < 25"],
"selectivity_from_histogram": 0.15625
}
]
@ -8543,15 +8415,11 @@ UPDATE t1 SET b=10 WHERE a=1;
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
[
[
{
"index": "PRIMARY",
"ranges":
[
"(1) <= (a) <= (1)"
],
["(1) <= (a) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -8737,7 +8605,6 @@ select JSON_DETAILED(JSON_EXTRACT(trace, '$**.mark_join_nest_as_const'))
from information_schema.optimizer_trace;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.mark_join_nest_as_const'))
[
{
"members":
[
@ -8763,19 +8630,16 @@ select json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
from information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
[
{
"item": "t0.a in (1,2,3,4,5,6)",
"conversion":
[
{
"join_preparation":
{
"select_id": 2,
"steps":
[
{
"derived":
{
@ -8784,7 +8648,6 @@ json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
"algorithm": "materialized"
}
},
{
"transformation":
{
@ -8795,7 +8658,6 @@ json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
"possible": true
}
},
{
"transformation":
{
@ -8805,7 +8667,6 @@ json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
"chosen": true
}
},
{
"expanded_query": "/* select#2 */ select tvc_0._col_1 from (values (1),(2),(3),(4),(5),(6)) tvc_0"
}
@ -8822,7 +8683,6 @@ select json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
from information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
[
{
"item": "t0.a in (1,2,3,4,5,t0.a + 1)",
"done": false,
@ -8836,7 +8696,6 @@ select json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
from information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
[
{
"item": "t0.a in ('1','2','3','4','5','6')",
"done": false,

View file

@ -307,17 +307,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "key1",
"ranges":
[
"(100) <= (key1) <= (100)"
],
["(100) <= (key1) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -325,13 +321,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cost": 2700.058937,
"chosen": true
},
{
"index": "key2",
"ranges":
[
"(100) <= (key2) <= (100)"
],
["(100) <= (key2) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -340,13 +333,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"chosen": false,
"cause": "cost"
},
{
"index": "key3",
"ranges":
[
"(100) <= (key3) <= (100)"
],
["(100) <= (key3) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@ -360,7 +350,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
{
"intersecting_indexes":
[
{
"index": "key1",
"index_scan_cost": 10.31393703,
@ -372,7 +361,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"intersect_covering_with_this_index": false,
"chosen": true
},
{
"index": "key2",
"index_scan_cost": 10.31393703,
@ -384,7 +372,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"intersect_covering_with_this_index": false,
"chosen": true
},
{
"index": "key3",
"index_scan_cost": 10.31393703,
@ -408,14 +395,12 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"chosen": true
},
"analyzing_index_merge_union":
[
]
[]
}
]
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
[
{
"range_access_plan":
{
@ -426,35 +411,26 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
"clustered_pk_scan": false,
"intersect_of":
[
{
"type": "range_scan",
"index": "key1",
"rows": 2243,
"ranges":
[
"(100) <= (key1) <= (100)"
]
["(100) <= (key1) <= (100)"]
},
{
"type": "range_scan",
"index": "key2",
"rows": 2243,
"ranges":
[
"(100) <= (key2) <= (100)"
]
["(100) <= (key2) <= (100)"]
},
{
"type": "range_scan",
"index": "key3",
"rows": 2243,
"ranges":
[
"(100) <= (key3) <= (100)"
]
["(100) <= (key3) <= (100)"]
}
]
},
@ -470,32 +446,25 @@ id select_type table type possible_keys key key_len ref rows Extra
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
],
[],
"analyzing_roworder_intersect":
{
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
{
"indexes_to_merge":
[
{
"range_scan_alternatives":
[
{
"index": "key1",
"ranges":
[
"(100) <= (key1) <= (100)"
],
["(100) <= (key1) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@ -503,13 +472,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cost": 457.058937,
"chosen": true
},
{
"index": "key2",
"ranges":
[
"(100) <= (key2) <= (100)"
],
["(100) <= (key2) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@ -522,17 +488,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"index_to_merge": "key1",
"cumulated_cost": 457.058937
},
{
"range_scan_alternatives":
[
{
"index": "key3",
"ranges":
[
"(100) <= (key3) <= (100)"
],
["(100) <= (key3) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@ -540,13 +502,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cost": 457.058937,
"chosen": true
},
{
"index": "key4",
"ranges":
[
"(100) <= (key4) <= (100)"
],
["(100) <= (key4) <= (100)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@ -565,20 +524,16 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "always cheaper than non roworder retrieval",
"analyzing_roworder_scans":
[
{
"type": "range_scan",
"index": "key1",
"rows": 2243,
"ranges":
[
"(100) <= (key1) <= (100)"
],
["(100) <= (key1) <= (100)"],
"analyzing_roworder_intersect":
{
"intersecting_indexes":
[
{
"index": "key1",
"index_scan_cost": 10.31393703,
@ -590,7 +545,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"intersect_covering_with_this_index": false,
"chosen": true
},
{
"index": "key2",
"index_scan_cost": 10.31393703,
@ -614,20 +568,16 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"chosen": true
}
},
{
"type": "range_scan",
"index": "key3",
"rows": 2243,
"ranges":
[
"(100) <= (key3) <= (100)"
],
["(100) <= (key3) <= (100)"],
"analyzing_roworder_intersect":
{
"intersecting_indexes":
[
{
"index": "key3",
"index_scan_cost": 10.31393703,
@ -639,7 +589,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"intersect_covering_with_this_index": false,
"chosen": true
},
{
"index": "key4",
"index_scan_cost": 10.31393703,
@ -674,14 +623,12 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
[
{
"range_access_plan":
{
"type": "index_roworder_union",
"union_of":
[
{
"type": "index_roworder_intersect",
"rows": 77,
@ -690,29 +637,22 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
"clustered_pk_scan": false,
"intersect_of":
[
{
"type": "range_scan",
"index": "key1",
"rows": 2243,
"ranges":
[
"(100) <= (key1) <= (100)"
]
["(100) <= (key1) <= (100)"]
},
{
"type": "range_scan",
"index": "key2",
"rows": 2243,
"ranges":
[
"(100) <= (key2) <= (100)"
]
["(100) <= (key2) <= (100)"]
}
]
},
{
"type": "index_roworder_intersect",
"rows": 77,
@ -721,25 +661,19 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
"clustered_pk_scan": false,
"intersect_of":
[
{
"type": "range_scan",
"index": "key3",
"rows": 2243,
"ranges":
[
"(100) <= (key3) <= (100)"
]
["(100) <= (key3) <= (100)"]
},
{
"type": "range_scan",
"index": "key4",
"rows": 2243,
"ranges":
[
"(100) <= (key4) <= (100)"
]
["(100) <= (key4) <= (100)"]
}
]
}

View file

@ -23,17 +23,13 @@ EXPLAIN
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"range_scan_alternatives":
[
{
"index": "col1",
"ranges":
[
"(a) <= (col1)"
],
["(a) <= (col1)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@ -47,8 +43,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "too few roworder scans"
},
"analyzing_index_merge_union":
[
]
[]
}
]
drop table t1;

View file

@ -64,3 +64,19 @@ SQRT(?) is not null
#
# End of 10.3 tests
#
#
# MDEV-17869 AddressSanitizer: use-after-poison in Item_change_list::rollback_item_tree_changes
#
create table t1 (pk int, v1 varchar(1));
insert t1 values (1,'v'),(2,'v'),(3,'c');
create table t2 (pk int, v1 varchar(1));
insert t2 values (1,'x');
create table t3 (pk int, i1 int, v1 varchar(1));
insert t3 values (10,8,9);
execute immediate 'select straight_join 1 from (t1 join t2 on (t1.v1 = t2.v1))
where (3, 6) in (select tc.pk, t3.i1 from (t3 join t1 as tc on (tc.v1 = t3.v1)) having tc.pk > 1 );';
1
drop table t1, t2, t3;
#
# End of 10.4 tests
#

View file

@ -52,3 +52,20 @@ execute p1 using 17864960750176564435;
--echo #
--echo # End of 10.3 tests
--echo #
--echo #
--echo # MDEV-17869 AddressSanitizer: use-after-poison in Item_change_list::rollback_item_tree_changes
--echo #
create table t1 (pk int, v1 varchar(1));
insert t1 values (1,'v'),(2,'v'),(3,'c');
create table t2 (pk int, v1 varchar(1));
insert t2 values (1,'x');
create table t3 (pk int, i1 int, v1 varchar(1));
insert t3 values (10,8,9);
execute immediate 'select straight_join 1 from (t1 join t2 on (t1.v1 = t2.v1))
where (3, 6) in (select tc.pk, t3.i1 from (t3 join t1 as tc on (tc.v1 = t3.v1)) having tc.pk > 1 );';
drop table t1, t2, t3;
--echo #
--echo # End of 10.4 tests
--echo #

View file

@ -5718,6 +5718,24 @@ EXECUTE stmt USING 'd';
EXECUTE stmt USING 'd';
300
DROP TABLE t1, t2, t3;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = ?";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = 'a'";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
#
# End of 10.3 tests
#
#
# MDEV-19263: Server crashes in mysql_handle_single_derived
# upon 2nd execution of PS

View file

@ -5109,6 +5109,45 @@ EXECUTE stmt USING 'd';
EXECUTE stmt USING 'd';
DROP TABLE t1, t2, t3;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = ?";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
let $run= 1000;
disable_result_log;
disable_query_log;
while ($run) {
execute stmt using repeat('x',10000);
dec $run;
}
enable_result_log;
enable_query_log;
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
create table t (a varchar(10)) character set utf8;
insert into t values ('');
prepare stmt from "select 1 from t where a = 'a'";
set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id());
let $run= 1000;
disable_result_log;
disable_query_log;
while ($run) {
execute stmt;
dec $run;
}
enable_result_log;
enable_query_log;
deallocate prepare stmt;
drop table t;
set @@max_session_mem_used=default;
--echo #
--echo # End of 10.3 tests
--echo #
--echo #
--echo # MDEV-19263: Server crashes in mysql_handle_single_derived
--echo # upon 2nd execution of PS

View file

@ -20,13 +20,11 @@ select json_detailed(JSON_EXTRACT(trace, '$**.ranges'))
from information_schema.optimizer_trace;
json_detailed(JSON_EXTRACT(trace, '$**.ranges'))
[
[
"(1) <= (key1) <= (1)",
"(2) <= (key1) <= (2)",
"(3) <= (key1) <= (3)"
],
[
"(1) <= (key1) <= (1)",
"(2) <= (key1) <= (2)",
@ -71,9 +69,7 @@ from information_schema.optimizer_trace);
select left(@json, 500);
left(@json, 500)
[
[
{
"index": "key1",
"ranges":
@ -85,7 +81,7 @@ left(@json, 500)
"(1,1,5) <= (kp1,kp2,kp3) <= (1,1,5)",
"(1,1,6) <= (kp1,kp2,kp3) <= (1,1,6)",
"(1,1,7) <= (kp1,kp2,kp3) <= (1,1,7)",
"
"(1,1,8) <= (kp
## Repeat the above with low max_weight:
set @tmp9750_weight=@@optimizer_max_sel_arg_weight;
set optimizer_max_sel_arg_weight=20;
@ -102,9 +98,7 @@ set @json= json_detailed(json_extract(@trace, '$**.range_scan_alternatives'));
select left(@json, 500);
left(@json, 500)
[
[
{
"index": "key1",
"ranges":
@ -119,14 +113,13 @@ left(@json, 500)
"(8) <= (kp1) <= (8)",
"(9) <= (kp1) <= (9)",
"(10) <= (kp1) <= (10)"
],
set @json= json_detailed(json_extract(@trace, '$**.setup_range_conditions'));
select left(@json, 2500);
left(@json, 2500)
[
[
{
"sel_arg_weight_heuristic":
{
@ -136,7 +129,6 @@ left(@json, 2500)
"key2_weight": 10
}
},
{
"sel_arg_weight_heuristic":
{
@ -146,7 +138,6 @@ left(@json, 2500)
"key2_weight": 10
}
},
{
"sel_arg_weight_heuristic":
{
@ -173,9 +164,7 @@ from information_schema.optimizer_trace);
select left(@json, 1500);
left(@json, 1500)
[
[
{
"index": "key1",
"ranges":
@ -209,7 +198,8 @@ left(@json, 1500)
"(3,7) <= (kp1,kp2) <= (3,7)",
"(3,8) <= (kp1,kp2) <= (3,8)",
"(3,9) <= (kp1,kp2) <= (3,9)",
"(3,10) <= (kp1,kp2
"(3,10) <= (kp1,kp2) <= (3,10)",
set optimizer_max_sel_arg_weight= @tmp9750_weight;
set optimizer_trace=@tmp_9750;
drop table t1;

View file

@ -1,4 +1,3 @@
drop table if exists t1,t2;
create table t1 (a char(16), b date, c datetime);
insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01';
select * from t1 where c = '2000-01-01';
@ -1145,5 +1144,23 @@ t2 CREATE TABLE `t2` (
DROP TABLE t2;
DROP TABLE t1;
#
# MDEV-29890 Update with inner join false row count result
#
set sql_mode='NO_ZERO_DATE';
create table t1 (a1 bigint primary key, a2 date not null, a3 bigint not null);
create table t2 (b1 bigint primary key);
insert into t2 (b1) values (1);
insert into t1 (a1, a2, a3) values (1, current_date, 1),( 2, current_date, 1);
update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day;
select row_count();
row_count()
2
set sql_mode='';
alter table t1 modify a2 date not null default '0000-00-00';
set sql_mode='NO_ZERO_DATE';
update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day;
drop table t1, t2;
set sql_mode=default;
#
# End of 10.4 tests
#

View file

@@ -1,10 +1,6 @@
#
# test of problem with date fields
#
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
create table t1 (a char(16), b date, c datetime);
insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01';
select * from t1 where c = '2000-01-01';
@@ -788,6 +784,23 @@ SHOW CREATE TABLE t2;
DROP TABLE t2;
DROP TABLE t1;
--echo #
--echo # MDEV-29890 Update with inner join false row count result
--echo #
set sql_mode='NO_ZERO_DATE';
create table t1 (a1 bigint primary key, a2 date not null, a3 bigint not null);
create table t2 (b1 bigint primary key);
insert into t2 (b1) values (1);
insert into t1 (a1, a2, a3) values (1, current_date, 1),( 2, current_date, 1);
update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day;
select row_count();
set sql_mode='';
alter table t1 modify a2 date not null default '0000-00-00';
set sql_mode='NO_ZERO_DATE';
update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day;
drop table t1, t2;
set sql_mode=default;
--echo #
--echo # End of 10.4 tests
--echo #

View file

@@ -0,0 +1,15 @@
CREATE TABLE t1 (a TEXT, ac TEXT COMPRESSED, b TINYTEXT, bc TINYTEXT COMPRESSED, c MEDIUMTEXT, cc MEDIUMTEXT COMPRESSED, d LONGTEXT, dc LONGTEXT COMPRESSED, e VARCHAR(10), ec VARCHAR(10) COMPRESSED);
# Isolate row event into its own binary log
FLUSH BINARY LOGS;
INSERT INTO t1 VALUES ('mya', 'myac', 'myb', 'mybc', 'myc', 'mycc', 'myd', 'mydc', 'mye', 'myec');
FLUSH BINARY LOGS;
# MYSQLBINLOG --base64-output=decode-rows -vv datadir/binlog_file --result-file=result_binlog
include/assert_grep.inc [Ensure compressed TEXT fields are annotated correctly]
include/assert_grep.inc [Ensure compressed TINYTEXT fields are annotated correctly]
include/assert_grep.inc [Ensure compressed MEDIUMTEXT fields are annotated correctly]
include/assert_grep.inc [Ensure compressed LONGTEXT fields are annotated correctly]
include/assert_grep.inc [Ensure compressed VARSTRING fields are annotated correctly]
include/assert_grep.inc [Ensure COMPRESSED only shows up for corresponding fields]
include/assert_grep.inc [Ensure non-compressed TEXT fields are annotated correctly]
include/assert_grep.inc [Ensure non-compressed VARSTRING fields are annotated correctly]
DROP TABLE t1;

View file

@@ -0,0 +1,70 @@
#
# Purpose:
# This test validates that mysqlbinlog is able to annotate compressed column
# types with two levels of verbosity.
#
# Methodology:
# Validate that the output from mysqlbinlog -vv after creating and inserting
# into a table with compressed and uncompressed fields correctly annotates
# which columns are compressed
#
# References:
# MDEV-25277: mysqlbinlog --verbose cannot read row events with compressed
# columns: Don't know how to handle column type: 140
#
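For orientation: mysqlbinlog -vv decodes each row image as "### @N=<value>" lines with the column type in a trailing comment, so with the typestr changes to log_event.cc later in this commit a compressed TEXT column should be annotated roughly as in this sketch (the value, meta and null flags are illustrative only):
### @2='myac' /* BLOB/TEXT COMPRESSED meta=2 nullable=1 is_null=0 */
The assert_grep checks below look for exactly these "<TYPE> COMPRESSED" annotations.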
--source include/have_binlog_format_row.inc
CREATE TABLE t1 (a TEXT, ac TEXT COMPRESSED, b TINYTEXT, bc TINYTEXT COMPRESSED, c MEDIUMTEXT, cc MEDIUMTEXT COMPRESSED, d LONGTEXT, dc LONGTEXT COMPRESSED, e VARCHAR(10), ec VARCHAR(10) COMPRESSED);
--echo # Isolate row event into its own binary log
FLUSH BINARY LOGS;
INSERT INTO t1 VALUES ('mya', 'myac', 'myb', 'mybc', 'myc', 'mycc', 'myd', 'mydc', 'mye', 'myec');
FLUSH BINARY LOGS;
--let $binlog_file= query_get_value(SHOW BINARY LOGS, Log_name, 2)
--let $datadir= `SELECT @@datadir`
--let $result_binlog= $MYSQLTEST_VARDIR/tmp/$binlog_file
--echo # MYSQLBINLOG --base64-output=decode-rows -vv datadir/binlog_file --result-file=result_binlog
--exec $MYSQL_BINLOG --base64-output=decode-rows -vv $datadir/$binlog_file --result-file=$result_binlog
--let $assert_file= $result_binlog
--let $assert_count= 1
--let $assert_text= Ensure compressed TEXT fields are annotated correctly
--let $assert_select=\WTEXT COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure compressed TINYTEXT fields are annotated correctly
--let $assert_select=\WTINYTEXT COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure compressed MEDIUMTEXT fields are annotated correctly
--let $assert_select=\WMEDIUMTEXT COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure compressed LONGTEXT fields are annotated correctly
--let $assert_select=\WLONGTEXT COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure compressed VARSTRING fields are annotated correctly
--let $assert_select=\WVARSTRING\(\d+\) COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure COMPRESSED only shows up for corresponding fields
--let $assert_count= 5
--let $assert_select= COMPRESSED
--source include/assert_grep.inc
--let $assert_text= Ensure non-compressed TEXT fields are annotated correctly
--let $assert_count= 8
--let $assert_select=/*.*TEXT
--source include/assert_grep.inc
--let $assert_text= Ensure non-compressed VARSTRING fields are annotated correctly
--let $assert_count= 2
--let $assert_select=/*.*VARSTRING
--source include/assert_grep.inc
# Cleanup
DROP TABLE t1;

View file

@@ -17,6 +17,7 @@ galera_sst_encrypted : MDEV-29876 Galera test failure on galera_sst_encrypted
MW-284 : MDEV-29861 Galera test case hangs
galera_binlog_checksum : MDEV-29861 Galera test case hangs
galera_var_notify_ssl_ipv6 : MDEV-29861 Galera test case hangs
galera_var_notify_cmd : MDEV-29861 Galera test case hangs
galera_var_node_address : MDEV-20485 Galera test failure
MDEV-26575 : MDEV-29878 Galera test failure on MDEV-26575
galera_bf_abort_shutdown : MDEV-29918 Assertion failure on galera_bf_abort_shutdown

View file

@@ -9,14 +9,6 @@ connection node_2;
Cleaning grastate.dat file ...
Starting server ...
connection node_1;
connection node_2;
connection node_2;
Shutting down server ...
connection node_1;
connection node_2;
Cleaning grastate.dat file ...
Starting server ...
connection node_1;
SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
VARIABLE_VALUE
Primary

View file

@@ -34,34 +34,6 @@
--source include/start_mysqld.inc
--source include/wait_until_ready.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
--connection node_2
--echo Shutting down server ...
--source include/shutdown_mysqld.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
--connection node_2
#
# Delete grastate.dat with safe_to_bootstrap: 0
#
--echo Cleaning grastate.dat file ...
--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
--echo Starting server ...
--let $restart_noprint=2
--let $start_mysqld_params="--wsrep-new-cluster"
--source include/start_mysqld.inc
--source include/wait_until_ready.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';

View file

@@ -111,16 +111,13 @@ json_detailed(priv)
"authentication_string": "*7D8C3DF236D9163B6C274A9D47704BC496988460",
"auth_or":
[
{
"plugin": "ed25519",
"authentication_string": "F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc"
},
{
"plugin": "unix_socket"
},
{
}
],

View file

@@ -496,11 +496,48 @@ delete history from t1;
delete from t1;
drop table t1;
#
# MDEV-30378 Versioned REPLACE succeeds with ON DELETE RESTRICT
# constraint
#
create table t0 (pk integer primary key) with system versioning engine=innodb;
create table t1 (pk integer primary key,
foreign key(pk) references t0(pk)
on delete restrict on update cascade) engine=innodb;
create table t2 (pk integer);
insert into t0 (pk) values (1);
insert into t1 (pk) values (1);
insert into t2 (pk) values (1);
delete from t0;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE)
replace t0 values (1);
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE)
select * into outfile 'load_t0' from t0 ;
load data infile 'load_t0' replace into table t0;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE)
delete t0, t2 from t0 join t2;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE)
select pk from t0;
pk
1
# Cleanup
drop table t1, t0, t2;
# create_select for a temporary table didn't set up pos_in_locked_tables.
create table t (a int unique) engine=innodb
replace select 1 as a, 2 as b union select 1 as a, 3 as c;
select * from t;
a b
1 3
drop table t;
create temporary table t (a int unique) engine=innodb
replace select 1 as a, 2 as b union select 1 as a, 3 as c;
select * from t;
a b
1 3
drop table t;
#
# MDEV-20729 Fix REFERENCES constraint in column definition
#
create or replace table t1(
id int
);
create table t1(id int);
# system fields can't be foreign keys:
create or replace table t2(
x int,
@@ -533,3 +570,4 @@ foreign key (sys_end) references t1(id)
) engine innodb with system versioning;
ERROR HY000: Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed")
drop table t1;
# End of 10.5 tests

View file

@@ -527,12 +527,56 @@ delete from t1;
# cleanup
drop table t1;
--echo #
--echo # MDEV-30378 Versioned REPLACE succeeds with ON DELETE RESTRICT
--echo # constraint
--echo #
create table t0 (pk integer primary key) with system versioning engine=innodb;
create table t1 (pk integer primary key,
foreign key(pk) references t0(pk)
on delete restrict on update cascade) engine=innodb;
create table t2 (pk integer);
insert into t0 (pk) values (1);
insert into t1 (pk) values (1);
insert into t2 (pk) values (1);
--error ER_ROW_IS_REFERENCED_2
delete from t0;
--error ER_ROW_IS_REFERENCED_2
replace t0 values (1);
select * into outfile 'load_t0' from t0 ;
--error ER_ROW_IS_REFERENCED_2
load data infile 'load_t0' replace into table t0;
--error ER_ROW_IS_REFERENCED_2
delete t0, t2 from t0 join t2;
select pk from t0;
--echo # Cleanup
drop table t1, t0, t2;
--let $datadir= `select @@datadir`
--remove_file $datadir/test/load_t0
--echo # create_select for a temporary table didn't set up pos_in_locked_tables.
create table t (a int unique) engine=innodb
replace select 1 as a, 2 as b union select 1 as a, 3 as c;
select * from t;
drop table t;
create temporary table t (a int unique) engine=innodb
replace select 1 as a, 2 as b union select 1 as a, 3 as c;
select * from t;
drop table t;
--echo #
--echo # MDEV-20729 Fix REFERENCES constraint in column definition
--echo #
create or replace table t1(
id int
);
create table t1(id int);
--echo # system fields can't be foreign keys:
--replace_result $sys_datatype_expl SYS_DATATYPE
--error ER_PARSE_ERROR,ER_PARSE_ERROR
@@ -570,4 +614,6 @@ eval create or replace table t2(
) engine innodb with system versioning;
drop table t1;
--echo # End of 10.5 tests
--source suite/versioning/common_finish.inc

View file

@@ -36,7 +36,7 @@ echo '[Service]'
echo
if [[ ( "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then
if [[ ( ! -z "$user" && "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then
echo User=$user
fi

View file

@@ -584,7 +584,7 @@ if ($MySQLaccess::CGI) { #CGI-version
# ----------------------
# brief and table-format
# exclude each-other
# table-format is prefered
# table-format is preferred
if (defined($Param{'table'})) { undef($Param{'brief'}); }
if (defined($Param{'preview'}) or
defined($Param{'copy'}) or

View file

@@ -47,18 +47,51 @@ trim_string()
trim_dir()
{
local t=$(trim_string "$1")
if [ "$t" != '/' ]; then
if [ "${t%/}" != "$t" ]; then
t=$(trim_string "${t%/}")
if [ -n "$BASH_VERSION" ]; then
local pattern="![:space:]${2:-}"
local x="${1#*[$pattern]}"
local z=${#1}
x=${#x}
if [ $x -ne $z ]; then
local y="${1%[$pattern/]*}"
y=${#y}
x=$(( z-x-1 ))
y=$(( y-x+1 ))
x="${1:$x:$y}"
[ -z "$x" ] && x='.'
printf '%s' "$x"
else
printf ''
fi
else
t='.'
local pattern="[:space:]${2:-}"
local x=$(echo "$1" | sed -E "s/^[$pattern]+|[$pattern/]+\$//g")
if [ -n "$x" ]; then
echo "$x"
elif "${1#*/}" != "$1"; then
echo '.'
else
echo ''
fi
fi
}
trim_right()
{
if [ -n "$BASH_VERSION" ]; then
printf '%s' "$t"
local pattern="[![:space:]${2:-}]"
local z=${#1}
local y="${1%$pattern*}"
y=${#y}
if [ $y -ne $z ]; then
y=$(( y+1 ))
printf '%s' "${1:0:$y}"
else
printf ''
fi
else
echo "$t"
local pattern="[[:space:]${2:-}]"
echo "$1" | sed -E "s/$pattern+\$//g"
fi
}
@@ -101,6 +134,7 @@ WSREP_SST_OPT_ADDR=""
WSREP_SST_OPT_ADDR_PORT=""
WSREP_SST_OPT_HOST=""
WSREP_SST_OPT_HOST_UNESCAPED=""
ARIA_LOG_DIR=""
INNODB_DATA_HOME_DIR=$(trim_dir "${INNODB_DATA_HOME_DIR:-}")
INNODB_LOG_GROUP_HOME=$(trim_dir "${INNODB_LOG_GROUP_HOME:-}")
INNODB_UNDO_DIR=$(trim_dir "${INNODB_UNDO_DIR:-}")
@@ -111,7 +145,7 @@ INNOEXTRA=""
while [ $# -gt 0 ]; do
case "$1" in
'--address')
WSREP_SST_OPT_ADDR="$2"
WSREP_SST_OPT_ADDR=$(trim_string "$2")
#
# Break address string into host:port/path parts
#
@@ -119,20 +153,22 @@ case "$1" in
\[*)
# IPv6
# Remove the starting and ending square brackets, if present:
addr_no_bracket="${WSREP_SST_OPT_ADDR#\[}"
addr="${WSREP_SST_OPT_ADDR#\[}"
addr=$(trim_right "${addr%%\]*}")
# Some utilities and subsequent code require an address
# without square brackets:
readonly WSREP_SST_OPT_HOST_UNESCAPED="${addr_no_bracket%%\]*}"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr"
# Square brackets are needed in most cases:
readonly WSREP_SST_OPT_HOST="[$WSREP_SST_OPT_HOST_UNESCAPED]"
readonly WSREP_SST_OPT_HOST="[$addr]"
# Mark this address as IPv6:
readonly WSREP_SST_OPT_HOST_IPv6=1
# Let's remove the leading part that contains the host address:
remain="${WSREP_SST_OPT_ADDR#*\]}"
;;
*)
readonly WSREP_SST_OPT_HOST="${WSREP_SST_OPT_ADDR%%[:/]*}"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$WSREP_SST_OPT_HOST"
addr=$(trim_right "${WSREP_SST_OPT_ADDR%%[:/]*}")
readonly WSREP_SST_OPT_HOST="$addr"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr"
readonly WSREP_SST_OPT_HOST_IPv6=0
# Let's remove the leading part that contains the host address:
remain="${WSREP_SST_OPT_ADDR#*[:/]}"
@@ -154,17 +190,18 @@ case "$1" in
else
readonly WSREP_SST_OPT_PATH=""
fi
WSREP_SST_OPT_ADDR_PORT=$(trim_right "$WSREP_SST_OPT_ADDR_PORT")
# Remove the module name part from the string, which ends with "/":
remain="${WSREP_SST_OPT_PATH#*/}"
# This operation removes the tail after the very first occurrence
# of the "/" character, inclusively:
readonly WSREP_SST_OPT_MODULE="${WSREP_SST_OPT_PATH%%/*}"
readonly WSREP_SST_OPT_MODULE=$(trim_right "${WSREP_SST_OPT_PATH%%/*}")
# If there is one more "/" in the string, then everything before
# it will be the LSN, otherwise the LSN is empty:
if [ "$remain" != "$WSREP_SST_OPT_PATH" ]; then
# Extract the part that matches the LSN by removing all
# characters starting from the very first "/":
readonly WSREP_SST_OPT_LSN="${remain%%/*}"
readonly WSREP_SST_OPT_LSN=$(trim_right "${remain%%/*}")
# Extract everything after the first occurrence of
# the "/" character in the string:
source="$remain"
@@ -176,7 +213,7 @@ case "$1" in
# Let's extract the version number by removing the tail
# after the very first occurrence of the "/" character
# (inclusively):
readonly WSREP_SST_OPT_SST_VER="${remain%%/*}"
readonly WSREP_SST_OPT_SST_VER=$(trim_right "${remain%%/*}")
else
readonly WSREP_SST_OPT_SST_VER=""
fi
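A hedged worked example of the address string this block decomposes (all values hypothetical): given --address '[fd12::34]:4444/rsync_sst/123-456:0/2', the assignments above produce WSREP_SST_OPT_HOST='[fd12::34]', WSREP_SST_OPT_HOST_UNESCAPED='fd12::34', WSREP_SST_OPT_ADDR_PORT='4444', WSREP_SST_OPT_MODULE='rsync_sst', WSREP_SST_OPT_LSN='123-456:0' and WSREP_SST_OPT_SST_VER='2'.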
@@ -198,6 +235,11 @@ case "$1" in
readonly WSREP_SST_OPT_DATA=$(trim_dir "$2")
shift
;;
'--aria-log-dir-path')
# Let's remove the trailing slash:
readonly ARIA_LOG_DIR=$(trim_dir "$2")
shift
;;
'--innodb-data-home-dir')
# Let's remove the trailing slash:
readonly INNODB_DATA_HOME_DIR=$(trim_dir "$2")
@@ -218,41 +260,46 @@ case "$1" in
shift
;;
'--defaults-file')
readonly WSREP_SST_OPT_DEFAULT="$1=$2"
readonly WSREP_SST_OPT_DEFAULTS="$1='$2'"
file=$(trim_string "$2")
readonly WSREP_SST_OPT_DEFAULT="$1=$file"
readonly WSREP_SST_OPT_DEFAULTS="$1='$file'"
shift
;;
'--defaults-extra-file')
readonly WSREP_SST_OPT_EXTRA_DEFAULT="$1=$2"
readonly WSREP_SST_OPT_EXTRA_DEFAULTS="$1='$2'"
file=$(trim_string "$2")
readonly WSREP_SST_OPT_EXTRA_DEFAULT="$1=$file"
readonly WSREP_SST_OPT_EXTRA_DEFAULTS="$1='$file'"
shift
;;
'--defaults-group-suffix')
readonly WSREP_SST_OPT_SUFFIX_DEFAULT="$1=$2"
readonly WSREP_SST_OPT_SUFFIX_VALUE="$2"
suffix=$(trim_string "$2")
readonly WSREP_SST_OPT_SUFFIX_DEFAULT="$1=$suffix"
readonly WSREP_SST_OPT_SUFFIX_VALUE="$suffix"
shift
;;
'--host')
case "$2" in
addr=$(trim_string "$2")
case "$addr" in
\[*)
# IPv6
# Remove the starting and ending square brackets, if present:
addr_no_bracket="${2#\[}"
addr="${addr#\[}"
addr=$(trim_right "${addr%%\]*}")
# Some utilities and subsequent code require an address
# without square brackets:
readonly WSREP_SST_OPT_HOST_UNESCAPED="${addr_no_bracket%%\]*}"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr"
# Square brackets are needed in most cases:
readonly WSREP_SST_OPT_HOST="[${WSREP_SST_OPT_HOST_UNESCAPED}]"
readonly WSREP_SST_OPT_HOST="[$addr]"
# Mark this address as IPv6:
readonly WSREP_SST_OPT_HOST_IPv6=1
;;
*)
readonly WSREP_SST_OPT_HOST="$2"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$2"
readonly WSREP_SST_OPT_HOST="$addr"
readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr"
readonly WSREP_SST_OPT_HOST_IPv6=0
;;
esac
WSREP_SST_OPT_ADDR="$WSREP_SST_OPT_HOST"
WSREP_SST_OPT_ADDR="$addr"
shift
;;
'--local-port')
@@ -272,11 +319,11 @@ case "$1" in
shift
;;
'--role')
readonly WSREP_SST_OPT_ROLE="$2"
readonly WSREP_SST_OPT_ROLE=$(trim_string "$2")
shift
;;
'--socket')
readonly WSREP_SST_OPT_SOCKET="$2"
readonly WSREP_SST_OPT_SOCKET=$(trim_string "$2")
shift
;;
'--user')
@@ -284,23 +331,23 @@ case "$1" in
shift
;;
'--gtid')
readonly WSREP_SST_OPT_GTID="$2"
readonly WSREP_SST_OPT_GTID=$(trim_string "$2")
shift
;;
'--binlog'|'--log-bin')
readonly WSREP_SST_OPT_BINLOG="$2"
readonly WSREP_SST_OPT_BINLOG=$(trim_string "$2")
shift
;;
'--binlog-index'|'--log-bin-index')
WSREP_SST_OPT_BINLOG_INDEX="$2"
WSREP_SST_OPT_BINLOG_INDEX=$(trim_string "$2")
shift
;;
'--log-basename')
readonly WSREP_SST_OPT_LOG_BASENAME="$2"
readonly WSREP_SST_OPT_LOG_BASENAME=$(trim_string "$2")
shift
;;
'--gtid-domain-id')
readonly WSREP_SST_OPT_GTID_DOMAIN_ID="$2"
readonly WSREP_SST_OPT_GTID_DOMAIN_ID=$(trim_string "$2")
shift
;;
'--mysqld-args')
@@ -458,6 +505,12 @@ case "$1" in
# from mysqld's argument list:
skip_mysqld_arg=0
case "$option" in
'--aria-log-dir-path')
if [ -z "$ARIA_LOG_DIR" ]; then
MYSQLD_OPT_ARIA_LOG_DIR=$(trim_dir "$value")
fi
skip_mysqld_arg=1
;;
'--innodb-data-home-dir')
if [ -z "$INNODB_DATA_HOME_DIR" ]; then
MYSQLD_OPT_INNODB_DATA_HOME_DIR=$(trim_dir "$value")
@@ -551,6 +604,10 @@ readonly WSREP_SST_OPT_PROGRESS
# The same argument can be present on the command line several
# times, in this case we must take its last value:
if [ -n "${MYSQLD_OPT_ARIA_LOG_DIR:-}" -a \
-z "$ARIA_LOG_DIR" ]; then
readonly ARIA_LOG_DIR="$MYSQLD_OPT_ARIA_LOG_DIR"
fi
if [ -n "${MYSQLD_OPT_INNODB_DATA_HOME_DIR:-}" -a \
-z "$INNODB_DATA_HOME_DIR" ]; then
readonly INNODB_DATA_HOME_DIR="$MYSQLD_OPT_INNODB_DATA_HOME_DIR"
@@ -608,6 +665,9 @@ if [ -n "$WSREP_SST_OPT_LOG_BASENAME" ]; then
WSREP_SST_OPT_MYSQLD="--log-basename='$WSREP_SST_OPT_LOG_BASENAME'"
fi
fi
if [ -n "$ARIA_LOG_DIR" ]; then
INNOEXTRA="$INNOEXTRA --aria-log-dir-path='$ARIA_LOG_DIR'"
fi
if [ -n "$INNODB_DATA_HOME_DIR" ]; then
INNOEXTRA="$INNOEXTRA --innodb-data-home-dir='$INNODB_DATA_HOME_DIR'"
fi
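As a hypothetical illustration: a donor started with --aria-log-dir-path=/srv/aria-logs (or with aria-log-dir-path set in my.cnf) ends up with ARIA_LOG_DIR='/srv/aria-logs', so INNOEXTRA gains --aria-log-dir-path='/srv/aria-logs' and the SST scripts can handle Aria logs kept outside the data directory, mirroring the existing InnoDB directory overrides.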

View file

@@ -55,6 +55,7 @@ speciald=1
ib_home_dir=""
ib_log_dir=""
ib_undo_dir=""
ar_log_dir=""
sfmt=""
strmcmd=""
@@ -439,9 +440,10 @@ get_footprint()
-regex '.*undo[0-9]+$\|.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' \
-type f -print0 | du --files0-from=- --block-size=1 -c -s | \
awk 'END { print $1 }')
local payload_undo=0
if [ -n "$ib_undo_dir" -a -d "$ib_undo_dir" ]; then
if [ -n "$ib_undo_dir" -a "$ib_undo_dir" != '.' -a \
"$ib_undo_dir" != "$DATA_DIR" -a -d "$ib_undo_dir" ]
then
cd "$ib_undo_dir"
payload_undo=$(find . -regex '.*undo[0-9]+$' -type f -print0 | \
du --files0-from=- --block-size=1 -c -s | awk 'END { print $1 }')
@@ -451,7 +453,7 @@ get_footprint()
wsrep_log_info \
"SST footprint estimate: data: $payload_data, undo: $payload_undo"
payload=$(( payload_data + payload_undo ))
payload=$(( payload_data+payload_undo ))
if [ "$compress" != 'none' ]; then
# QuickLZ has around 50% compression ratio
@@ -1220,13 +1222,16 @@ else # joiner
INNODB_DATA_HOME_DIR=$(trim_dir "$INNODB_DATA_HOME_DIR")
fi
if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' ]; then
if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' -a \
"$INNODB_DATA_HOME_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_DATA_HOME_DIR" ] && mkdir -p "$INNODB_DATA_HOME_DIR"
cd "$INNODB_DATA_HOME_DIR"
ib_home_dir="$(pwd)"
cd "$OLD_PWD"
[ "$ib_home_dir" = "$DATA_DIR" ] && ib_home_dir=""
fi
# if no command line argument and INNODB_LOG_GROUP_HOME is not set,
@@ -1236,13 +1241,16 @@ else # joiner
INNODB_LOG_GROUP_HOME=$(trim_dir "$INNODB_LOG_GROUP_HOME")
fi
if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' ]; then
if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' -a \
"$INNODB_LOG_GROUP_HOME" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_LOG_GROUP_HOME" ] && mkdir -p "$INNODB_LOG_GROUP_HOME"
cd "$INNODB_LOG_GROUP_HOME"
ib_log_dir="$(pwd)"
cd "$OLD_PWD"
[ "$ib_log_dir" = "$DATA_DIR" ] && ib_log_dir=""
fi
# if no command line argument and INNODB_UNDO_DIR is not set,
@@ -1252,13 +1260,34 @@ else # joiner
INNODB_UNDO_DIR=$(trim_dir "$INNODB_UNDO_DIR")
fi
if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' ]; then
if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' -a \
"$INNODB_UNDO_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_UNDO_DIR" ] && mkdir -p "$INNODB_UNDO_DIR"
cd "$INNODB_UNDO_DIR"
ib_undo_dir="$(pwd)"
cd "$OLD_PWD"
[ "$ib_undo_dir" = "$DATA_DIR" ] && ib_undo_dir=""
fi
# if no command line argument then try to get it from the my.cnf:
if [ -z "$ARIA_LOG_DIR" ]; then
ARIA_LOG_DIR=$(parse_cnf '--mysqld' 'aria-log-dir-path')
ARIA_LOG_DIR=$(trim_dir "$ARIA_LOG_DIR")
fi
if [ -n "$ARIA_LOG_DIR" -a "$ARIA_LOG_DIR" != '.' -a \
"$ARIA_LOG_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$ARIA_LOG_DIR" ] && mkdir -p "$ARIA_LOG_DIR"
cd "$ARIA_LOG_DIR"
ar_log_dir="$(pwd)"
cd "$OLD_PWD"
[ "$ar_log_dir" = "$DATA_DIR" ] && ar_log_dir=""
fi
if [ -n "$backup_threads" ]; then
@@ -1400,12 +1429,14 @@ else # joiner
find -E ${ib_home_dir:+"$ib_home_dir"} \
${ib_undo_dir:+"$ib_undo_dir"} \
${ib_log_dir:+"$ib_log_dir"} \
${ar_log_dir:+"$ar_log_dir"} \
"$DATA" -mindepth 1 -prune -regex "$cpat" \
-o -exec rm -rf {} >&2 \+
else
find ${ib_home_dir:+"$ib_home_dir"} \
${ib_undo_dir:+"$ib_undo_dir"} \
${ib_log_dir:+"$ib_log_dir"} \
${ar_log_dir:+"$ar_log_dir"} \
"$DATA" -mindepth 1 -prune -regex "$cpat" \
-o -exec rm -rf {} >&2 \+
fi
@@ -1500,11 +1531,15 @@ else # joiner
binlogs=$(ls -d -1 "$binlog_base".[0-9]* 2>/dev/null || :)
fi
cd "$DATA_DIR"
if [ -n "$binlog_dir" -a "$binlog_dir" != '.' ]; then
if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \
"$binlog_dir" != "$DATA_DIR" ]
then
[ ! -d "$binlog_dir" ] && mkdir -p "$binlog_dir"
fi
index_dir=$(dirname "$binlog_index");
if [ -n "$index_dir" -a "$index_dir" != '.' ]; then
if [ -n "$index_dir" -a "$index_dir" != '.' -a \
"$index_dir" != "$DATA_DIR" ]
then
[ ! -d "$index_dir" ] && mkdir -p "$index_dir"
fi
if [ -n "$binlogs" ]; then

View file

@@ -174,6 +174,7 @@ cd "$OLD_PWD"
BINLOG_TAR_FILE="$DATA_DIR/wsrep_sst_binlog.tar"
ar_log_dir="$DATA_DIR"
ib_log_dir="$DATA_DIR"
ib_home_dir="$DATA_DIR"
ib_undo_dir="$DATA_DIR"
@@ -185,7 +186,9 @@ if [ -z "$INNODB_LOG_GROUP_HOME" ]; then
INNODB_LOG_GROUP_HOME=$(trim_dir "$INNODB_LOG_GROUP_HOME")
fi
if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' ]; then
if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' -a \
"$INNODB_LOG_GROUP_HOME" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_LOG_GROUP_HOME" ] && mkdir -p "$INNODB_LOG_GROUP_HOME"
@@ -201,7 +204,9 @@ if [ -z "$INNODB_DATA_HOME_DIR" ]; then
INNODB_DATA_HOME_DIR=$(trim_dir "$INNODB_DATA_HOME_DIR")
fi
if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' ]; then
if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' -a \
"$INNODB_DATA_HOME_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_DATA_HOME_DIR" ] && mkdir -p "$INNODB_DATA_HOME_DIR"
@@ -217,7 +222,9 @@ if [ -z "$INNODB_UNDO_DIR" ]; then
INNODB_UNDO_DIR=$(trim_dir "$INNODB_UNDO_DIR")
fi
if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' ]; then
if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' -a \
"$INNODB_UNDO_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$INNODB_UNDO_DIR" ] && mkdir -p "$INNODB_UNDO_DIR"
@@ -226,6 +233,23 @@ if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' ]; then
cd "$OLD_PWD"
fi
# if no command line argument then try to get it from the my.cnf:
if [ -z "$ARIA_LOG_DIR" ]; then
ARIA_LOG_DIR=$(parse_cnf '--mysqld' 'aria-log-dir-path')
ARIA_LOG_DIR=$(trim_dir "$ARIA_LOG_DIR")
fi
if [ -n "$ARIA_LOG_DIR" -a "$ARIA_LOG_DIR" != '.' -a \
"$ARIA_LOG_DIR" != "$DATA_DIR" ]
then
# handle both relative and absolute paths:
cd "$DATA"
[ ! -d "$ARIA_LOG_DIR" ] && mkdir -p "$ARIA_LOG_DIR"
cd "$ARIA_LOG_DIR"
ar_log_dir="$(pwd)"
cd "$OLD_PWD"
fi
encgroups='--mysqld|sst'
check_server_ssl_config
@@ -504,7 +528,9 @@ EOF
if [ "$first" = '-' -o "$first" = '@' ]; then
bin_base="./$bin_base"
fi
if [ -n "$bin_dir" -a "$bin_dir" != '.' ]; then
if [ -n "$bin_dir" -a "$bin_dir" != '.' -a \
"$bin_dir" != "$DATA_DIR" ]
then
tar $tar_options "$BINLOG_TAR_FILE" \
-C "$bin_dir" "$bin_base" >&2
else
@@ -541,7 +567,10 @@ FILTER="-f '- /lost+found'
-f '+ /wsrep_sst_binlog.tar'
-f '- $ib_home_dir/ib_lru_dump'
-f '- $ib_home_dir/ibdata*'
-f '+ $ib_undo_dir/undo*'
-f '- $ib_undo_dir/undo*'
-f '- $ib_log_dir/ib_logfile[0-9]*'
-f '- $ar_log_dir/aria_log_control'
-f '- $ar_log_dir/aria_log.*'
-f '+ /*/'
-f '- /*'"
@@ -587,12 +616,12 @@ FILTER="-f '- /lost+found'
wsrep_log_info "Transfer of InnoDB data files done"
# second, we transfer InnoDB and Aria log files
# second, we transfer InnoDB log files
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
$WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' -f '+ /aria_log.*' \
-f '+ /aria_log_control' -f '- **' "$ib_log_dir/" \
$WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' \
-f '- **' "$ib_log_dir/" \
"rsync://$WSREP_SST_OPT_ADDR-log_dir" >&2 || RC=$?
if [ $RC -ne 0 ]; then
@@ -600,26 +629,61 @@ FILTER="-f '- /lost+found'
exit 255 # unknown error
fi
wsrep_log_info "Transfer of InnoDB and Aria log files done"
wsrep_log_info "Transfer of InnoDB log files done"
# third, we transfer InnoDB undo logs
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
$WHOLE_FILE_OPT -f '+ /undo*' \
-f '- **' "$ib_undo_dir/" \
"rsync://$WSREP_SST_OPT_ADDR-undo_dir" >&2 || RC=$?
if [ $RC -ne 0 ]; then
wsrep_log_error "rsync innodb_undo_dir returned code $RC:"
exit 255 # unknown error
fi
wsrep_log_info "Transfer of InnoDB undo logs done"
# fourth, we transfer Aria logs
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
$WHOLE_FILE_OPT -f '+ /aria_log_control' -f '+ /aria_log.*' \
-f '- **' "$ar_log_dir/" \
"rsync://$WSREP_SST_OPT_ADDR-aria_log" >&2 || RC=$?
if [ $RC -ne 0 ]; then
wsrep_log_error "rsync aria_log_dir_path returned code $RC:"
exit 255 # unknown error
fi
wsrep_log_info "Transfer of Aria logs done"
# then, we parallelize the transfer of database directories,
# use '.' so that path concatenation works:
cd "$DATA"
backup_threads=$(parse_cnf '--mysqld|sst' 'backup-threads')
if [ -z "$backup_threads" ]; then
get_proc
backup_threads=$nproc
fi
cd "$DATA"
find . -maxdepth 1 -mindepth 1 -type d -not -name 'lost+found' \
-not -name '.zfs' -print0 | xargs -I{} -0 -P $backup_threads \
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials --ignore-times \
--inplace --recursive --delete --quiet $WHOLE_FILE_OPT \
--exclude '*/ib_logfile*' --exclude '*/aria_log.*' \
--exclude '*/aria_log_control' "$WSREP_SST_OPT_DATA/{}/" \
-f '- $ib_home_dir/ib_lru_dump' \
-f '- $ib_home_dir/ibdata*' \
-f '- $ib_undo_dir/undo*' \
-f '- $ib_log_dir/ib_logfile[0-9]*' \
-f '- $ar_log_dir/aria_log_control' \
-f '- $ar_log_dir/aria_log.*' \
"$WSREP_SST_OPT_DATA/{}/" \
"rsync://$WSREP_SST_OPT_ADDR/{}" >&2 || RC=$?
cd "$OLD_PWD"
@@ -707,6 +771,10 @@ $SILENT
path = $ib_log_dir
[$MODULE-data_dir]
path = $ib_home_dir
[$MODULE-undo_dir]
path = $ib_undo_dir
[$MODULE-aria_log]
path = $ar_log_dir
EOF
# If the IP is local, listen only on it:
@@ -872,7 +940,7 @@ EOF
binlog_cd=0
# Change the directory to binlog base (if possible):
if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \
-d "$binlog_dir" ]
"$binlog_dir" != "$DATA_DIR" -a -d "$binlog_dir" ]
then
binlog_cd=1
cd "$binlog_dir"
@@ -891,11 +959,15 @@ EOF
tmpfile=$(TMPDIR="$tmpdir"; mktemp)
fi
index_dir=$(dirname "$binlog_index");
if [ -n "$index_dir" -a "$index_dir" != '.' ]; then
if [ -n "$index_dir" -a "$index_dir" != '.' -a \
"$index_dir" != "$DATA_DIR" ]
then
[ ! -d "$index_dir" ] && mkdir -p "$index_dir"
fi
binlog_cd=0
if [ -n "$binlog_dir" -a "$binlog_dir" != '.' ]; then
if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \
"$binlog_dir" != "$DATA_DIR" ]
then
[ ! -d "$binlog_dir" ] && mkdir -p "$binlog_dir"
binlog_cd=1
cd "$binlog_dir"

View file

@@ -10956,7 +10956,7 @@ Create_field *Create_field::clone(MEM_ROOT *mem_root) const
}
/**
Return true if default is an expression that must be saved explicitely
Return true if default is an expression that must be saved explicitly
This is:
- Not basic constants

View file

@@ -5430,7 +5430,7 @@ public:
Record_addr addr(true);
return make_field(share, mem_root, &addr, field_name_arg);
}
/* Return true if default is an expression that must be saved explicitely */
/* Return true if default is an expression that must be saved explicitly */
bool has_default_expression();
bool has_default_now_unireg_check() const

View file

@@ -41,6 +41,7 @@
// find_item_in_list,
// RESOLVED_AGAINST_ALIAS, ...
#include "sql_expression_cache.h"
#include "sql_lex.h" // empty_clex_str
const String my_null_string("NULL", 4, default_charset_info);
const String my_default_string("DEFAULT", 7, default_charset_info);
@@ -1303,12 +1304,11 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
Item *conv= example->safe_charset_converter(thd, tocs);
if (conv == example)
return this;
Item_cache *cache;
if (!conv || conv->fix_fields(thd, (Item **) NULL) ||
unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv))))
return NULL; // Safe conversion is not possible, or OEM
cache->setup(thd, conv);
return cache;
if (!conv || conv->fix_fields(thd, (Item **) NULL))
return NULL; // Safe conversion is not possible, or OOM
setup(thd, conv);
thd->change_item_tree(&example, conv);
return this;
}
@@ -1495,12 +1495,8 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
{
int res;
TABLE *table= field->table;
THD *thd= table->in_use;
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
Sql_mode_save sms(thd);
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
Use_relaxed_field_copy urfc(table->in_use);
res= save_in_field(field, no_conversions);
dbug_tmp_restore_column_map(&table->write_set, old_map);
return res;
@@ -2305,7 +2301,8 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
if (unlikely((!(used_tables() & ~PARAM_TABLE_BIT) ||
(type() == REF_ITEM &&
((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF))))
((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF &&
((Item_ref*)this)->ref_type() != Item_ref::DIRECT_REF))))
return;
}
@@ -2618,7 +2615,6 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
safe_args[1]= args[item_sep];
}
bool res= FALSE;
uint i;
DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare());
@@ -2638,19 +2634,31 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
args[item_sep]= safe_args[1];
}
my_coll_agg_error(args, nargs, fname, item_sep);
res= TRUE;
break; // we cannot return here, we need to restore "arena".
return TRUE;
}
thd->change_item_tree(arg, conv);
if (conv->fix_fields_if_needed(thd, arg))
return TRUE;
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
if (arena)
{
res= TRUE;
break; // we cannot return here, we need to restore "arena".
Item_direct_ref_to_item *ref=
new (thd->mem_root) Item_direct_ref_to_item(thd, *arg);
if ((ref == NULL) || ref->fix_fields(thd, (Item **)&ref))
{
thd->restore_active_arena(arena, &backup);
return TRUE;
}
*arg= ref;
thd->restore_active_arena(arena, &backup);
ref->change_item(thd, conv);
}
else
thd->change_item_tree(arg, conv);
}
return res;
return FALSE;
}
@@ -7295,7 +7303,7 @@ bool Item_null::send(Protocol *protocol, st_value *buffer)
bool Item::cache_const_expr_analyzer(uchar **arg)
{
bool *cache_flag= (bool*)*arg;
uchar *cache_flag= *arg;
if (!*cache_flag)
{
Item *item= real_item();
@@ -7334,9 +7342,9 @@ bool Item::cache_const_expr_analyzer(uchar **arg)
Item* Item::cache_const_expr_transformer(THD *thd, uchar *arg)
{
if (*(bool*)arg)
if (*arg)
{
*((bool*)arg)= FALSE;
*arg= FALSE;
Item_cache *cache= get_cache(thd);
if (!cache)
return NULL;
@@ -10281,8 +10289,8 @@ bool Item_cache_timestamp::cache_value()
if (!example)
return false;
value_cached= true;
null_value= example->val_native_with_conversion_result(current_thd, &m_native,
type_handler());
null_value_inside= null_value=
example->val_native_with_conversion_result(current_thd, &m_native, type_handler());
return true;
}
@@ -10854,8 +10862,6 @@ const char *dbug_print(SELECT_LEX_UNIT *x) { return dbug_print_unit(x); }
#endif /*DBUG_OFF*/
void Item::register_in(THD *thd)
{
next= thd->free_list;
@@ -10863,6 +10869,48 @@ void Item::register_in(THD *thd)
}
Item_direct_ref_to_item::Item_direct_ref_to_item(THD *thd, Item *item)
: Item_direct_ref(thd, NULL, NULL, empty_clex_str, empty_clex_str)
{
m_item= item;
ref= (Item**)&m_item;
}
bool Item_direct_ref_to_item::fix_fields(THD *thd, Item **)
{
DBUG_ASSERT(m_item != NULL);
if (m_item->fix_fields_if_needed_for_scalar(thd, ref))
return TRUE;
set_properties();
return FALSE;
}
void Item_direct_ref_to_item::print(String *str, enum_query_type query_type)
{
m_item->print(str, query_type);
}
Item *Item_direct_ref_to_item::safe_charset_converter(THD *thd,
CHARSET_INFO *tocs)
{
Item *conv= m_item->safe_charset_converter(thd, tocs);
if (conv != m_item)
{
if (conv== NULL || conv->fix_fields(thd, &conv))
return NULL;
change_item(thd, conv);
}
return this;
}
void Item_direct_ref_to_item::change_item(THD *thd, Item *i)
{
DBUG_ASSERT(i->is_fixed());
thd->change_item_tree(ref, i);
set_properties();
}
bool Item::cleanup_excluding_immutables_processor (void *arg)
{
if (!(get_extraction_flag() == IMMUTABLE_FL))

View file

@@ -1910,6 +1910,11 @@ public:
}
virtual Item* transform(THD *thd, Item_transformer transformer, uchar *arg);
virtual Item* top_level_transform(THD *thd, Item_transformer transformer,
uchar *arg)
{
return transform(thd, transformer, arg);
}
/*
This function performs a generic "compilation" of the Item tree.
@@ -1934,6 +1939,11 @@ public:
return ((this->*transformer) (thd, arg_t));
return 0;
}
virtual Item* top_level_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t)
{
return compile(thd, analyzer, arg_p, transformer, arg_t);
}
virtual void traverse_cond(Cond_traverser traverser,
void *arg, traverse_order order)
@@ -5994,6 +6004,14 @@ public:
Item_direct_ref::save_in_result_field(no_conversions);
}
int save_in_field(Field *field, bool no_conversions) override
{
if (check_null_ref())
return set_field_to_null_with_conversions(field, no_conversions);
return Item_direct_ref::save_in_field(field, no_conversions);
}
void cleanup() override
{
null_ref_table= NULL;
@@ -7591,6 +7609,104 @@ inline void Virtual_column_info::print(String* str)
expr->print_for_table_def(str);
}
class Item_direct_ref_to_item : public Item_direct_ref
{
Item *m_item;
public:
Item_direct_ref_to_item(THD *thd, Item *item);
void change_item(THD *thd, Item *);
bool fix_fields(THD *thd, Item **it);
void print(String *str, enum_query_type query_type);
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
Item *get_tmp_table_item(THD *thd)
{ return m_item->get_tmp_table_item(thd); }
Item *get_copy(THD *thd)
{ return m_item->get_copy(thd); }
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref)
{
return m_item->build_equal_items(thd, inherited, link_item_fields,
cond_equal_ref);
}
const char *full_name() const { return m_item->full_name(); }
void make_send_field(THD *thd, Send_field *field)
{ m_item->make_send_field(thd, field); }
bool eq(const Item *item, bool binary_cmp) const
{
Item *it= ((Item *) item)->real_item();
return m_item->eq(it, binary_cmp);
}
void fix_after_pullout(st_select_lex *new_parent, Item **refptr, bool merge)
{ m_item->fix_after_pullout(new_parent, &m_item, merge); }
void save_val(Field *to)
{ return m_item->save_val(to); }
void save_result(Field *to)
{ return m_item->save_result(to); }
int save_in_field(Field *to, bool no_conversions)
{ return m_item->save_in_field(to, no_conversions); }
const Type_handler *type_handler() const { return m_item->type_handler(); }
table_map used_tables() const { return m_item->used_tables(); }
void update_used_tables()
{ m_item->update_used_tables(); }
bool const_item() const { return m_item->const_item(); }
table_map not_null_tables() const { return m_item->not_null_tables(); }
bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
return m_item->walk(processor, walk_subquery, arg) ||
(this->*processor)(arg);
}
bool enumerate_field_refs_processor(void *arg)
{ return m_item->enumerate_field_refs_processor(arg); }
Item_field *field_for_view_update()
{ return m_item->field_for_view_update(); }
/* Row emulation: forwarding of ROW-related calls to orig_item */
uint cols() const
{ return m_item->cols(); }
Item* element_index(uint i)
{ return this; }
Item** addr(uint i)
{ return &m_item; }
bool check_cols(uint c)
{ return Item::check_cols(c); }
bool null_inside()
{ return m_item->null_inside(); }
void bring_value()
{}
Item_equal *get_item_equal() { return m_item->get_item_equal(); }
void set_item_equal(Item_equal *item_eq) { m_item->set_item_equal(item_eq); }
Item_equal *find_item_equal(COND_EQUAL *cond_equal)
{ return m_item->find_item_equal(cond_equal); }
Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond)
{ return m_item->propagate_equal_fields(thd, ctx, cond); }
Item *replace_equal_field(THD *thd, uchar *arg)
{ return m_item->replace_equal_field(thd, arg); }
bool excl_dep_on_table(table_map tab_map)
{ return m_item->excl_dep_on_table(tab_map); }
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{ return m_item->excl_dep_on_grouping_fields(sel); }
bool is_expensive() { return m_item->is_expensive(); }
Item* build_clone(THD *thd) { return get_copy(thd); }
void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
{
m_item->split_sum_func(thd, ref_pointer_array, fields, flags);
}
/*
This processor states that this is safe for virtual columns
(because this Item is transparent)
*/
bool check_vcol_func_processor(void *arg) { return FALSE;}
};
inline bool TABLE::mark_column_with_deps(Field *field)
{
bool res;

View file

@@ -420,18 +420,9 @@ bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp)
if (args[0]->cmp_type() == STRING_RESULT &&
args[1]->cmp_type() == STRING_RESULT)
{
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
DTCollation tmp;
bool ret= agg_arg_charsets_for_comparison(tmp, args, 2);
if (arena)
thd->restore_active_arena(arena, &backup);
if (ret)
return ret;
if (agg_arg_charsets_for_comparison(tmp, args, 2))
return true;
cmp->m_compare_collation= tmp.collation;
}
// Convert constants when compared to int/year field
@@ -5162,7 +5153,8 @@ bool Item_cond::walk(Item_processor processor, bool walk_subquery, void *arg)
Item returned as the result of transformation of the root node
*/
Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg)
Item *Item_cond::do_transform(THD *thd, Item_transformer transformer, uchar *arg,
bool toplevel)
{
DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare());
@@ -5170,7 +5162,8 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg)
Item *item;
while ((item= li++))
{
Item *new_item= item->transform(thd, transformer, arg);
Item *new_item= toplevel ? item->top_level_transform(thd, transformer, arg)
: item->transform(thd, transformer, arg);
if (!new_item)
return 0;
@@ -5180,7 +5173,9 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg)
Otherwise we'll be allocating a lot of unnecessary memory for
change records at each execution.
*/
if (new_item != item)
if (toplevel)
*li.ref()= new_item;
else if (new_item != item)
thd->change_item_tree(li.ref(), new_item);
}
return Item_func::transform(thd, transformer, arg);
@@ -5211,8 +5206,8 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg)
Item returned as the result of transformation of the root node
*/
Item *Item_cond::compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t)
Item *Item_cond::do_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t, bool toplevel)
{
if (!(this->*analyzer)(arg_p))
return 0;
@@ -5227,7 +5222,11 @@ Item *Item_cond::compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
*/
uchar *arg_v= *arg_p;
Item *new_item= item->compile(thd, analyzer, &arg_v, transformer, arg_t);
if (new_item && new_item != item)
if (!new_item || new_item == item)
continue;
if (toplevel)
*li.ref()= new_item;
else
thd->change_item_tree(li.ref(), new_item);
}
return Item_func::transform(thd, transformer, arg_t);

View file

@@ -3020,12 +3020,30 @@ public:
bool top_level() { return abort_on_null; }
void copy_andor_arguments(THD *thd, Item_cond *item);
bool walk(Item_processor processor, bool walk_subquery, void *arg);
Item *transform(THD *thd, Item_transformer transformer, uchar *arg);
Item *do_transform(THD *thd, Item_transformer transformer, uchar *arg, bool toplevel);
Item *transform(THD *thd, Item_transformer transformer, uchar *arg)
{
return do_transform(thd, transformer, arg, 0);
}
Item *top_level_transform(THD *thd, Item_transformer transformer, uchar *arg)
{
return do_transform(thd, transformer, arg, 1);
}
void traverse_cond(Cond_traverser, void *arg, traverse_order order);
void neg_arguments(THD *thd);
Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *);
Item *do_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t, bool toplevel);
Item *compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t);
Item_transformer transformer, uchar *arg_t)
{
return do_compile(thd, analyzer, arg_p, transformer, arg_t, 0);
}
Item* top_level_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p,
Item_transformer transformer, uchar *arg_t)
{
return do_compile(thd, analyzer, arg_p, transformer, arg_t, 1);
}
bool eval_not_null_tables(void *opt_arg);
bool find_not_null_fields(table_map allowed);
Item *build_clone(THD *thd);

View file

@@ -5538,6 +5538,7 @@ Native_func_registry func_array[] =
{ { STRING_WITH_LEN("JSON_CONTAINS_PATH") }, BUILDER(Create_func_json_contains_path)},
{ { STRING_WITH_LEN("JSON_DEPTH") }, BUILDER(Create_func_json_depth)},
{ { STRING_WITH_LEN("JSON_DETAILED") }, BUILDER(Create_func_json_detailed)},
{ { STRING_WITH_LEN("JSON_PRETTY") }, BUILDER(Create_func_json_detailed)},
{ { STRING_WITH_LEN("JSON_EXISTS") }, BUILDER(Create_func_json_exists)},
{ { STRING_WITH_LEN("JSON_EXTRACT") }, BUILDER(Create_func_json_extract)},
{ { STRING_WITH_LEN("JSON_INSERT") }, BUILDER(Create_func_json_insert)},

View file

@@ -257,16 +257,22 @@ static int json_nice(json_engine_t *je, String *nice_js,
Item_func_json_format::formats mode, int tab_size=4)
{
int depth= 0;
const char *comma, *colon;
static const char *comma= ", ", *colon= "\": ";
uint comma_len, colon_len;
int first_value= 1;
DBUG_ASSERT(je->s.cs == nice_js->charset());
int value_size = 0;
int curr_state= -1;
int64_t value_len= 0;
String curr_str{};
nice_js->length(0);
nice_js->set_charset(je->s.cs);
nice_js->alloc(je->s.str_end - je->s.c_str + 32);
DBUG_ASSERT(mode != Item_func_json_format::DETAILED ||
(tab_size >= 0 && tab_size <= TAB_SIZE_LIMIT));
comma= ", ";
colon= "\": ";
if (mode == Item_func_json_format::LOOSE)
{
comma_len= 2;
@@ -285,6 +291,7 @@ static int json_nice(json_engine_t *je, String *nice_js,
do
{
curr_state= je->state;
switch (je->state)
{
case JST_KEY:
@ -307,7 +314,7 @@ static int json_nice(json_engine_t *je, String *nice_js,
append_tab(nice_js, depth, tab_size))
goto error;
nice_js->append("\"", 1);
nice_js->append('"');
append_simple(nice_js, key_start, key_end - key_start);
nice_js->append(colon, colon_len);
}
@@ -332,17 +339,23 @@ handle_value:
if (append_simple(nice_js, je->value_begin,
je->value_end - je->value_begin))
goto error;
curr_str.copy((const char *)je->value_begin,
je->value_end - je->value_begin, je->s.cs);
value_len= je->value_end - je->value_begin;
first_value= 0;
if (value_size != -1)
value_size++;
}
else
{
if (mode == Item_func_json_format::DETAILED &&
depth > 0 &&
depth > 0 && !(curr_state != JST_KEY) &&
append_tab(nice_js, depth, tab_size))
goto error;
nice_js->append((je->value_type == JSON_VALUE_OBJECT) ? "{" : "[", 1);
first_value= 1;
value_size= (je->value_type == JSON_VALUE_OBJECT) ? -1: 0;
depth++;
}
@@ -351,11 +364,27 @@ handle_value:
case JST_OBJ_END:
case JST_ARRAY_END:
depth--;
if (mode == Item_func_json_format::DETAILED &&
if (mode == Item_func_json_format::DETAILED && (value_size > 1 || value_size == -1) &&
append_tab(nice_js, depth, tab_size))
goto error;
if (mode == Item_func_json_format::DETAILED &&
value_size == 1 && je->state != JST_OBJ_END)
{
for (auto i = 0; i < value_len; i++)
{
nice_js->chop();
}
for (auto i = 0; i < (depth + 1) * tab_size + 1; i++)
{
nice_js->chop();
}
nice_js->append(curr_str);
}
nice_js->append((je->state == JST_OBJ_END) ? "}": "]", 1);
first_value= 0;
value_size= -1;
break;
default:
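The net effect of the value_size bookkeeping and the chop() calls above appears to be that DETAILED output keeps a container holding a single scalar on one line instead of opening an indented block; a hedged SQL sketch (exact whitespace depends on tab_size):
SELECT JSON_DETAILED('{"a": [1], "b": [1, 2]}');
-- "a" keeps its one-element array inline, while "b" is still
-- expanded across indented lines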

View file

@@ -839,10 +839,12 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
my_b_write_bit(file, ptr , (meta & 0xFF) * 8);
return meta & 0xFF;
case MYSQL_TYPE_BLOB_COMPRESSED:
case MYSQL_TYPE_BLOB:
switch (meta) {
case 1:
strmake(typestr, "TINYBLOB/TINYTEXT", typestr_length);
my_snprintf(typestr, typestr_length, "TINYBLOB/TINYTEXT%s",
type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : "");
if (!ptr)
goto return_null;
@@ -850,7 +852,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
my_b_write_quoted(file, ptr + 1, length);
return length + 1;
case 2:
strmake(typestr, "BLOB/TEXT", typestr_length);
my_snprintf(typestr, typestr_length, "BLOB/TEXT%s",
type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : "");
if (!ptr)
goto return_null;
@@ -858,7 +861,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
my_b_write_quoted(file, ptr + 2, length);
return length + 2;
case 3:
strmake(typestr, "MEDIUMBLOB/MEDIUMTEXT", typestr_length);
my_snprintf(typestr, typestr_length, "MEDIUMBLOB/MEDIUMTEXT%s",
type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : "");
if (!ptr)
goto return_null;
@@ -866,7 +870,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
my_b_write_quoted(file, ptr + 3, length);
return length + 3;
case 4:
strmake(typestr, "LONGBLOB/LONGTEXT", typestr_length);
my_snprintf(typestr, typestr_length, "LONGBLOB/LONGTEXT%s",
type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : "");
if (!ptr)
goto return_null;
@@ -878,10 +883,12 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
return 0;
}
case MYSQL_TYPE_VARCHAR_COMPRESSED:
case MYSQL_TYPE_VARCHAR:
case MYSQL_TYPE_VAR_STRING:
length= meta;
my_snprintf(typestr, typestr_length, "VARSTRING(%d)", length);
my_snprintf(typestr, typestr_length, "VARSTRING(%d)%s", length,
type == MYSQL_TYPE_VARCHAR_COMPRESSED ? " COMPRESSED" : "");
if (!ptr)
goto return_null;

View file

@@ -128,6 +128,8 @@
#include <my_service_manager.h>
#include <source_revision.h>
#define mysqld_charset &my_charset_latin1
/* We have HAVE_valgrind below as this speeds up the shutdown of MySQL */
@@ -3852,21 +3854,6 @@ static int init_common_variables()
mysql_real_data_home_len= uint(strlen(mysql_real_data_home));
if (!opt_abort)
{
if (IS_SYSVAR_AUTOSIZE(&server_version_ptr))
sql_print_information("%s (mysqld %s) starting as process %lu ...",
my_progname, server_version, (ulong) getpid());
else
{
char real_server_version[SERVER_VERSION_LENGTH];
set_server_version(real_server_version, sizeof(real_server_version));
sql_print_information("%s (mysqld %s as %s) starting as process %lu ...",
my_progname, real_server_version, server_version,
(ulong) getpid());
}
}
sf_leaking_memory= 0; // no memory leaks from now on
#ifndef EMBEDDED_LIBRARY
@@ -4714,6 +4701,14 @@ static int init_server_components()
error_handler_hook= my_message_sql;
proc_info_hook= set_thd_stage_info;
/*
Print source revision hash, as one of the first lines, if not the
first in error log, for troubleshooting and debugging purposes
*/
if (!opt_help)
sql_print_information("Starting MariaDB %s source revision %s as process %lu",
server_version, SOURCE_REVISION, (ulong) getpid());
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
/*
Parsing the performance schema command line option may have reported

View file

@@ -779,7 +779,7 @@ class QUICK_RANGE :public Sql_alloc {
}
/**
Initalizes a key_range object for communication with storage engine.
Initializes a key_range object for communication with storage engine.
This function facilitates communication with the Storage Engine API by
translating the minimum endpoint of the interval represented by this
@@ -800,7 +800,7 @@ class QUICK_RANGE :public Sql_alloc {
}
/**
Initalizes a key_range object for communication with storage engine.
Initializes a key_range object for communication with storage engine.
This function facilitates communication with the Storage Engine API by
translating the minimum endpoint of the interval represented by this
@@ -817,7 +817,7 @@ class QUICK_RANGE :public Sql_alloc {
}
/**
Initalizes a key_range object for communication with storage engine.
Initializes a key_range object for communication with storage engine.
This function facilitates communication with the Storage Engine API by
translating the maximum endpoint of the interval represented by this
@@ -838,7 +838,7 @@ class QUICK_RANGE :public Sql_alloc {
}
/**
Initalizes a key_range object for communication with storage engine.
Initializes a key_range object for communication with storage engine.
This function facilitates communication with the Storage Engine API by
translating the maximum endpoint of the interval represented by this

View file

@@ -23,6 +23,7 @@
#include "mysqld.h"
#include "sql_class.h"
#include "my_stacktrace.h"
#include <source_revision.h>
#ifdef __WIN__
#include <crtdbg.h>
@@ -180,7 +181,8 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"something is definitely wrong and this may fail.\n\n");
set_server_version(server_version, sizeof(server_version));
my_safe_printf_stderr("Server version: %s\n", server_version);
my_safe_printf_stderr("Server version: %s source revision: %s\n",
server_version, SOURCE_REVISION);
if (dflt_key_cache)
my_safe_printf_stderr("key_buffer_size=%zu\n",

View file

@@ -2411,7 +2411,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Disable slow log if:
- Slow logging is enabled (no change needed)
- This is a normal SP (not event log)
- If we have not explicitely disabled logging of SP
- If we have not explicitly disabled logging of SP
*/
if (save_enable_slow_log &&
((!(m_flags & LOG_SLOW_STATEMENTS) &&
@@ -2425,7 +2425,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Disable general log if:
- If general log is enabled (no change needed)
- This is a normal SP (not event log)
- If we have not explicitely disabled logging of SP
- If we have not explicitly disabled logging of SP
*/
if (!(thd->variables.option_bits & OPTION_LOG_OFF) &&
(!(m_flags & LOG_GENERAL_LOG) &&

View file

@@ -32,7 +32,7 @@ typedef struct st_changed_table_list CHANGED_TABLE_LIST;
Can't create new free memory block if unused memory in block less
than QUERY_CACHE_MIN_ALLOCATION_UNIT.
if QUERY_CACHE_MIN_ALLOCATION_UNIT == 0 then
QUERY_CACHE_MIN_ALLOCATION_UNIT choosed automaticaly
QUERY_CACHE_MIN_ALLOCATION_UNIT choosed automatically
*/
#define QUERY_CACHE_MIN_ALLOCATION_UNIT 512

View file

@@ -5838,7 +5838,7 @@ class select_insert :public select_result_interceptor {
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
virtual int prepare2(JOIN *join);
virtual int send_data(List<Item> &items);
virtual bool store_values(List<Item> &values, bool ignore_errors);
virtual bool store_values(List<Item> &values);
virtual bool can_rollback_data() { return 0; }
bool prepare_eof();
bool send_ok_packet();
@@ -5880,7 +5880,7 @@ public:
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
int binlog_show_create_table(TABLE **tables, uint count);
bool store_values(List<Item> &values, bool ignore_errors);
bool store_values(List<Item> &values);
bool send_eof();
virtual void abort_result_set();
virtual bool can_rollback_data() { return 1; }
@@ -7315,6 +7315,19 @@ public:
};
class Use_relaxed_field_copy: public Sql_mode_save,
public Check_level_instant_set
{
public:
Use_relaxed_field_copy(THD *thd) :
Sql_mode_save(thd), Check_level_instant_set(thd, CHECK_FIELD_IGNORE)
{
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
}
};
/**
This class resembles the SQL Standard schema qualified object name:
<schema qualified name> ::= [ <schema name> <period> ] <qualified identifier>

View file

@@ -4083,7 +4083,7 @@ int select_insert::send_data(List<Item> &values)
bool error=0;
thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
if (store_values(values, info.ignore))
if (store_values(values))
DBUG_RETURN(1);
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
if (unlikely(thd->is_error()))
@@ -4141,17 +4141,17 @@ int select_insert::send_data(List<Item> &values)
}
bool select_insert::store_values(List<Item> &values, bool ignore_errors)
bool select_insert::store_values(List<Item> &values)
{
DBUG_ENTER("select_insert::store_values");
bool error;
if (fields->elements)
error= fill_record_n_invoke_before_triggers(thd, table, *fields, values,
ignore_errors, TRG_EVENT_INSERT);
true, TRG_EVENT_INSERT);
else
error= fill_record_n_invoke_before_triggers(thd, table, table->field_to_fill(),
values, ignore_errors, TRG_EVENT_INSERT);
values, true, TRG_EVENT_INSERT);
DBUG_RETURN(error);
}
@@ -4569,6 +4569,7 @@ TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items,
*/
DBUG_ASSERT(0);
}
create_table->table->pos_in_table_list= create_table;
}
}
else
@@ -4941,10 +4942,10 @@ bool binlog_drop_table(THD *thd, TABLE *table)
}
bool select_create::store_values(List<Item> &values, bool ignore_errors)
bool select_create::store_values(List<Item> &values)
{
return fill_record_n_invoke_before_triggers(thd, table, field, values,
ignore_errors, TRG_EVENT_INSERT);
true, TRG_EVENT_INSERT);
}

View file

@@ -2293,11 +2293,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
int error;
enum_nested_loop_state rc= NESTED_LOOP_OK;
join_tab->table->null_row= 0;
bool check_only_first_match=
join_tab->check_only_first_match() &&
(!join_tab->first_inner || // semi-join case
join_tab->first_inner == join_tab->first_unmatched); // outer join case
bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join();
bool check_only_first_match= join_tab->check_only_first_match();
DBUG_ENTER("JOIN_CACHE::join_matching_records");
/* Return at once if there are no records in the join buffer */
@@ -2363,7 +2359,34 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
Also those records that must be null complemented are not considered
as candidates for matches.
*/
if ((!check_only_first_match && !outer_join_first_inner) ||
bool not_exists_opt_is_applicable= true;
if (check_only_first_match && join_tab->first_inner)
{
/*
This is the case with not_exists optimization for nested outer join
when join_tab is the last inner table for one or more embedding outer
joins. To safely use 'not_exists' optimization in this case we have
to check that the match flags for all these embedding outer joins are
in the 'on' state.
(See also a similar check in evaluate_join_record() for the case when
join buffers are not used.)
*/
for (JOIN_TAB *tab= join_tab->first_inner;
tab && tab->first_inner && tab->last_inner == join_tab;
tab= tab->first_inner->first_upper)
{
if (get_match_flag_by_pos_from_join_buffer(rec_ptr, tab) !=
MATCH_FOUND)
{
not_exists_opt_is_applicable= false;
break;
}
}
}
if (!check_only_first_match ||
(join_tab->first_inner && !not_exists_opt_is_applicable) ||
!skip_next_candidate_for_match(rec_ptr))
{
read_next_candidate_for_match(rec_ptr);
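The loop above climbs from the innermost embedding outer join outwards through first_inner->first_upper until it leaves the nests that end at join_tab. A self-contained model of that traversal is sketched below; JOIN_TAB_stub and its match_found flag are simplified stand-ins, not the server's types:

#include <cstdio>

// Each outer join nest is represented by its first inner table; first_upper
// leads to the first inner table of the enclosing nest, if any.
struct JOIN_TAB_stub
{
  JOIN_TAB_stub *first_inner;
  JOIN_TAB_stub *last_inner;
  JOIN_TAB_stub *first_upper;
  bool match_found;                 // stands in for MATCH_FOUND in the buffer
};

// Mirrors the check above: 'not_exists' may only be applied if every
// embedding outer join ending at join_tab already has its match flag on.
static bool not_exists_opt_is_applicable(JOIN_TAB_stub *join_tab)
{
  for (JOIN_TAB_stub *tab= join_tab->first_inner;
       tab && tab->first_inner && tab->last_inner == join_tab;
       tab= tab->first_inner->first_upper)
    if (!tab->match_found)
      return false;
  return true;
}

int main()
{
  JOIN_TAB_stub first= { nullptr, nullptr, nullptr, true };
  JOIN_TAB_stub last=  { nullptr, nullptr, nullptr, true };
  first.first_inner= &first;        // a nest's first inner points to itself
  first.last_inner= &last;
  last.first_inner= &first;
  printf("%d\n", not_exists_opt_is_applicable(&last));  // 1: flags are on
  return 0;
}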

View file

@@ -10740,9 +10740,8 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond)
*/
if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
{
Item *result= cond->transform(thd,
&Item::multiple_equality_transformer,
(uchar *)this);
Item *result= cond->top_level_transform(thd,
&Item::multiple_equality_transformer, (uchar *)this);
if (!result)
return true;
if (result->type() == Item::COND_ITEM &&

View file

@@ -18338,7 +18338,8 @@ Field *Item_field::create_tmp_field_ex(MEM_ROOT *root, TABLE *table,
src->set_field(field);
if (!(result= create_tmp_field_from_item_field(root, table, NULL, param)))
return NULL;
if (field->eq_def(result))
if (!(field->flags & NO_DEFAULT_VALUE_FLAG) &&
field->eq_def(result))
src->set_default_field(field);
return result;
}
@@ -19239,8 +19240,10 @@ bool Create_tmp_table::finalize(THD *thd,
{
/*
Copy default value. We have to use field_conv() for copy, instead of
memcpy(), because bit_fields may be stored differently
memcpy(), because bit_fields may be stored differently.
But otherwise we copy as is, in particular, ignore NO_ZERO_DATE, etc
*/
Use_relaxed_field_copy urfc(thd);
my_ptrdiff_t ptr_diff= (orig_field->table->s->default_values -
orig_field->table->record[0]);
field->set_notnull();
@@ -28545,20 +28548,20 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
void JOIN::cache_const_exprs()
{
bool cache_flag= FALSE;
bool *analyzer_arg= &cache_flag;
uchar cache_flag= FALSE;
uchar *analyzer_arg= &cache_flag;
/* No need in cache if all tables are constant. */
if (const_tables == table_count)
return;
if (conds)
conds->compile(thd, &Item::cache_const_expr_analyzer, (uchar **)&analyzer_arg,
&Item::cache_const_expr_transformer, (uchar *)&cache_flag);
conds->top_level_compile(thd, &Item::cache_const_expr_analyzer, &analyzer_arg,
&Item::cache_const_expr_transformer, &cache_flag);
cache_flag= FALSE;
if (having)
having->compile(thd, &Item::cache_const_expr_analyzer, (uchar **)&analyzer_arg,
&Item::cache_const_expr_transformer, (uchar *)&cache_flag);
having->top_level_compile(thd, &Item::cache_const_expr_analyzer,
&analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag);
for (JOIN_TAB *tab= first_depth_first_tab(this); tab;
tab= next_depth_first_tab(this, tab))
@@ -28566,10 +28569,8 @@ void JOIN::cache_const_exprs()
if (*tab->on_expr_ref)
{
cache_flag= FALSE;
(*tab->on_expr_ref)->compile(thd, &Item::cache_const_expr_analyzer,
(uchar **)&analyzer_arg,
&Item::cache_const_expr_transformer,
(uchar *)&cache_flag);
(*tab->on_expr_ref)->top_level_compile(thd, &Item::cache_const_expr_analyzer,
&analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag);
}
}
}
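top_level_compile() wraps Item::compile(), whose analyzer callback takes a uchar** in/out argument and whose transformer takes a uchar*, so making the flag a uchar removes the pointer casts at every call site. A reduced, compilable model of that callback contract follows; Item_stub and its members are stand-ins, not the server's Item API:

#include <cstdio>

typedef unsigned char uchar;

// Reduced model of the compile() walker: the analyzer decides whether to
// descend and may rewrite its uchar** argument; the transformer receives a
// plain uchar* argument.
struct Item_stub
{
  bool cache_const_expr_analyzer(uchar **arg) { return **arg == 0; }
  Item_stub *cache_const_expr_transformer(uchar *arg) { *arg= 1; return this; }
  Item_stub *compile(bool (Item_stub::*analyzer)(uchar **), uchar **arg_p,
                     Item_stub *(Item_stub::*transformer)(uchar *),
                     uchar *arg_t)
  {
    return (this->*analyzer)(arg_p) ? (this->*transformer)(arg_t) : this;
  }
};

int main()
{
  uchar cache_flag= 0;              // no bool-to-uchar casts needed any more
  uchar *analyzer_arg= &cache_flag;
  Item_stub cond;
  cond.compile(&Item_stub::cache_const_expr_analyzer, &analyzer_arg,
               &Item_stub::cache_const_expr_transformer, &cache_flag);
  printf("cache_flag=%d\n", cache_flag);  // 1: the transformer ran
  return 0;
}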
@@ -29951,7 +29952,6 @@ void JOIN::init_join_cache_and_keyread()
}
/**
@} (end of group Query_Optimizer)
*/

View file

@@ -1926,11 +1926,7 @@ public:
enum store_key_result copy()
{
enum store_key_result result;
THD *thd= to_field->table->in_use;
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
Sql_mode_save sql_mode(thd);
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
Use_relaxed_field_copy urfc(to_field->table->in_use);
result= copy_inner();
return result;
}

View file

@@ -572,7 +572,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd,
if (is_list_of_rows)
{
Item_row *row_list= (Item_row *)(args[i]->build_clone(thd));
Item_row *row_list= (Item_row *)(args[i]);
if (!row_list)
return true;
@@ -597,8 +597,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd,
sprintf(col_name, "_col_%i", 1);
args[i]->set_name(thd, col_name, strlen(col_name), thd->charset());
}
Item *arg_clone= args[i]->build_clone(thd);
if (!arg_clone || tvc_value->push_back(arg_clone))
if (tvc_value->push_back(args[i]))
return true;
}
@@ -1175,12 +1174,10 @@ bool JOIN::transform_in_predicates_into_in_subq(THD *thd)
{
select_lex->parsing_place= IN_WHERE;
conds=
conds->transform(thd,
&Item::in_predicate_to_in_subs_transformer,
(uchar*) 0);
conds->top_level_transform(thd,
&Item::in_predicate_to_in_subs_transformer, 0);
if (!conds)
DBUG_RETURN(true);
select_lex->prep_where= conds ? conds->copy_andor_structure(thd) : 0;
select_lex->where= conds;
}
@@ -1195,13 +1192,10 @@ bool JOIN::transform_in_predicates_into_in_subq(THD *thd)
if (table->on_expr)
{
table->on_expr=
table->on_expr->transform(thd,
&Item::in_predicate_to_in_subs_transformer,
(uchar*) 0);
table->on_expr->top_level_transform(thd,
&Item::in_predicate_to_in_subs_transformer, 0);
if (!table->on_expr)
DBUG_RETURN(true);
table->prep_on_expr= table->on_expr ?
table->on_expr->copy_andor_structure(thd) : 0;
}
}
}

View file

@@ -1230,7 +1230,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
TRANSACTION can be a non-empty history unit, or can be an identifier
in bit_expr.
In the grammar below we use %prec to explicitely tell Bison to go
In the grammar below we use %prec to explicitly tell Bison to go
through the empty branch in the optional rule only when the lookahead
token does not belong to a small set of selected tokens.

View file

@@ -5810,7 +5810,7 @@ static bool update_wsrep_auto_increment_control (sys_var *self, THD *thd, enum_v
{
/*
The variables that control auto increment shall be calculated
automaticaly based on the size of the cluster. This usually done
automatically based on the size of the cluster. This is usually done
within the wsrep_view_handler_cb callback. However, if the user
manually sets the value of wsrep_auto_increment_control to 'ON',
then we should re-calculate these variables again (because

View file

@@ -3566,7 +3566,7 @@ class Vcol_expr_context
bool inited;
THD *thd;
TABLE *table;
Query_arena backup_arena;
Query_arena backup_arena, *stmt_arena;
table_map old_map;
Security_context *save_security_ctx;
sql_mode_t save_sql_mode;
@@ -3576,6 +3576,7 @@ public:
inited(false),
thd(_thd),
table(_table),
stmt_arena(thd->stmt_arena),
old_map(table->map),
save_security_ctx(thd->security_ctx),
save_sql_mode(thd->variables.sql_mode) {}
@@ -3596,6 +3597,7 @@ bool Vcol_expr_context::init()
thd->security_ctx= tl->security_ctx;
thd->set_n_backup_active_arena(table->expr_arena, &backup_arena);
thd->stmt_arena= thd;
inited= true;
return false;
@@ -3609,6 +3611,7 @@ Vcol_expr_context::~Vcol_expr_context()
thd->security_ctx= save_security_ctx;
thd->restore_active_arena(table->expr_arena, &backup_arena);
thd->variables.sql_mode= save_sql_mode;
thd->stmt_arena= stmt_arena;
}
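The new member follows the same save/restore discipline as the other fields: the constructor snapshots thd->stmt_arena before init() points it at the THD itself, and the destructor puts the snapshot back. Below is a condensed stand-alone model of just the arena handling; THD_stub and Query_arena_stub are stand-ins for the server's classes:

#include <cstdio>

struct Query_arena_stub {};
struct THD_stub { Query_arena_stub *stmt_arena; };

// Snapshot on construction, switch for the guarded scope, restore on exit.
class Vcol_arena_guard
{
  THD_stub *thd;
  Query_arena_stub *saved_stmt_arena;
public:
  Vcol_arena_guard(THD_stub *t, Query_arena_stub *tmp)
    : thd(t), saved_stmt_arena(t->stmt_arena) { thd->stmt_arena= tmp; }
  ~Vcol_arena_guard() { thd->stmt_arena= saved_stmt_arena; }
};

int main()
{
  Query_arena_stub statement_arena, expr_arena;
  THD_stub thd= { &statement_arena };
  {
    Vcol_arena_guard guard(&thd, &expr_arena);
    printf("inside: %s\n", thd.stmt_arena == &expr_arena ? "expr" : "stmt");
  }
  printf("after:  %s\n", thd.stmt_arena == &statement_arena ? "stmt" : "expr");
  return 0;
}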

View file

@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags)
for end of file.
IN assertion: the stream s has been successfully opened for reading.
*/
int get_byte(s)
azio_stream *s;
int get_byte(azio_stream *s)
{
if (s->z_eof) return EOF;
if (s->stream.avail_in == 0)
@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer)
* Cleanup then free the given azio_stream. Return a zlib error code.
Try freeing in the reverse order of allocations.
*/
int destroy (s)
azio_stream *s;
int destroy (azio_stream *s)
{
int err = Z_OK;
@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush)
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
int ZEXPORT azflush (s, flush)
azio_stream *s;
int flush;
int ZEXPORT azflush (azio_stream *s, int flush)
{
int err;
@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush)
/* ===========================================================================
Rewinds input file.
*/
int azrewind (s)
azio_stream *s;
int azrewind (azio_stream *s)
{
if (s == NULL || s->mode != 'r') return -1;
@@ -733,10 +728,7 @@ int azrewind (s)
SEEK_END is not implemented, returns error.
In this version of the library, azseek can be extremely slow.
*/
my_off_t azseek (s, offset, whence)
azio_stream *s;
my_off_t offset;
int whence;
my_off_t azseek (azio_stream *s, my_off_t offset, int whence)
{
if (s == NULL || whence == SEEK_END ||
@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence)
given compressed file. This position represents a number of bytes in the
uncompressed data stream.
*/
my_off_t ZEXPORT aztell (file)
azio_stream *file;
my_off_t ZEXPORT aztell (azio_stream *file)
{
return azseek(file, 0L, SEEK_CUR);
}
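Every hunk in this file is the same mechanical modernization: K&R-style parameter declarations become ANSI prototypes, with the bodies untouched. A self-contained before/after sketch follows; the struct is a stand-in for azio_stream, and the K&R form is shown in a comment because current C++ (and C23) compilers reject it:

#include <cstdio>

typedef struct { int z_eof; } azio_stream_stub;  /* stand-in for azio_stream */

/* K&R style, as removed:
 *   int get_byte(s)
 *   azio_stream *s;
 *   { ... }
 * ANSI style, as introduced: */
int get_byte_stub(azio_stream_stub *s)
{
  return s->z_eof ? EOF : 0;         /* body trimmed to its first check */
}

int main()
{
  azio_stream_stub s= { 1 };
  printf("%d\n", get_byte_stub(&s)); /* prints -1 (EOF): stream exhausted */
  return 0;
}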

View file

@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
bool del, PHC)
{
char *p;
int i, n;
int n;
bool rcop= true;
PCOL colp;
//PCOLUMN cp;
@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1;
} // endfor p
for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) {
for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g))
throw 2;
@@ -317,7 +317,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
n = strlen(p) + 1;
} // endfor p
for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) {
for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) {
if (colp->InitValue(g))
throw 5;

View file

@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo
return ret;
}
void fill_fopen_filefunc (pzlib_filefunc_def)
zlib_filefunc_def* pzlib_filefunc_def;
void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def)
{
pzlib_filefunc_def->zopen_file = fopen_file_func;
pzlib_filefunc_def->zread_file = fread_file_func;

View file

@@ -1054,7 +1054,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
if (!strlen(Field[i])) {
// Generally null fields are not quoted
if (Quoted > 2)
// Except if explicitely required
// Except if explicitly required
strcat(strcat(To_Line, qot), qot);
} else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot

View file

@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp)
/***********************************************************************/
bool TDBTBL::InitTableList(PGLOBAL g)
{
int n;
uint sln;
const char *scs;
PTABLE tp, tabp;
@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
sln = hc->get_table()->s->connect_string.length;
// PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());
for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) {
for (tp = tdp->Tablep; tp; tp = tp->GetNext()) {
if (TestFil(g, To_CondFil, tp)) {
tabp = new(g) XTAB(tp);
@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g)
else
Tablist = tabp;
n++;
} // endif filp
} // endfor tp

View file

@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in
{
uLong uTotalOutBefore = zi->ci.stream.total_out;
err=deflate(&zi->ci.stream, Z_NO_FLUSH);
if(uTotalOutBefore > zi->ci.stream.total_out)
{
int bBreak = 0;
bBreak++;
}
zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
}

View file

@@ -8458,10 +8458,12 @@ ha_innobase::update_row(
const bool vers_ins_row = vers_set_fields
&& thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE;
TABLE_LIST *tl= table->pos_in_table_list;
uint8 op_map= tl->trg_event_map | tl->slave_fk_event_map;
/* This is not a delete */
m_prebuilt->upd_node->is_delete =
(vers_set_fields && !vers_ins_row) ||
(thd_sql_command(m_user_thd) == SQLCOM_DELETE &&
(op_map & trg2bit(TRG_EVENT_DELETE) &&
table->versioned(VERS_TIMESTAMP))
? VERSIONED_DELETE
: NO_DELETE;
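tl->trg_event_map and tl->slave_fk_event_map are per-statement bitmaps of trigger event types, so the op_map test also covers deletes that the old thd_sql_command() == SQLCOM_DELETE comparison missed (multi-table DELETE uses a different SQLCOM code, and FK cascades applied by a replica set slave_fk_event_map). A minimal model of the bitmap idiom follows; the enum values and trg2bit() below are stand-ins mirroring the server's definitions:

#include <cstdint>
#include <cstdio>

enum trg_event_type { TRG_EVENT_INSERT= 0, TRG_EVENT_UPDATE= 1,
                      TRG_EVENT_DELETE= 2 };

// Stand-in mirroring the server's trg2bit(): one bit per trigger event.
static inline uint8_t trg2bit(trg_event_type e)
{
  return uint8_t(1U << int(e));
}

int main()
{
  uint8_t trg_event_map= trg2bit(TRG_EVENT_DELETE);  // set for DELETE statements
  uint8_t slave_fk_event_map= 0;                     // FK cascade on a replica
  uint8_t op_map= trg_event_map | slave_fk_event_map;
  printf("delete event: %d\n", (op_map & trg2bit(TRG_EVENT_DELETE)) != 0);
  return 0;
}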

View file

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2022, Oracle and/or its affiliates.
Copyright (c) 2014, 2022, MariaDB Corporation.
Copyright (c) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -689,8 +689,6 @@ UNIV_INLINE
bool
lock_rec_has_to_wait(
/*=================*/
bool for_locking,
/*!< in is called locking or releasing */
const trx_t* trx, /*!< in: trx of new lock */
unsigned type_mode,/*!< in: precise mode of the new lock
to set: LOCK_S or LOCK_X, possibly
@@ -846,7 +844,7 @@ lock_has_to_wait(
}
return lock_rec_has_to_wait(
false, lock1->trx, lock1->type_mode, lock2,
lock1->trx, lock1->type_mode, lock2,
lock_rec_get_nth_bit(lock1, PAGE_HEAP_NO_SUPREMUM));
}
@@ -1099,7 +1097,7 @@ lock_rec_other_has_conflicting(
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) {
if (lock_rec_has_to_wait(trx, mode, lock, is_supremum)) {
#ifdef WITH_WSREP
if (trx->is_wsrep()) {
trx_mutex_enter(lock->trx);

View file

@@ -167,7 +167,7 @@ void init_timers(void)
/*
For STAGE and STATEMENT, a timer with a fixed frequency is better.
The prefered timer is nanosecond, or lower resolutions.
The preferred timer is nanosecond, or lower resolutions.
*/
if (nanosec_to_pico != 0)
@@ -209,7 +209,7 @@ void init_timers(void)
/*
For IDLE, a timer with a fixed frequency is critical,
as the CPU clock may slow down a lot if the server is completely idle.
The prefered timer is microsecond, or lower resolutions.
The preferred timer is microsecond, or lower resolutions.
*/
if (microsec_to_pico != 0)

View file

@@ -8486,8 +8486,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
const key_range *end_key) {
DBUG_ENTER_FUNC();
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0;
while (1) { debug_i++; });
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););
int rc = 0;
@@ -12152,7 +12151,6 @@ static int calculate_stats(
}
}
int num_sst = 0;
for (const auto &it : props) {
std::vector<Rdb_index_stats> sst_stats;
Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
@@ -12181,7 +12179,6 @@ static int calculate_stats(
stats[it1.m_gl_index_id].merge(
it1, true, it_index->second->max_storage_fmt_length());
}
num_sst++;
}
if (include_memtables) {

View file

@@ -406,7 +406,7 @@ class ha_rocksdb : public my_core::handler {
void free_key_buffers();
// the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
rocksdb::Range get_range(const int i, uchar buf[]) const;
rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const;
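The added bound in uchar buf[2 * 4] is documentation only: a C++ array parameter adjusts to a pointer, so the declaration stays type-identical to uchar buf[] and merely records that get_range() needs 2 * Rdb_key_def::INDEX_NUMBER_SIZE = 8 bytes. A small stand-alone illustration of that adjustment; fill() is a hypothetical function:

#include <cstdio>

// Both lines declare the same function type: the bound in an array
// parameter is ignored by the compiler and kept purely as documentation.
void fill(unsigned char buf[2 * 4]);
void fill(unsigned char *buf);          // redeclaration, not an overload

void fill(unsigned char *buf) { buf[7]= 0xff; }  // caller must supply 8 bytes

int main()
{
  unsigned char range_buf[2 * 4];
  fill(range_buf);
  printf("0x%x\n", range_buf[7]);       // 0xff
  return 0;
}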
/*
Perf timers for data reads

View file

@@ -9609,6 +9609,12 @@ int spider_db_print_item_type(
DBUG_ENTER("spider_db_print_item_type");
DBUG_PRINT("info",("spider COND type=%d", item->type()));
if (item->type() == Item::REF_ITEM &&
((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF)
{
item= item->real_item();
DBUG_PRINT("info",("spider new COND type=%d", item->type()));
}
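Unwrapping a DIRECT_REF through real_item() lets the switch that follows dispatch on the wrapped item's type (for example Item::FUNC_ITEM) instead of on Item::REF_ITEM, which previously fell through unhandled. A reduced model of the unwrap follows; the classes below are stand-ins for the server's Item hierarchy:

#include <cstdio>

// Stand-ins: a reference forwards real_item() to the item it wraps,
// a plain item returns itself.
struct Item_stub
{
  virtual ~Item_stub() {}
  virtual const char *type() const { return "FUNC_ITEM"; }
  virtual Item_stub *real_item() { return this; }
};

struct Item_direct_ref_stub : Item_stub
{
  Item_stub *ref;
  explicit Item_direct_ref_stub(Item_stub *r) : ref(r) {}
  const char *type() const override { return "REF_ITEM"; }
  Item_stub *real_item() override { return ref->real_item(); }
};

int main()
{
  Item_stub func;
  Item_direct_ref_stub wrapper(&func);
  Item_stub *item= &wrapper;
  printf("before unwrap: %s\n", item->type());  // REF_ITEM
  item= item->real_item();                      // the step this hunk adds
  printf("after unwrap:  %s\n", item->type());  // FUNC_ITEM
  return 0;
}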
switch (item->type())
{
case Item::FUNC_ITEM:

View file

@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
// The pivots_fname is the name of the pivots file.
// Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
int64_t n_pivots=0; // number of pivots in pivots_file
FIDX pivots_file; // the file
r = ft_loader_open_temp_file (bl, &pivots_file);
@@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
allocate_node(&sts, lblock);
n_pivots++;
invariant(maxkey.data != NULL);
if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
@@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
// We haven't paniced, so the sum should add up.
invariant(used_estimate == total_disksize_estimate);
n_pivots++;
{
DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
int height = 1;
// Watch out for the case where we saved the last pivot but didn't write any more nodes out.
// The trick is not to look at n_pivots, but to look at blocks.n_blocks
// The trick is to look at blocks.n_blocks
while (sts->n_subtrees > 1) {
// If there is more than one block in blocks, then we must build another level of the tree.

View file

@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
{
//verify neither pair->begin_id nor end_id is in snapshot_xids
TOKUTXN curr_txn = txn_manager->snapshot_head;
uint32_t curr_index = 0;
while (curr_txn != NULL) {
invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
invariant(tuple->end_id != curr_txn->txnid.parent_id64);
curr_txn = curr_txn->snapshot_next;
curr_index++;
}
}
{