Merge branch '10.1' into bb-10.1-serg

commit a73676b2e6
43 changed files with 11763 additions and 146 deletions
@@ -2007,3 +2007,22 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8);
INSERT INTO t1 SELECT a.* FROM t1 a, t1 b, t1 c, t1 d, t1 e;
ALTER TABLE t1 MODIFY i FLOAT;
DROP TABLE t1;
#
# Start of 10.1 tests
#
#
# MDEV-7816 ALTER with DROP INDEX and ADD INDEX .. COMMENT='comment2' ignores the new comment
#
CREATE TABLE t1(a INT);
CREATE INDEX i1 ON t1(a) COMMENT 'comment1';
ALTER TABLE t1 DROP INDEX i1, ADD INDEX i1(a) COMMENT 'comment2';
SHOW CREATE TABLE t1;
Table	Create Table
t1	CREATE TABLE `t1` (
  `a` int(11) DEFAULT NULL,
  KEY `i1` (`a`) COMMENT 'comment2'
) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1;
#
# Start of 10.1 tests
#
@@ -143,6 +143,7 @@ ANALYZE
"attached_condition": "(tbl2.b < 60)"
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"r_filtered": 100
}
@@ -180,6 +181,7 @@ ANALYZE
"attached_condition": "(tbl2.b < 60)"
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl1.c > tbl2.c)",
"r_filtered": 15.833
@@ -412,7 +414,7 @@ create table t0 (a int);
INSERT INTO t0 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int);
INSERT INTO t1 select * from t0;
-analyze format=json (select * from t1 A where a<5) union (select * from t1 B where a in (2,3));
+analyze format=json (select * from t1 tbl1 where a<5) union (select * from t1 tbl2 where a in (2,3));
ANALYZE
{
  "query_block": {
@@ -428,7 +430,7 @@ ANALYZE
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"table": {
-"table_name": "A",
+"table_name": "tbl1",
"access_type": "ALL",
"r_loops": 1,
"rows": 10,
@@ -436,7 +438,7 @@ ANALYZE
"r_total_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 50,
-"attached_condition": "(A.a < 5)"
+"attached_condition": "(tbl1.a < 5)"
}
}
},
@@ -446,7 +448,7 @@ ANALYZE
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"table": {
-"table_name": "B",
+"table_name": "tbl2",
"access_type": "ALL",
"r_loops": 1,
"rows": 10,
@@ -454,7 +456,7 @@ ANALYZE
"r_total_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 20,
-"attached_condition": "(B.a in (2,3))"
+"attached_condition": "(tbl2.a in (2,3))"
}
}
}
5234 mysql-test/r/analyze_stmt_privileges2.result Normal file
File diff suppressed because it is too large
@@ -365,6 +365,7 @@ EXPLAIN
"attached_condition": "(tbl2.b < 5)"
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl2.a = tbl1.a)"
}
@@ -618,6 +619,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL"
}
}
@@ -651,6 +653,7 @@ EXPLAIN
"first_match": "t2"
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
}
@@ -687,6 +690,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
}
@@ -799,6 +803,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t2.b <> outer_t1.a) and trigcond(((<cache>(outer_t1.a) = t1.a) or isnull(t1.a))))"
}
@@ -849,6 +854,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
+"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl2.b = tbl1.b)"
}
@@ -894,3 +900,154 @@ EXPLAIN
}
}
DROP TABLE t1, t2;
#
# MDEV-7927: Server crashes in Time_and_counter_tracker::incr_loops
#
CREATE TABLE t1 (i INT);
INSERT INTO t1 VALUES (1),(2);
EXPLAIN SELECT * FROM t1 WHERE 3 IN ( SELECT 4 UNION SELECT 5 );
id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
1	PRIMARY	NULL	NULL	NULL	NULL	NULL	NULL	NULL	Impossible WHERE
2	SUBQUERY	NULL	NULL	NULL	NULL	NULL	NULL	NULL	No tables used
3	UNION	NULL	NULL	NULL	NULL	NULL	NULL	NULL	No tables used
NULL	UNION RESULT	<union2,3>	ALL	NULL	NULL	NULL	NULL	NULL	
DROP TABLE t1;
#
# MDEV-7860: EXPLAIN FORMAT=JSON crashes for loose scan query
#
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, c int, d int, key(a,b,c));
insert into t1 select A.a, B.a, C.a, D.a from t2 A, t2 B, t2 C, t2 D;
explain select count(distinct b) from t1 group by a;
id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
1	SIMPLE	t1	range	NULL	a	10	NULL	101	Using index for group-by
explain format=json select count(distinct b) from t1 group by a;
EXPLAIN
{
  "query_block": {
    "select_id": 1,
    "table": {
      "table_name": "t1",
      "access_type": "range",
      "key": "a",
      "key_length": "10",
      "used_key_parts": ["a", "b"],
      "rows": 101,
      "filtered": 100,
      "using_index_for_group_by": true
    }
  }
}
analyze format=json select count(distinct b) from t1 group by a;
ANALYZE
{
  "query_block": {
    "select_id": 1,
    "r_loops": 1,
    "r_total_time_ms": "REPLACED",
    "table": {
      "table_name": "t1",
      "access_type": "range",
      "key": "a",
      "key_length": "10",
      "used_key_parts": ["a", "b"],
      "r_loops": 1,
      "rows": 101,
      "r_rows": 100,
      "r_total_time_ms": "REPLACED",
      "filtered": 100,
      "r_filtered": 100,
      "using_index_for_group_by": true
    }
  }
}
drop table t1,t2;
#
# Try both variants of LooseScan (data/queries borrowed from group_min_max.test)
#
create table t1 (
a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(248) default ' '
);
insert into t1 (a1, a2, b, c, d) values
('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'),
('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4');
create index idx_t1_0 on t1 (a1);
create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table	Op	Msg_type	Msg_text
test.t1	analyze	status	Table is already up to date
explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
1	SIMPLE	t1	range	NULL	idx_t1_1	147	NULL	17	Using where; Using index for group-by
explain select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
1	SIMPLE	t1	range	NULL	idx_t1_1	163	NULL	65	Using where; Using index for group-by (scanning)
explain format=json select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
EXPLAIN
{
  "query_block": {
    "select_id": 1,
    "table": {
      "table_name": "t1",
      "access_type": "range",
      "key": "idx_t1_1",
      "key_length": "147",
      "used_key_parts": ["a1", "a2", "b"],
      "rows": 17,
      "filtered": 100,
      "attached_condition": "((t1.b = 'a') and (t1.a2 >= 'b'))",
      "using_index_for_group_by": true
    }
  }
}
explain format=json select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
EXPLAIN
{
  "query_block": {
    "select_id": 1,
    "table": {
      "table_name": "t1",
      "access_type": "range",
      "key": "idx_t1_1",
      "key_length": "163",
      "used_key_parts": ["a1", "a2", "b", "c"],
      "rows": 65,
      "filtered": 100,
      "attached_condition": "((t1.b = 'a') and (t1.c = 'i121') and (t1.a2 >= 'b'))",
      "using_index_for_group_by": "scanning"
    }
  }
}
drop table t1;
83 mysql-test/r/explain_json_format_partitions.result Normal file
@@ -0,0 +1,83 @@
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (
a int not null
) partition by key(a);
insert into t1 select a from t2;
explain partitions select * from t1 where a in (2,3,4);
id	select_type	table	partitions	type	possible_keys	key	key_len	ref	rows	Extra
1	SIMPLE	t1	p0	ALL	NULL	NULL	NULL	NULL	10	Using where
explain format=json select * from t1 where a in (2,3,4);
EXPLAIN
{
  "query_block": {
    "select_id": 1,
    "table": {
      "table_name": "t1",
      "partitions": ["p0"],
      "access_type": "ALL",
      "rows": 10,
      "filtered": 100,
      "attached_condition": "(t1.a in (2,3,4))"
    }
  }
}
analyze format=json select * from t1 where a in (2,3,4);
ANALYZE
{
  "query_block": {
    "select_id": 1,
    "r_loops": 1,
    "r_total_time_ms": "REPLACED",
    "table": {
      "table_name": "t1",
      "partitions": ["p0"],
      "access_type": "ALL",
      "r_loops": 1,
      "rows": 10,
      "r_rows": 10,
      "r_total_time_ms": "REPLACED",
      "filtered": 100,
      "r_filtered": 30,
      "attached_condition": "(t1.a in (2,3,4))"
    }
  }
}
analyze format=json update t1 set a=a+10 where a in (2,3,4);
ANALYZE
{
  "query_block": {
    "select_id": 1,
    "table": {
      "update": 1,
      "table_name": "t1",
      "partitions": ["p0"],
      "access_type": "ALL",
      "rows": 10,
      "r_rows": 3,
      "r_filtered": 100,
      "using_io_buffer": 1,
      "r_total_time_ms": "REPLACED",
      "attached_condition": "(t1.a in (2,3,4))"
    }
  }
}
analyze format=json delete from t1 where a in (20,30,40);
ANALYZE
{
  "query_block": {
    "select_id": 1,
    "table": {
      "delete": 1,
      "table_name": "t1",
      "partitions": ["p0"],
      "access_type": "ALL",
      "rows": 10,
      "r_rows": 10,
      "r_filtered": 0,
      "r_total_time_ms": "REPLACED",
      "attached_condition": "(t1.a in (20,30,40))"
    }
  }
}
drop table t1,t2;
@@ -542,7 +542,8 @@ CREATE TABLE `db_17876.slow_log_data` (
`insert_id` int(11) default NULL,
`server_id` int(11) default NULL,
`sql_text` mediumtext,
-`thread_id` bigint(21) unsigned default NULL
+`thread_id` bigint(21) unsigned default NULL,
+`rows_affected` int(11) default NULL
);
CREATE TABLE `db_17876.general_log_data` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
@@ -103,6 +103,7 @@ eval INSERT INTO t3 VALUES (11, '$bigdata');
# So here, we check that the values are consistent with SHOW MASTER STATUS,
# which uses a different code path and did not have the bug.

--source include/wait_for_binlog_checkpoint.inc
--let $snap_file= query_get_value(SHOW STATUS LIKE 'binlog_snapshot_file', Value, 1)
--let $snap_pos= query_get_value(SHOW STATUS LIKE 'binlog_snapshot_position', Value, 1)

@@ -44,14 +44,14 @@ a
1
2
include/stop_slave.inc
-START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200";
+START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200,0-1-300";
include/wait_for_slave_to_start.inc
Using_Gtid = 'Current_Pos'
Until_Condition = 'Gtid'
INSERT INTO t1 VALUES (3);
DELETE FROM t1 WHERE a=3;
include/stop_slave.inc
include/start_slave.inc
*** Test UNTIL condition in an earlier binlog than the start GTID. ***
include/stop_slave.inc
SET gtid_domain_id = 1;
INSERT INTO t1 VALUES (3);
SET gtid_domain_id = 2;
@@ -1549,6 +1549,64 @@ a b
99 99
include/stop_slave.inc
SET GLOBAL slave_transaction_retries= @old_retries;
SET GLOBAL slave_parallel_threads=10;
include/start_slave.inc
*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang ***
include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep';
SET @old_dbug= @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,binlog_force_commit_id";
SET @commit_id= 10000;
ANALYZE TABLE t2;
Table	Op	Msg_type	Msg_text
test.t2	analyze	status	OK
INSERT INTO t3 VALUES (120, 0);
SET @commit_id= 10001;
INSERT INTO t3 VALUES (121, 0);
SET SESSION debug_dbug=@old_dbug;
SELECT * FROM t3 WHERE a >= 120 ORDER BY a;
a b
120 0
121 0
include/save_master_gtid.inc
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT * FROM t3 WHERE a >= 120 ORDER BY a;
a b
120 0
121 0
include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
include/start_slave.inc
*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. ***
include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep';
SET @old_dbug= @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,binlog_force_commit_id";
SET @old_server_id= @@SESSION.server_id;
SET SESSION server_id= 100;
SET @commit_id= 10010;
ALTER TABLE t1 COMMENT "Hulubulu!";
SET SESSION server_id= @old_server_id;
INSERT INTO t3 VALUES (130, 0);
SET @commit_id= 10011;
INSERT INTO t3 VALUES (131, 0);
SET SESSION debug_dbug=@old_dbug;
SELECT * FROM t3 WHERE a >= 130 ORDER BY a;
a b
130 0
131 0
include/save_master_gtid.inc
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT * FROM t3 WHERE a >= 130 ORDER BY a;
a b
130 0
131 0
include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
include/start_slave.inc
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
@@ -287,6 +287,174 @@ include/stop_slave.inc
SET GLOBAL binlog_format= @old_format;
SET GLOBAL tx_isolation= @old_isolation;
include/start_slave.inc
*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang ***
DROP TABLE t1, t2, t3;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1,1), (2,1), (3,1), (4,1), (5,1);
include/save_master_gtid.inc
include/sync_with_master_gtid.inc
include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep';
ALTER TABLE t2 COMMENT "123abc";
ANALYZE TABLE t2;
Table	Op	Msg_type	Msg_text
test.t2	analyze	status	OK
INSERT INTO t1 VALUES (1,2);
INSERT INTO t1 VALUES (2,2);
INSERT INTO t1 VALUES (3,2);
INSERT INTO t1 VALUES (4,2);
INSERT INTO t3 VALUES (1,3);
ALTER TABLE t2 COMMENT "hello, world";
BEGIN;
INSERT INTO t1 VALUES (5,4);
INSERT INTO t1 VALUES (6,4);
INSERT INTO t1 VALUES (7,4);
INSERT INTO t1 VALUES (8,4);
INSERT INTO t1 VALUES (9,4);
INSERT INTO t1 VALUES (10,4);
INSERT INTO t1 VALUES (11,4);
INSERT INTO t1 VALUES (12,4);
INSERT INTO t1 VALUES (13,4);
INSERT INTO t1 VALUES (14,4);
INSERT INTO t1 VALUES (15,4);
INSERT INTO t1 VALUES (16,4);
INSERT INTO t1 VALUES (17,4);
INSERT INTO t1 VALUES (18,4);
INSERT INTO t1 VALUES (19,4);
INSERT INTO t1 VALUES (20,4);
COMMIT;
INSERT INTO t1 VALUES (21,5);
INSERT INTO t1 VALUES (22,5);
SELECT * FROM t1 ORDER BY a;
a b
1 2
2 2
3 2
4 2
5 4
6 4
7 4
8 4
9 4
10 4
11 4
12 4
13 4
14 4
15 4
16 4
17 4
18 4
19 4
20 4
21 5
22 5
SELECT * FROM t2 ORDER BY a;
a b
1 1
2 1
3 1
4 1
5 1
SELECT * FROM t3 ORDER BY a;
a b
1 3
include/save_master_gtid.inc
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT * FROM t1 ORDER BY a;
a b
1 2
2 2
3 2
4 2
5 4
6 4
7 4
8 4
9 4
10 4
11 4
12 4
13 4
14 4
15 4
16 4
17 4
18 4
19 4
20 4
21 5
22 5
SELECT * FROM t2 ORDER BY a;
a b
1 1
2 1
3 1
4 1
5 1
SELECT * FROM t3 ORDER BY a;
a b
1 3
include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
include/start_slave.inc
*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. ***
include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep';
ALTER TABLE t3 COMMENT "DDL statement 1";
INSERT INTO t1 VALUES (30,0);
INSERT INTO t1 VALUES (31,0);
INSERT INTO t1 VALUES (32,0);
INSERT INTO t1 VALUES (33,0);
INSERT INTO t1 VALUES (34,0);
INSERT INTO t1 VALUES (35,0);
INSERT INTO t1 VALUES (36,0);
SET @old_server_id= @@SESSION.server_id;
SET SESSION server_id= 100;
ANALYZE TABLE t2;
Table	Op	Msg_type	Msg_text
test.t2	analyze	status	OK
SET SESSION server_id= @old_server_id;
INSERT INTO t1 VALUES (37,0);
ALTER TABLE t3 COMMENT "DDL statement 2";
INSERT INTO t1 VALUES (38,0);
INSERT INTO t1 VALUES (39,0);
ALTER TABLE t3 COMMENT "DDL statement 3";
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
a b
30 0
31 0
32 0
33 0
34 0
35 0
36 0
37 0
38 0
39 0
include/save_master_gtid.inc
include/start_slave.inc
include/sync_with_master_gtid.inc
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;
a b
30 0
31 0
32 0
33 0
34 0
35 0
36 0
37 0
38 0
39 0
include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
include/start_slave.inc
include/stop_slave.inc
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
@@ -73,19 +73,29 @@ SELECT * FROM t1 ORDER BY a;

# Test showing the UNTIL condition in SHOW SLAVE STATUS.
--source include/stop_slave.inc
-START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200";
+START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200,0-1-300";
--source include/wait_for_slave_to_start.inc
--let $status_items= Using_Gtid,Until_Condition
--source include/show_slave_status.inc

# Clear the UNTIL condition.
# Note that we need to wait for a transaction to get through from the master.
# Otherwise the IO thread may still be in get_master_version_and_clock()
# (wait_for_slave_to_start.inc returns as soon as the IO thread is connected),
# and we can get test failures from warnings in the log about IO thread being
# killed in the middle of setting @@gtid_strict_mode or similar (MDEV-7940).
--connection server_1
INSERT INTO t1 VALUES (3);
DELETE FROM t1 WHERE a=3;
--save_master_pos

--connection server_2
--sync_with_master
--source include/stop_slave.inc
--source include/start_slave.inc


--echo *** Test UNTIL condition in an earlier binlog than the start GTID. ***
--connection server_2
--source include/stop_slave.inc

--connection server_1
SET gtid_domain_id = 1;
@@ -2158,6 +2158,100 @@ SELECT * FROM t8 ORDER BY a;

--source include/stop_slave.inc
SET GLOBAL slave_transaction_retries= @old_retries;
SET GLOBAL slave_parallel_threads=10;
--source include/start_slave.inc


--echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang ***

--connection server_2
--source include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep';

--connection server_1
# Inject two group commits. The bug was that ANALYZE TABLE would call
# wakeup_subsequent_commits() too early, allowing the following transaction
# in the same group to run ahead and binlog and free the GCO. Then we get
# wrong binlog order and later access freed GCO, which causes lost wakeup
# of following GCO and thus replication hang.
# We injected a small sleep in ANALYZE to make the race easier to hit (this
# can only cause false negatives in versions with the bug, not false positives,
# so sleep is ok here. And it's in general not possible to trigger reliably
# the race with debug_sync, since the bugfix makes the race impossible).

SET @old_dbug= @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,binlog_force_commit_id";

# Group commit with cid=10000, two event groups.
SET @commit_id= 10000;
ANALYZE TABLE t2;
INSERT INTO t3 VALUES (120, 0);

# Group commit with cid=10001, one event group.
SET @commit_id= 10001;
INSERT INTO t3 VALUES (121, 0);

SET SESSION debug_dbug=@old_dbug;

SELECT * FROM t3 WHERE a >= 120 ORDER BY a;
--source include/save_master_gtid.inc

--connection server_2
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc

SELECT * FROM t3 WHERE a >= 120 ORDER BY a;

--source include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
--source include/start_slave.inc


--echo *** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. ***

--connection server_2
--source include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep';

--connection server_1
# Inject two group commits. The bug was that record_gtid for a
# non-transactional event group would commit its own transaction, which would
# cause ha_commit_trans() to call wakeup_subsequent_commits() too early. This
# in turn led to access to freed group_commit_orderer object, losing a wakeup
# and causing slave threads to hang.
# We inject a small sleep in the corresponding record_gtid() to make the race
# easier to hit.

SET @old_dbug= @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,binlog_force_commit_id";

# Group commit with cid=10010, two event groups.
SET @old_server_id= @@SESSION.server_id;
SET SESSION server_id= 100;
SET @commit_id= 10010;
ALTER TABLE t1 COMMENT "Hulubulu!";
SET SESSION server_id= @old_server_id;
INSERT INTO t3 VALUES (130, 0);

# Group commit with cid=10011, one event group.
SET @commit_id= 10011;
INSERT INTO t3 VALUES (131, 0);

SET SESSION debug_dbug=@old_dbug;

SELECT * FROM t3 WHERE a >= 130 ORDER BY a;
--source include/save_master_gtid.inc

--connection server_2
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc

SELECT * FROM t3 WHERE a >= 130 ORDER BY a;

--source include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
--source include/start_slave.inc

@@ -1,4 +1,5 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--let $rpl_topology=1->2
--source include/rpl_init.inc
@@ -307,6 +308,129 @@ SET GLOBAL tx_isolation= @old_isolation;
--source include/start_slave.inc


--echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang ***

--connection server_1
DROP TABLE t1, t2, t3;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1,1), (2,1), (3,1), (4,1), (5,1);
--source include/save_master_gtid.inc

--connection server_2
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep';

--connection server_1
# The bug was that ANALYZE TABLE would call
# wakeup_subsequent_commits() too early, allowing the following
# transaction in the same group to run ahead and binlog and free the
# GCO. Then we get wrong binlog order and later access freed GCO,
# which causes lost wakeup of following GCO and thus replication hang.
# We injected a small sleep in ANALYZE to make the race easier to hit (this
# can only cause false negatives in versions with the bug, not false positives,
# so sleep is ok here. And it's in general not possible to trigger reliably
# the race with debug_sync, since the bugfix makes the race impossible).

ALTER TABLE t2 COMMENT "123abc";
ANALYZE TABLE t2;
INSERT INTO t1 VALUES (1,2);
INSERT INTO t1 VALUES (2,2);
INSERT INTO t1 VALUES (3,2);
INSERT INTO t1 VALUES (4,2);
INSERT INTO t3 VALUES (1,3);
ALTER TABLE t2 COMMENT "hello, world";
BEGIN;
INSERT INTO t1 VALUES (5,4);
INSERT INTO t1 VALUES (6,4);
INSERT INTO t1 VALUES (7,4);
INSERT INTO t1 VALUES (8,4);
INSERT INTO t1 VALUES (9,4);
INSERT INTO t1 VALUES (10,4);
INSERT INTO t1 VALUES (11,4);
INSERT INTO t1 VALUES (12,4);
INSERT INTO t1 VALUES (13,4);
INSERT INTO t1 VALUES (14,4);
INSERT INTO t1 VALUES (15,4);
INSERT INTO t1 VALUES (16,4);
INSERT INTO t1 VALUES (17,4);
INSERT INTO t1 VALUES (18,4);
INSERT INTO t1 VALUES (19,4);
INSERT INTO t1 VALUES (20,4);
COMMIT;
INSERT INTO t1 VALUES (21,5);
INSERT INTO t1 VALUES (22,5);

SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
SELECT * FROM t3 ORDER BY a;
--source include/save_master_gtid.inc

--connection server_2
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc

SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
SELECT * FROM t3 ORDER BY a;

--source include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
--source include/start_slave.inc

--echo *** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. ***

--connection server_2
--source include/stop_slave.inc
SET @old_dbug= @@GLOBAL.debug_dbug;
# The bug was that record_gtid(), when there is no existing transaction from
# a DML event being replicated, would commit its own transaction. This wrongly
# caused wakeup_subsequent_commits(), with similar consequences as MDEV-7888
# above. We simulate this condition with a small sleep in record_gtid() for
# a specific ANALYZE that we binlog with server id 100.
SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep';

--connection server_1

ALTER TABLE t3 COMMENT "DDL statement 1";
INSERT INTO t1 VALUES (30,0);
INSERT INTO t1 VALUES (31,0);
INSERT INTO t1 VALUES (32,0);
INSERT INTO t1 VALUES (33,0);
INSERT INTO t1 VALUES (34,0);
INSERT INTO t1 VALUES (35,0);
INSERT INTO t1 VALUES (36,0);
SET @old_server_id= @@SESSION.server_id;
SET SESSION server_id= 100;
ANALYZE TABLE t2;
SET SESSION server_id= @old_server_id;
INSERT INTO t1 VALUES (37,0);
ALTER TABLE t3 COMMENT "DDL statement 2";
INSERT INTO t1 VALUES (38,0);
INSERT INTO t1 VALUES (39,0);
ALTER TABLE t3 COMMENT "DDL statement 3";

SELECT * FROM t1 WHERE a >= 30 ORDER BY a;

--source include/save_master_gtid.inc


--connection server_2
--source include/start_slave.inc
--source include/sync_with_master_gtid.inc
SELECT * FROM t1 WHERE a >= 30 ORDER BY a;


--source include/stop_slave.inc
SET GLOBAL debug_dbug= @old_debug;
--source include/start_slave.inc


# Clean up.

--connection server_2
--source include/stop_slave.inc
SET GLOBAL slave_parallel_mode=@old_parallel_mode;

@@ -1697,3 +1697,19 @@ INSERT INTO t1 SELECT a.* FROM t1 a, t1 b, t1 c, t1 d, t1 e;
ALTER TABLE t1 MODIFY i FLOAT;
DROP TABLE t1;

--echo #
--echo # Start of 10.1 tests
--echo #

--echo #
--echo # MDEV-7816 ALTER with DROP INDEX and ADD INDEX .. COMMENT='comment2' ignores the new comment
--echo #
CREATE TABLE t1(a INT);
CREATE INDEX i1 ON t1(a) COMMENT 'comment1';
ALTER TABLE t1 DROP INDEX i1, ADD INDEX i1(a) COMMENT 'comment2';
SHOW CREATE TABLE t1;
DROP TABLE t1;

--echo #
--echo # Start of 10.1 tests
--echo #
@@ -145,7 +145,7 @@ create table t1 (a int);
INSERT INTO t1 select * from t0;

--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
-analyze format=json (select * from t1 A where a<5) union (select * from t1 B where a in (2,3));
+analyze format=json (select * from t1 tbl1 where a<5) union (select * from t1 tbl2 where a in (2,3));

drop table t0, t1;

5400 mysql-test/t/analyze_stmt_privileges2.test Normal file
File diff suppressed because it is too large
@@ -200,3 +200,82 @@ INSERT INTO t2 VALUES (3),(4);
EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a <> ALL ( SELECT b FROM t2 );
DROP TABLE t1, t2;

--echo #
--echo # MDEV-7927: Server crashes in Time_and_counter_tracker::incr_loops
--echo #
CREATE TABLE t1 (i INT);
INSERT INTO t1 VALUES (1),(2);
EXPLAIN SELECT * FROM t1 WHERE 3 IN ( SELECT 4 UNION SELECT 5 );
DROP TABLE t1;


--echo #
--echo # MDEV-7860: EXPLAIN FORMAT=JSON crashes for loose scan query
--echo #

create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, c int, d int, key(a,b,c));
insert into t1 select A.a, B.a, C.a, D.a from t2 A, t2 B, t2 C, t2 D;
explain select count(distinct b) from t1 group by a;
explain format=json select count(distinct b) from t1 group by a;
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json select count(distinct b) from t1 group by a;
drop table t1,t2;

--echo #
--echo # Try both variants of LooseScan (data/queries borrowed from group_min_max.test)
--echo #

create table t1 (
a1 char(64), a2 char(64), b char(16), c char(16) not null, d char(16), dummy char(248) default ' '
);

insert into t1 (a1, a2, b, c, d) values
('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4'),
('a','a','a','a111','xy1'),('a','a','a','b111','xy2'),('a','a','a','c111','xy3'),('a','a','a','d111','xy4'),
('a','a','b','e112','xy1'),('a','a','b','f112','xy2'),('a','a','b','g112','xy3'),('a','a','b','h112','xy4'),
('a','b','a','i121','xy1'),('a','b','a','j121','xy2'),('a','b','a','k121','xy3'),('a','b','a','l121','xy4'),
('a','b','b','m122','xy1'),('a','b','b','n122','xy2'),('a','b','b','o122','xy3'),('a','b','b','p122','xy4'),
('b','a','a','a211','xy1'),('b','a','a','b211','xy2'),('b','a','a','c211','xy3'),('b','a','a','d211','xy4'),
('b','a','b','e212','xy1'),('b','a','b','f212','xy2'),('b','a','b','g212','xy3'),('b','a','b','h212','xy4'),
('b','b','a','i221','xy1'),('b','b','a','j221','xy2'),('b','b','a','k221','xy3'),('b','b','a','l221','xy4'),
('b','b','b','m222','xy1'),('b','b','b','n222','xy2'),('b','b','b','o222','xy3'),('b','b','b','p222','xy4'),
('c','a','a','a311','xy1'),('c','a','a','b311','xy2'),('c','a','a','c311','xy3'),('c','a','a','d311','xy4'),
('c','a','b','e312','xy1'),('c','a','b','f312','xy2'),('c','a','b','g312','xy3'),('c','a','b','h312','xy4'),
('c','b','a','i321','xy1'),('c','b','a','j321','xy2'),('c','b','a','k321','xy3'),('c','b','a','l321','xy4'),
('c','b','b','m322','xy1'),('c','b','b','n322','xy2'),('c','b','b','o322','xy3'),('c','b','b','p322','xy4'),
('d','a','a','a411','xy1'),('d','a','a','b411','xy2'),('d','a','a','c411','xy3'),('d','a','a','d411','xy4'),
('d','a','b','e412','xy1'),('d','a','b','f412','xy2'),('d','a','b','g412','xy3'),('d','a','b','h412','xy4'),
('d','b','a','i421','xy1'),('d','b','a','j421','xy2'),('d','b','a','k421','xy3'),('d','b','a','l421','xy4'),
('d','b','b','m422','xy1'),('d','b','b','n422','xy2'),('d','b','b','o422','xy3'),('d','b','b','p422','xy4');

create index idx_t1_0 on t1 (a1);
create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;

explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
explain select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');

explain format=json select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
explain format=json select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');

drop table t1;

17 mysql-test/t/explain_json_format_partitions.test Normal file
@@ -0,0 +1,17 @@

--source include/have_partition.inc
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (
a int not null
) partition by key(a);
insert into t1 select a from t2;
explain partitions select * from t1 where a in (2,3,4);
explain format=json select * from t1 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json select * from t1 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json update t1 set a=a+10 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json delete from t1 where a in (20,30,40);
drop table t1,t2;
@@ -727,7 +727,8 @@ CREATE TABLE `db_17876.slow_log_data` (
`insert_id` int(11) default NULL,
`server_id` int(11) default NULL,
`sql_text` mediumtext,
-`thread_id` bigint(21) unsigned default NULL
+`thread_id` bigint(21) unsigned default NULL,
+`rows_affected` int(11) default NULL
);

CREATE TABLE `db_17876.general_log_data` (
@@ -4210,12 +4210,13 @@ inline const char *table_case_name(HA_CREATE_INFO *info, const char *name)

#define TABLE_IO_WAIT(TRACKER, PSI, OP, INDEX, FLAGS, PAYLOAD) \
  { \
-   if (unlikely(tracker)) \
+   Exec_time_tracker *this_tracker; \
+   if (unlikely((this_tracker= tracker))) \
      tracker->start_tracking(); \
    \
    MYSQL_TABLE_IO_WAIT(PSI, OP, INDEX, FLAGS, PAYLOAD); \
    \
-   if (unlikely(tracker)) \
+   if (unlikely(this_tracker)) \
      tracker->stop_tracking(); \
  }

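The point of the new this_tracker variable is that the tracker is read exactly once: the old macro tested it twice, so a value that changed between the start and stop tests could leave start_tracking()/stop_tracking() unbalanced. A minimal sketch of the load-once idiom, assuming the server's unlikely() macro and a placeholder do_io() that is not MariaDB API:

    static void timed_io(Exec_time_tracker *maybe_changing_tracker)
    {
      Exec_time_tracker *this_tracker= maybe_changing_tracker; // read once
      if (unlikely(this_tracker))
        this_tracker->start_tracking();
      do_io();                        /* the work being measured */
      if (unlikely(this_tracker))     /* same decision as at the start */
        this_tracker->stop_tracking();
    }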
12 sql/log.cc
@@ -5932,6 +5932,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
  if (direct)
  {
    int res;
+   uint64 commit_id= 0;
    DBUG_PRINT("info", ("direct is set"));
    if ((res= thd->wait_for_prior_commit()))
      DBUG_RETURN(res);
@@ -5939,7 +5940,16 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
    my_org_b_tell= my_b_tell(file);
    mysql_mutex_lock(&LOCK_log);
    prev_binlog_id= current_binlog_id;
-   if (write_gtid_event(thd, true, using_trans, 0))
+   DBUG_EXECUTE_IF("binlog_force_commit_id",
+     {
+       const LEX_STRING name= { C_STRING_WITH_LEN("commit_id") };
+       bool null_value;
+       user_var_entry *entry=
+         (user_var_entry*) my_hash_search(&thd->user_vars,
+                                          (uchar*) name.str, name.length);
+       commit_id= entry->val_int(&null_value);
+     });
+   if (write_gtid_event(thd, true, using_trans, commit_id))
      goto err;
  }
  else
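The injected block above runs only when the binlog_force_commit_id debug keyword is enabled, which is how the rpl_parallel tests above force several event groups into one group commit (SET SESSION debug_dbug="+d,binlog_force_commit_id" plus SET @commit_id= ...). A minimal sketch of the DBUG_EXECUTE_IF pattern, with a hypothetical helper name:

    static uint64 choose_commit_id()
    {
      uint64 commit_id= 0;              /* normal path: no forced id */
      DBUG_EXECUTE_IF("binlog_force_commit_id",
        {
          commit_id= 10000;             /* test-only override */
        });
      return commit_id;                 /* block compiles away when DBUG_OFF is defined */
    }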
@@ -6405,7 +6405,8 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
  if (thd_arg->transaction.stmt.trans_did_wait() ||
      thd_arg->transaction.all.trans_did_wait())
    flags2|= FL_WAITED;
- if (sql_command_flags[thd->lex->sql_command] & CF_DISALLOW_IN_RO_TRANS)
+ if (sql_command_flags[thd->lex->sql_command] &
+     (CF_DISALLOW_IN_RO_TRANS | CF_AUTO_COMMIT_TRANS))
    flags2|= FL_DDL;
  else if (is_transactional)
    flags2|= FL_TRANSACTIONAL;
@@ -130,6 +130,27 @@ void Json_writer::add_ll(longlong val)
}


+/* Add a memory size, printing in Kb or Mb if necessary */
+void Json_writer::add_size(longlong val)
+{
+  char buf[64];
+  if (val < 1024)
+    my_snprintf(buf, sizeof(buf), "%lld", val);
+  else if (val < 1024*1024*16)
+  {
+    /* Values less than 16MB are specified in KB for precision */
+    size_t len= my_snprintf(buf, sizeof(buf), "%lld", val/1024);
+    strcpy(buf + len, "Kb");
+  }
+  else
+  {
+    size_t len= my_snprintf(buf, sizeof(buf), "%lld", val/(1024*1024));
+    strcpy(buf + len, "Mb");
+  }
+  add_str(buf);
+}


void Json_writer::add_double(double val)
{
  char buf[64];
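For reference, some illustrative input/output pairs for add_size(), derived from the thresholds above rather than from the patch itself:

    // add_size(500)      -> "500"    (below 1024, printed as raw bytes)
    // add_size(131072)   -> "128Kb"  (131072/1024 = 128; this is where the
    //                                 "buffer_size": "128Kb" values in the
    //                                 EXPLAIN/ANALYZE results above come from)
    // add_size(33554432) -> "32Mb"   (at or above the 16Mb cutoff, divided by 1024*1024)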
@@ -108,6 +108,7 @@ public:
  void add_str(const String &str);

  void add_ll(longlong val);
+ void add_size(longlong val);
  void add_double(double val);
  void add_bool(bool val);
  void add_null();
@@ -517,6 +517,7 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
  element *elem;
  ulonglong thd_saved_option= thd->variables.option_bits;
  Query_tables_list lex_backup;
+ wait_for_commit* suspended_wfc;
  DBUG_ENTER("record_gtid");

  if (unlikely(!loaded))

@@ -540,6 +541,28 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
    DBUG_RETURN(1);
  } );

+ /*
+   If we are applying a non-transactional event group, we will be committing
+   a transaction here, but that does not imply that the event group has
+   completed or has been binlogged. So we should not trigger
+   wakeup_subsequent_commits() here.
+
+   Note: An alternative here could be to put a call to mark_start_commit() in
+   stmt_done() before the call to record_and_update_gtid(). This would
+   prevent later calling mark_start_commit() after we have run
+   wakeup_subsequent_commits() from committing the GTID update transaction
+   (which must be avoided to avoid accessing freed group_commit_orderer
+   object). It would also allow following event groups to start slightly
+   earlier. And in the cases where record_gtid() is called without an active
+   transaction, the current statement should have been binlogged already, so
+   binlog order is preserved.
+
+   But this is rather subtle, and potentially fragile. And it does not really
+   seem worth it; non-transactional loads are unlikely to benefit much from
+   parallel replication in any case. So for now, we go with the simple
+   suspend/resume of wakeup_subsequent_commits() here in record_gtid().
+ */
+ suspended_wfc= thd->suspend_subsequent_commits();
  thd->lex->reset_n_backup_query_tables_list(&lex_backup);
  tlist.init_one_table(STRING_WITH_LEN("mysql"),
                       rpl_gtid_slave_state_table_name.str,

@@ -691,6 +714,12 @@ end:
  }
  thd->lex->restore_backup_query_tables_list(&lex_backup);
  thd->variables.option_bits= thd_saved_option;
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("inject_record_gtid_serverid_100_sleep",
+   {
+     if (gtid->server_id == 100)
+       my_sleep(500000);
+   });
  DBUG_RETURN(err);
}

@@ -170,8 +170,24 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
  /* Now free any GCOs in which all transactions have committed. */
  group_commit_orderer *tmp_gco= rgi->gco;
  while (tmp_gco &&
-        (!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id))
+        (!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id ||
+         tmp_gco->next_gco->wait_count > entry->count_committing_event_groups))
  {
+   /*
+     We must not free a GCO before the wait_count of the following GCO has
+     been reached and wakeup has been sent. Otherwise we will lose the
+     wakeup and hang (there were several such bugs in the past).
+
+     The intention is that this is ensured already since we only free when
+     the last event group in the GCO has committed
+     (tmp_gco->last_sub_id <= sub_id). However, in case of a bug, we have an
+     extra check on next_gco->wait_count to hopefully avoid hanging; we
+     have an assertion here in debug builds that this check does not in
+     fact trigger.
+   */
+   DBUG_ASSERT(!tmp_gco->next_gco || tmp_gco->last_sub_id > sub_id);
    tmp_gco= tmp_gco->prev_gco;
  }
  while (tmp_gco)
  {
    group_commit_orderer *prev_gco= tmp_gco->prev_gco;
@@ -320,6 +320,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
  int result_code;
  int compl_result_code;
  bool need_repair_or_alter= 0;
+ wait_for_commit* suspended_wfc;

  DBUG_ENTER("mysql_admin_table");
  DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options));

@@ -337,6 +338,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
                    Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
    DBUG_RETURN(TRUE);

+ /*
+   This function calls trans_commit() during its operation, but that does not
+   imply that the operation is complete or binlogged. So we have to
+   temporarily suspend the wakeup_subsequent_commits() calls (if used).
+ */
+ suspended_wfc= thd->suspend_subsequent_commits();

  mysql_ha_rm_tables(thd, tables);

  /*

@@ -464,7 +472,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
      if (!table->table->part_info)
      {
        my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
-       DBUG_RETURN(TRUE);
+       goto err2;
      }
      if (set_part_state(alter_info, table->table->part_info, PART_ADMIN))
      {

@@ -1045,6 +1053,8 @@ send_result_message:
  }

  my_eof(thd);
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
  DBUG_RETURN(FALSE);

err:

@@ -1058,6 +1068,8 @@ err:
  }
  close_thread_tables(thd); // Shouldn't be needed
  thd->mdl_context.release_transactional_locks();
+err2:
+ thd->resume_subsequent_commits(suspended_wfc);
  DBUG_RETURN(TRUE);
}

@@ -20,6 +20,7 @@
*/
class Exec_time_tracker
{
protected:
  ulonglong count;
  ulonglong cycles;
  ulonglong last_start;

@@ -34,9 +35,8 @@ public:

  void stop_tracking()
  {
-   ulonglong last_end= my_timer_cycles();
    count++;
-   cycles += last_end - last_start;
+   cycles += my_timer_cycles() - last_start;
  }

  // interface for getting the time
@@ -48,3 +48,41 @@ public:
  }
};


+/*
+  A class for counting certain actions (in all queries), and optionally
+  collecting the timings (in ANALYZE queries).
+*/
+
+class Time_and_counter_tracker: public Exec_time_tracker
+{
+public:
+  const bool timed;
+
+  Time_and_counter_tracker(bool timed_arg) : timed(timed_arg)
+  {}
+
+  /* Loops are counted in both ANALYZE and regular queries, as this is cheap */
+  void incr_loops() { count++; }
+
+  /*
+    Unlike Exec_time_tracker::stop_tracking, we don't increase loops.
+  */
+  void stop_tracking()
+  {
+    cycles += my_timer_cycles() - last_start;
+  }
+};
+
+#define ANALYZE_START_TRACKING(tracker) \
+  { \
+    (tracker)->incr_loops(); \
+    if (unlikely((tracker)->timed)) \
+    { (tracker)->start_tracking(); } \
+  }
+
+#define ANALYZE_STOP_TRACKING(tracker) \
+  if (unlikely((tracker)->timed)) \
+  { (tracker)->stop_tracking(); }

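A small usage sketch of the new tracker and its macros (hypothetical caller; read_one_row() is a placeholder): the loop counter is bumped unconditionally, while the cycle timer only runs when the tracker was constructed with timed=true, i.e. for ANALYZE statements.

    static void scan_step(Time_and_counter_tracker *tracker)
    {
      ANALYZE_START_TRACKING(tracker); /* always counts a loop; starts the
                                          cycle timer only if tracker->timed */
      read_one_row();                  /* placeholder for the tracked work */
      ANALYZE_STOP_TRACKING(tracker);  /* stops the timer only if tracker->timed */
    }

    /* Typical construction, timed only for ANALYZE:
       Time_and_counter_tracker tracker(thd->lex->analyze_stmt); */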
@@ -3774,6 +3774,15 @@ public:
    if (wait_for_commit_ptr)
      wait_for_commit_ptr->wakeup_subsequent_commits(wakeup_error);
  }
+ wait_for_commit *suspend_subsequent_commits() {
+   wait_for_commit *suspended= wait_for_commit_ptr;
+   wait_for_commit_ptr= NULL;
+   return suspended;
+ }
+ void resume_subsequent_commits(wait_for_commit *suspended) {
+   DBUG_ASSERT(!wait_for_commit_ptr);
+   wait_for_commit_ptr= suspended;
+ }

private:

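The intended call pattern for these two THD methods, as record_gtid() and mysql_admin_table() use them in this commit (sketch; run_internal_transaction() is a placeholder):

    static void commit_internally(THD *thd)
    {
      /* Detach the waiter list: an internal commit must not wake up
         transactions that have to wait for the whole event group. */
      wait_for_commit *suspended= thd->suspend_subsequent_commits();
      run_internal_transaction(thd);              /* placeholder internal commit */
      thd->resume_subsequent_commits(suspended);  /* reattach for the real commit */
    }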
@@ -52,9 +52,11 @@
   invoked on a running DELETE statement.
 */
 
-void Delete_plan::save_explain_data(MEM_ROOT *mem_root, Explain_query *query)
+Explain_delete* Delete_plan::save_explain_delete_data(MEM_ROOT *mem_root, THD *thd)
 {
-  Explain_delete *explain= new (mem_root) Explain_delete(mem_root);
+  Explain_query *query= thd->lex->explain;
+  Explain_delete *explain=
+    new (mem_root) Explain_delete(mem_root, thd->lex->analyze_stmt);
 
   if (deleting_all_rows)
   {

@@ -69,14 +71,19 @@ void Delete_plan::save_explain_data(MEM_ROOT *mem_root, Explain_query *query)
   }
 
   query->add_upd_del_plan(explain);
+  return explain;
 }
 
 
-void Update_plan::save_explain_data(MEM_ROOT *mem_root, Explain_query *query)
+Explain_update*
+Update_plan::save_explain_update_data(MEM_ROOT *mem_root, THD *thd)
 {
-  Explain_update* explain= new (mem_root) Explain_update(mem_root);
+  Explain_query *query= thd->lex->explain;
+  Explain_update* explain=
+    new (mem_root) Explain_update(mem_root, thd->lex->analyze_stmt);
   save_explain_data_intern(mem_root, query, explain);
+  query->add_upd_del_plan(explain);
+  return explain;
 }
 
 
@@ -110,7 +117,8 @@ void Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
   partition_info *part_info;
   if ((part_info= table->part_info))
   {
-    make_used_partitions_str(part_info, &explain->used_partitions);
+    make_used_partitions_str(mem_root, part_info, &explain->used_partitions,
+                             explain->used_partitions_list);
     explain->used_partitions_set= true;
   }
   else

@@ -461,7 +469,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
   if (thd->lex->describe)
     goto produce_explain_and_leave;
 
-  query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
+  explain= query_plan.save_explain_delete_data(thd->mem_root, thd);
+  ANALYZE_START_TRACKING(&explain->command_tracker);
 
   DBUG_EXECUTE_IF("show_explain_probe_delete_exec_start",
                   dbug_serve_apcs(thd, 1););

@@ -542,7 +551,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
 
   explain= (Explain_delete*)thd->lex->explain->get_upd_del_plan();
   explain->tracker.on_scan_init();
-  ANALYZE_START_TRACKING(&explain->time_tracker);
 
   while (!(error=info.read_record(&info)) && !thd->killed &&
         ! thd->is_error())

@@ -620,7 +628,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
   end_read_record(&info);
   if (options & OPTION_QUICK)
     (void) table->file->extra(HA_EXTRA_NORMAL);
-  ANALYZE_STOP_TRACKING(&explain->time_tracker);
+  ANALYZE_STOP_TRACKING(&explain->command_tracker);
 
 cleanup:
   /*

@@ -701,7 +709,7 @@ produce_explain_and_leave:
     We come here for various "degenerate" query plans: impossible WHERE,
     no-partitions-used, impossible-range, etc.
   */
-  query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
+  query_plan.save_explain_delete_data(thd->mem_root, thd);
 
 send_nothing_and_leave:
   /*

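Read together, the hunks above change the single-table DELETE path from "save the plan, then later fish the Explain object back out of thd->lex->explain" to "save the plan, keep the returned Explain_delete, and bracket the whole statement with its command tracker". A schematic of that control flow, with the types reduced to the members the flow touches (everything below is an illustration, not the server's definitions):

struct Time_and_counter_tracker {
  void start_tracking() { /* record start time */ }
  void stop_tracking()  { /* accumulate elapsed time */ }
};

struct Explain_delete { Time_and_counter_tracker command_tracker; };

// Same shape as the macros this commit relocates out of sql_explain.h.
#define ANALYZE_START_TRACKING(t) if (t) { (t)->start_tracking(); }
#define ANALYZE_STOP_TRACKING(t)  if (t) { (t)->stop_tracking(); }

static Explain_delete *save_explain_delete_data() {
  static Explain_delete e;  // stand-in for allocation on the MEM_ROOT
  return &e;
}

int mysql_delete_sketch() {
  // New flow: the save function hands back the Explain object...
  Explain_delete *explain= save_explain_delete_data();
  // ...so the whole command is bracketed by one tracker, which later
  // feeds r_total_time_ms in ANALYZE FORMAT=JSON output.
  ANALYZE_START_TRACKING(&explain->command_tracker);
  /* ... open tables, scan, delete rows ... */
  ANALYZE_STOP_TRACKING(&explain->command_tracker);
  return 0;
}
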
@@ -37,6 +37,18 @@ Explain_query::Explain_query(THD *thd_arg, MEM_ROOT *root) :
 {
 }
 
+static void print_json_array(Json_writer *writer,
+                             const char *title, String_list &list)
+{
+  List_iterator_fast<char> it(list);
+  const char *name;
+  writer->add_member(title).start_array();
+  while ((name= it++))
+    writer->add_str(name);
+  writer->end_array();
+}
+
+
 Explain_query::~Explain_query()
 {

@@ -1201,6 +1213,13 @@ void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_t
   case ET_USING_MRR:
     writer->add_member("mrr_type").add_str(mrr_type.c_ptr());
     break;
+  case ET_USING_INDEX_FOR_GROUP_BY:
+    writer->add_member("using_index_for_group_by");
+    if (loose_scan_is_scanning)
+      writer->add_str("scanning");
+    else
+      writer->add_bool(true);
+    break;
   default:
     DBUG_ASSERT(0);
   }

@@ -1212,17 +1231,9 @@ void add_json_keyset(Json_writer *writer, const char *elem_name,
                      String_list *keyset)
 {
-  if (!keyset->is_empty())
-  {
-    List_iterator_fast<char> it(*keyset);
-    const char *name;
-    writer->add_member(elem_name).start_array();
-    while ((name= it++))
-      writer->add_str(name);
-    writer->end_array();
-  }
+  print_json_array(writer, elem_name, *keyset);
 }
 
 
 void Explain_table_access::print_explain_json(Explain_query *query,
                                               Json_writer *writer,
                                               bool is_analyze)

@@ -1245,7 +1256,10 @@ void Explain_table_access::print_explain_json(Explain_query *query,
   writer->add_member("table").start_object();
 
   writer->add_member("table_name").add_str(table_name);
-  // partitions
+
+  if (used_partitions_set)
+    print_json_array(writer, "partitions", used_partitions_list);
+
   writer->add_member("access_type").add_str(join_type_str[type]);
 
   add_json_keyset(writer, "possible_keys", &possible_keys);

@@ -1274,14 +1288,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
     parts_list= &key.key_parts_list;
 
   if (parts_list && !parts_list->is_empty())
-  {
-    List_iterator_fast<char> it(*parts_list);
-    const char *name;
-    writer->add_member("used_key_parts").start_array();
-    while ((name= it++))
-      writer->add_str(name);
-    writer->end_array();
-  }
+    print_json_array(writer, "used_key_parts", *parts_list);
 
   if (quick_info && !quick_info->is_basic())
   {

@@ -1292,14 +1299,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
 
   /* `ref` */
   if (!ref_list.is_empty())
-  {
-    List_iterator_fast<char> it(ref_list);
-    const char *str;
-    writer->add_member("ref").start_array();
-    while ((str= it++))
-      writer->add_str(str);
-    writer->end_array();
-  }
+    print_json_array(writer, "ref", ref_list);
 
   /* r_loops (not present in tabular output) */
   if (is_analyze)

@@ -1360,6 +1360,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
     writer->end_object(); // "block-nl-join"
     writer->add_member("buffer_type").add_str(bka_type.incremental?
                                               "incremental":"flat");
+    writer->add_member("buffer_size").add_size(bka_type.join_buffer_size);
     writer->add_member("join_type").add_str(bka_type.join_alg);
     if (bka_type.mrr_type.length())
       writer->add_member("mrr_type").add_str(bka_type.mrr_type);

@@ -1547,13 +1548,8 @@ void Explain_quick_select::print_json(Json_writer *writer)
     writer->add_member("range").start_object();
 
     writer->add_member("key").add_str(range.get_key_name());
 
-    List_iterator_fast<char> it(range.key_parts_list);
-    const char *name;
-    writer->add_member("used_key_parts").start_array();
-    while ((name= it++))
-      writer->add_str(name);
-    writer->end_array();
+    print_json_array(writer, "used_key_parts", range.key_parts_list);
 
     writer->end_object();
   }

@@ -1572,8 +1568,7 @@ void Explain_quick_select::print_json(Json_writer *writer)
 
 void Explain_quick_select::print_extra_recursive(String *str)
 {
-  if (quick_type == QUICK_SELECT_I::QS_TYPE_RANGE ||
-      quick_type == QUICK_SELECT_I::QS_TYPE_RANGE_DESC)
+  if (is_basic())
   {
     str->append(range.get_key_name());
   }

@@ -1839,6 +1834,10 @@ void Explain_update::print_explain_json(Explain_query *query,
     writer->add_member("delete").add_ll(1);
 
   writer->add_member("table_name").add_str(table_name);
+
+  if (used_partitions_set)
+    print_json_array(writer, "partitions", used_partitions_list);
+
   writer->add_member("access_type").add_str(join_type_str[jtype]);
 
   if (!possible_keys.is_empty())

@@ -1933,9 +1932,9 @@ void Explain_update::print_explain_json(Explain_query *query,
   if (using_io_buffer)
     writer->add_member("using_io_buffer").add_ll(1);
 
-  if (is_analyze && time_tracker.get_loops())
+  if (is_analyze && command_tracker.get_loops())
     writer->
-      add_member("r_total_time_ms").add_double(time_tracker.get_time_ms());
+      add_member("r_total_time_ms").add_double(command_tracker.get_time_ms());
 
   if (where_cond)
   {

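The first hunk above introduces print_json_array(), and the later hunks replace several hand-rolled iterate-and-emit loops with calls to it. A self-contained sketch of the pattern; this Json_writer is a heavily simplified stand-in for the server's class, not its real API:

#include <cstdio>
#include <string>
#include <vector>

class Json_writer {
public:
  Json_writer& add_member(const char *name) {
    std::printf("\"%s\": ", name);
    return *this;
  }
  void start_array() { std::printf("["); first= true; }
  void end_array()   { std::printf("]\n"); }
  void add_str(const std::string &s) {
    std::printf("%s\"%s\"", first ? "" : ", ", s.c_str());
    first= false;
  }
private:
  bool first= true;
};

// Mirrors the helper: emit a named JSON array from a list of strings.
static void print_json_array(Json_writer *writer, const char *title,
                             const std::vector<std::string> &list) {
  writer->add_member(title).start_array();
  for (const std::string &name : list)
    writer->add_str(name);
  writer->end_array();
}

int main() {
  Json_writer w;
  print_json_array(&w, "used_key_parts", {"a", "b"});  // "used_key_parts": ["a", "b"]
  return 0;
}
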
@@ -51,6 +51,9 @@ it into the slow query log.
 
 */
 
+#ifndef SQL_EXPLAIN_INCLUDED
+#define SQL_EXPLAIN_INCLUDED
+
 class String_list: public List<char>
 {
 public:

@@ -104,40 +107,6 @@ public:
   inline void on_record_after_where() { r_rows_after_where++; }
 };
 
-#if 0
-/*
-  A class to track operations (currently, row reads) on a PSI_table.
-*/
-class Table_op_tracker
-{
-  PSI_table *psi_table;
-
-  /* Table counter values at start. Sum is in picoseconds */
-  ulonglong start_sum;
-  ulonglong start_count;
-
-  /* Table counter values at end */
-  ulonglong end_sum;
-  ulonglong end_count;
-public:
-  void start_tracking(TABLE *table);
-  // At the moment, print_json will call end_tracking.
-  void end_tracking();
-
-  // this may print nothing if the table was not tracked.
-  void print_json(Json_writer *writer);
-};
-#endif
-
-#define ANALYZE_START_TRACKING(tracker) \
-  if (tracker) \
-  { (tracker)->start_tracking(); }
-
-#define ANALYZE_STOP_TRACKING(tracker) \
-  if (tracker) \
-  { (tracker)->stop_tracking(); }
-
-
 /**************************************************************************************
 
   Data structures for producing EXPLAIN outputs.

@@ -160,8 +129,9 @@ class Explain_query;
 class Explain_node : public Sql_alloc
 {
 public:
-  Explain_node(MEM_ROOT *root)
-    :children(root)
+  Explain_node(MEM_ROOT *root) :
+    connection_type(EXPLAIN_NODE_OTHER),
+    children(root)
   {}
   /* A type specifying what kind of node this is */
   enum explain_node_type

@@ -181,7 +151,6 @@ public:
     EXPLAIN_NODE_NON_MERGED_SJ /* aka JTBM semi-join */
   };
 
-  Explain_node() : connection_type(EXPLAIN_NODE_OTHER) {}
 
   virtual enum explain_node_type get_type()= 0;
   virtual int get_select_id()= 0;

@@ -274,10 +243,11 @@ class Explain_select : public Explain_basic_join
 public:
   enum explain_node_type get_type() { return EXPLAIN_SELECT; }
 
-  Explain_select(MEM_ROOT *root) :
+  Explain_select(MEM_ROOT *root, bool is_analyze) :
     Explain_basic_join(root),
     message(NULL),
-    using_temporary(false), using_filesort(false)
+    using_temporary(false), using_filesort(false),
+    time_tracker(is_analyze)
   {}
 
   /*

@@ -303,7 +273,7 @@ public:
   bool using_filesort;
 
   /* ANALYZE members */
-  Exec_time_tracker time_tracker;
+  Time_and_counter_tracker time_tracker;
 
   int print_explain(Explain_query *query, select_result_sink *output,
                     uint8 explain_flags, bool is_analyze);

@@ -329,7 +299,8 @@ class Explain_union : public Explain_node
 {
 public:
   Explain_union(MEM_ROOT *root) :
-    Explain_node(root)
+    Explain_node(root),
+    time_tracker(false)
   {}
 
   enum explain_node_type get_type() { return EXPLAIN_UNION; }

@@ -364,6 +335,8 @@ public:
   const char *fake_select_type;
   bool using_filesort;
   bool using_tmp;
+  /* TODO: the below is not printed yet:*/
+  Time_and_counter_tracker time_tracker;
 
   Table_access_tracker *get_fake_select_lex_tracker()
   {

@@ -538,6 +511,8 @@ class EXPLAIN_BKA_TYPE
 public:
   EXPLAIN_BKA_TYPE() : join_alg(NULL) {}
 
+  size_t join_buffer_size;
+
   bool incremental;
 
   /*

@@ -676,6 +651,7 @@ public:
   /* id and 'select_type' are cared-of by the parent Explain_select */
   StringBuffer<32> table_name;
   StringBuffer<32> used_partitions;
+  String_list used_partitions_list;
   // valid with ET_USING_MRR
   StringBuffer<32> mrr_type;
   StringBuffer<32> firstmatch_table_name;

@@ -787,8 +763,9 @@ class Explain_update : public Explain_node
 {
 public:
 
-  Explain_update(MEM_ROOT *root) :
-    Explain_node(root)
+  Explain_update(MEM_ROOT *root, bool is_analyze) :
+    Explain_node(root),
+    command_tracker(is_analyze)
   {}
 
   virtual enum explain_node_type get_type() { return EXPLAIN_UPDATE; }

@@ -797,6 +774,7 @@ public:
   const char *select_type;
 
   StringBuffer<32> used_partitions;
+  String_list used_partitions_list;
   bool used_partitions_set;
 
   bool impossible_where;

@@ -827,7 +805,9 @@ public:
 
   /* ANALYZE members and methods */
   Table_access_tracker tracker;
-  Exec_time_tracker time_tracker;
+
+  /* This tracks execution of the whole command */
+  Time_and_counter_tracker command_tracker;
   //psergey-todo: io-tracker here.
 
   virtual int print_explain(Explain_query *query, select_result_sink *output,

@@ -870,8 +850,8 @@ public:
 class Explain_delete: public Explain_update
 {
 public:
-  Explain_delete(MEM_ROOT *root) :
-    Explain_update(root)
+  Explain_delete(MEM_ROOT *root, bool is_analyze) :
+    Explain_update(root, is_analyze)
   {}
 
   /*

@@ -890,3 +870,4 @@ public:
 };
 
 
+#endif //SQL_EXPLAIN_INCLUDED

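Two things happen in this header: the dormant #if 0 Table_op_tracker draft and the ANALYZE_*_TRACKING macros leave this file (presumably relocated to a dedicated header elsewhere in the commit), and every Explain_* constructor now threads an is_analyze flag into a Time_and_counter_tracker, so tracking stays inert for plain EXPLAIN and only pays its cost under ANALYZE. A stand-in sketch of such a tracker; std::chrono replaces whatever timer the server actually uses, and the class layout is assumed, not copied:

#include <chrono>

class Time_and_counter_tracker_sketch {
public:
  explicit Time_and_counter_tracker_sketch(bool is_analyze)
    : enabled(is_analyze), loops(0), total_ns(0) {}

  void start_tracking() {
    if (enabled) start= std::chrono::steady_clock::now();
  }
  void stop_tracking() {
    if (!enabled) return;
    ++loops;
    total_ns+= std::chrono::duration_cast<std::chrono::nanoseconds>(
                   std::chrono::steady_clock::now() - start).count();
  }
  // Gating output on get_loops(), as in the r_total_time_ms hunk above,
  // keeps untouched trackers from printing a meaningless zero.
  unsigned long long get_loops() const { return loops; }
  double get_time_ms() const { return (double)total_ns / 1e6; }

private:
  bool enabled;  // false for EXPLAIN, true for ANALYZE
  unsigned long long loops;
  long long total_ns;
  std::chrono::steady_clock::time_point start;
};

int main() {
  Time_and_counter_tracker_sketch t(true);  // as if constructed with is_analyze
  t.start_tracking();
  t.stop_tracking();
  return (int)t.get_loops();  // 1
}
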
@@ -2582,6 +2582,8 @@ void JOIN_CACHE::save_explain_data(EXPLAIN_BKA_TYPE *explain)
 {
   explain->incremental= MY_TEST(prev_cache);
 
+  explain->join_buffer_size= get_join_buffer_size();
+
   switch (get_join_alg()) {
   case BNL_JOIN_ALG:
     explain->join_alg= "BNL";

@@ -4374,6 +4374,12 @@ int st_select_lex_unit::save_union_explain_part2(Explain_query *output)
         eu->add_child(unit->first_select()->select_number);
       }
     }
+
+    /*
+      Having a time tracker for reading UNION result is not very interesting
+      but is easier, as JOIN::exec now relies on having a tracker.
+    */
+    fake_select_lex->join->tracker= &eu->time_tracker;
   }
   return 0;
 }

@@ -2290,6 +2290,7 @@ class SQL_SELECT;
 
 class Explain_query;
 class Explain_update;
+class Explain_delete;
 
 /*
   Query plan of a single-table UPDATE.

@@ -2332,10 +2333,11 @@ public:
   void set_impossible_where() { impossible_where= true; }
   void set_no_partitions() { no_partitions= true; }
 
-  void save_explain_data(MEM_ROOT *mem_root, Explain_query *query);
+  Explain_update* save_explain_update_data(MEM_ROOT *mem_root, THD *thd);
 protected:
   void save_explain_data_intern(MEM_ROOT *mem_root, Explain_query *query,
                                 Explain_update *eu);
+
 public:
   virtual ~Update_plan() {}
 
   Update_plan(MEM_ROOT *mem_root_arg) :

@@ -2365,7 +2367,7 @@ public:
     scanned_rows= rows_arg;
   }
 
-  void save_explain_data(MEM_ROOT *mem_root, Explain_query *query);
+  Explain_delete* save_explain_delete_data(MEM_ROOT *mem_root, THD *thd);
 };
 
 

@@ -68,6 +68,7 @@
                        // mysql_*_alter_copy_data
 #include "opt_range.h" // store_key_image_to_rec
 #include "sql_alter.h" // Alter_table_ctx
+#include "sql_select.h"
 
 #include <algorithm>
 using std::max;

@@ -7290,8 +7291,10 @@ void mem_alloc_error(size_t size)
 /**
   Return comma-separated list of used partitions in the provided given string.
 
+  @param mem_root   Where to allocate following list
   @param part_info  Partitioning info
   @param[out] parts The resulting list of string to fill
+  @param[out] used_partitions_list  result list to fill
 
   Generate a list of used partitions (from bits in part_info->read_partitions
   bitmap), and store it into the provided String object.

@@ -7302,7 +7305,10 @@ void mem_alloc_error(size_t size)
   that was written or locked.
 */
 
-void make_used_partitions_str(partition_info *part_info, String *parts_str)
+void make_used_partitions_str(MEM_ROOT *alloc,
+                              partition_info *part_info,
+                              String *parts_str,
+                              String_list &used_partitions_list)
 {
   parts_str->length(0);
   partition_element *pe;

@@ -7321,6 +7327,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
       {
         if (parts_str->length())
           parts_str->append(',');
+        uint index= parts_str->length();
         parts_str->append(head_pe->partition_name,
                           strlen(head_pe->partition_name),
                           system_charset_info);

@@ -7328,6 +7335,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
         parts_str->append(pe->partition_name,
                           strlen(pe->partition_name),
                           system_charset_info);
+        used_partitions_list.append_str(alloc, parts_str->ptr() + index);
       }
       partition_id++;
     }

@@ -7341,6 +7349,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
       {
         if (parts_str->length())
           parts_str->append(',');
+        used_partitions_list.append_str(alloc, pe->partition_name);
        parts_str->append(pe->partition_name, strlen(pe->partition_name),
                          system_charset_info);
      }

@@ -74,6 +74,7 @@ typedef struct {
   uint32 end_part;
 } part_id_range;
 
+class String_list;
 struct st_partition_iter;
 #define NOT_A_PARTITION_ID UINT_MAX32
 

@@ -114,7 +115,9 @@ bool mysql_unpack_partition(THD *thd, char *part_buf,
                             TABLE *table, bool is_create_table_ind,
                             handlerton *default_db_type,
                             bool *work_part_info_used);
-void make_used_partitions_str(partition_info *part_info, String *parts_str);
+void make_used_partitions_str(MEM_ROOT *mem_root,
+                              partition_info *part_info, String *parts_str,
+                              String_list &used_partitions_list);
 uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
                                        bool left_endpoint,
                                        bool include_endpoint);

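The partition hunks make make_used_partitions_str() fill two outputs in one pass: the legacy comma-separated String used by tabular EXPLAIN, and a String_list consumed by the JSON "partitions" array. The trick is remembering where each appended name starts inside the comma string. A sketch with std::string/std::vector standing in for the server's String, String_list, and MEM_ROOT allocator:

#include <cstdio>
#include <string>
#include <vector>

static void make_used_partitions_str(const std::vector<std::string> &used,
                                     std::string *parts_str,
                                     std::vector<std::string> *parts_list)
{
  parts_str->clear();
  for (const std::string &name : used)
  {
    if (!parts_str->empty())
      parts_str->push_back(',');
    size_t index= parts_str->size();      // same trick as the diff: remember
    parts_str->append(name);              // where this name begins...
    parts_list->push_back(parts_str->substr(index));  // ...and list it too
  }
}

int main()
{
  std::string s;
  std::vector<std::string> list;
  make_used_partitions_str({"p0", "p1", "p2"}, &s, &list);
  std::printf("%s (%zu names)\n", s.c_str(), list.size());  // p0,p1,p2 (3 names)
  return 0;
}
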
@@ -2376,8 +2376,10 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
       This is fake_select_lex. It has no query plan, but we need to set up a
       tracker for ANALYZE
     */
-    Explain_union *eu= output->get_union(select_lex->master_unit()->first_select()->select_number);
+    uint nr= select_lex->master_unit()->first_select()->select_number;
+    Explain_union *eu= output->get_union(nr);
     join_tab[0].tracker= eu->get_fake_select_lex_tracker();
+    tracker= &eu->time_tracker;
   }
 }
 

@@ -23434,8 +23436,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
   tab->tracker= &eta->tracker;
   tab->jbuf_tracker= &eta->jbuf_tracker;
 
-  tab->table->file->tracker= &eta->op_tracker;
-  /* id and select_type are kept in Explain_select */
+  /* Enable the table access time tracker only for "ANALYZE stmt" */
+  if (thd->lex->analyze_stmt)
+    tab->table->file->tracker= &eta->op_tracker;
+
+  /* No need to save id and select_type here, they are kept in Explain_select */
 
   /* table */
   if (table->derived_select_number)

@@ -23488,8 +23493,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
   partition_info *part_info;
   if (!table->derived_select_number &&
       (part_info= table->part_info))
-  {
-    make_used_partitions_str(part_info, &eta->used_partitions);
+  { //TODO: all thd->mem_root here should be fixed
+    make_used_partitions_str(thd->mem_root, part_info, &eta->used_partitions,
+                             eta->used_partitions_list);
     eta->used_partitions_set= true;
   }
   else

@@ -23857,12 +23863,15 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
   if (message)
   {
     Explain_select *xpl_sel;
-    explain_node= xpl_sel= new (output->mem_root) Explain_select(output->mem_root);
+    explain_node= xpl_sel=
+      new (output->mem_root) Explain_select(output->mem_root,
+                                            thd->lex->analyze_stmt);
     join->select_lex->set_explain_type(true);
 
     xpl_sel->select_id= join->select_lex->select_number;
     xpl_sel->select_type= join->select_lex->type;
     xpl_sel->message= message;
+    tracker= &xpl_sel->time_tracker;
     if (select_lex->master_unit()->derived)
       xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
     /* Setting xpl_sel->message means that all other members are invalid */

@@ -23871,7 +23880,9 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table,
   else
   {
     Explain_select *xpl_sel;
-    explain_node= xpl_sel= new (output->mem_root) Explain_select(output->mem_root);
+    explain_node= xpl_sel=
+      new (output->mem_root) Explain_select(output->mem_root,
+                                            thd->lex->analyze_stmt);
     table_map used_tables=0;
+    tracker= &xpl_sel->time_tracker;
 

@@ -1282,7 +1282,7 @@ public:
   bool optimized; ///< flag to avoid double optimization in EXPLAIN
   bool initialized; ///< flag to avoid double init_execution calls
 
-  Exec_time_tracker *tracker;
+  Time_and_counter_tracker *tracker;
 
   enum { QEP_NOT_PRESENT_YET, QEP_AVAILABLE, QEP_DELETED} have_query_plan;
 

@@ -6383,6 +6383,14 @@ static bool fill_alter_inplace_info(THD *thd,
           new_field->field->field_index != key_part->fieldnr - 1)
         goto index_changed;
     }
 
+    /* Check that key comment is not changed. */
+    if (table_key->comment.length != new_key->comment.length ||
+        (table_key->comment.length &&
+         memcmp(table_key->comment.str, new_key->comment.str,
+                table_key->comment.length) != 0))
+      goto index_changed;
+
     continue;
 
   index_changed:

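The new block makes fill_alter_inplace_info() treat a changed index comment as an index change, so DROP INDEX plus ADD INDEX ... COMMENT can no longer be collapsed into a no-op that silently keeps the old comment. The test is the usual length-then-bytes comparison for counted strings; a stand-alone equivalent, with a minimal stand-in for the server's comment type:

#include <cstring>

struct LEX_STRING_sketch { const char *str; size_t length; };

// Same logic as the diff: comments differ if the lengths differ, or if both
// are non-empty and the bytes differ. Two empty comments compare equal
// without dereferencing the (possibly null) pointers.
static bool index_comment_changed(const LEX_STRING_sketch &a,
                                  const LEX_STRING_sketch &b)
{
  return a.length != b.length ||
         (a.length && memcmp(a.str, b.str, a.length) != 0);
}

int main()
{
  LEX_STRING_sketch c1= { "comment1", 8 };
  LEX_STRING_sketch c2= { "comment2", 8 };
  return index_comment_changed(c1, c2) ? 0 : 1;  // changed -> take index_changed path
}
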
@@ -517,7 +517,9 @@ int mysql_update(THD *thd,
   */
   if (thd->lex->describe)
     goto produce_explain_and_leave;
-  query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
+  explain= query_plan.save_explain_update_data(query_plan.mem_root, thd);
+
+  ANALYZE_START_TRACKING(&explain->command_tracker);
 
   DBUG_EXECUTE_IF("show_explain_probe_update_exec_start",
                   dbug_serve_apcs(thd, 1););

@@ -721,7 +723,6 @@ int mysql_update(THD *thd,
   if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
     table->prepare_for_position();
 
-  explain= thd->lex->explain->get_upd_del_plan();
   table->reset_default_fields();
 
   /*

@@ -731,7 +732,6 @@ int mysql_update(THD *thd,
   */
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();
-  ANALYZE_START_TRACKING(&explain->time_tracker);
 
   while (!(error=info.read_record(&info)) && !thd->killed)
   {

@@ -908,7 +908,7 @@ int mysql_update(THD *thd,
       break;
     }
   }
-  ANALYZE_STOP_TRACKING(&explain->time_tracker);
+  ANALYZE_STOP_TRACKING(&explain->command_tracker);
   table->auto_increment_field_not_null= FALSE;
   dup_key_found= 0;
   /*

@@ -1046,7 +1046,7 @@ produce_explain_and_leave:
     We come here for various "degenerate" query plans: impossible WHERE,
     no-partitions-used, impossible-range, etc.
   */
-  query_plan.save_explain_data(thd->mem_root, thd->lex->explain);
+  query_plan.save_explain_update_data(query_plan.mem_root, thd);
 
 emit_explain_and_leave:
   int err2= thd->lex->explain->send_explain(thd);

@@ -275,13 +275,6 @@ buf_page_set_state(
 	}
 #endif /* UNIV_DEBUG */
 	bpage->state = state;
-#ifdef UNIV_DEBUG
-	if( buf_page_get_state(bpage) != state) {
-		fprintf(stderr, "InnoDB: Error: Requested state %d current state %d old_state %d\n",
-			state, buf_page_get_state(bpage), old_state);
-	}
-#endif
	ut_ad(buf_page_get_state(bpage) == state);
 }
 
 /*********************************************************************//**

@@ -279,13 +279,6 @@ buf_page_set_state(
 #endif /* UNIV_DEBUG */
 
 	bpage->state = state;
-#ifdef UNIV_DEBUG
-	if( buf_page_get_state(bpage) != state) {
-		fprintf(stderr, "InnoDB: Error: Requested state %d current state %d old_state %d\n",
-			state, buf_page_get_state(bpage), old_state);
-	}
-#endif
	ut_ad(buf_page_get_state(bpage) == state);
 }
 
 /*********************************************************************//**