Merge 10.3 into 10.4

Marko Mäkelä 2020-09-03 15:53:38 +03:00
commit c9cf6b13f6
78 changed files with 1535 additions and 1150 deletions

View file

@@ -11,7 +11,7 @@ language: cpp
os:
- linux
- osx
osx_image: xcode10.1
osx_image: xcode12u
compiler:
- gcc
- clang
@@ -20,8 +20,6 @@ cache:
timeout: 500
apt: true
ccache: true
directories:
- /usr/local/Cellar # Fails do to permission error: https://github.com/travis-ci/travis-ci/issues/8092
env:
matrix:

View file

@@ -968,8 +968,12 @@ static bool print_row_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
my_b_printf(body_cache, "'%s\n", print_event_info->delimiter);
// flush cache
if ((copy_event_cache_to_file_and_reinit(&print_event_info->head_cache, result_file) ||
copy_event_cache_to_file_and_reinit(&print_event_info->body_cache, result_file)))
if ((copy_event_cache_to_file_and_reinit(&print_event_info->head_cache,
result_file) ||
copy_event_cache_to_file_and_reinit(&print_event_info->body_cache,
result_file) ||
copy_event_cache_to_file_and_reinit(&print_event_info->tail_cache,
result_file)))
return 1;
}
}

View file

@@ -0,0 +1,5 @@
[covering]
innodb_prefix_index_cluster_optimization=on
[unoptimized]
innodb_prefix_index_cluster_optimization=off

View file

@@ -0,0 +1 @@
--source include/have_innodb.inc

View file

@@ -1,4 +1,4 @@
drop table if exists prefixinno;
SET @save_opt= @@GLOBAL.innodb_prefix_index_cluster_optimization;
set global innodb_prefix_index_cluster_optimization = ON;
show variables like 'innodb_prefix_index_cluster_optimization';
Variable_name Value
@@ -346,10 +346,10 @@ f1
🐱🌑
select @cluster_lookups;
@cluster_lookups
2
1
select @cluster_lookups_avoided;
@cluster_lookups_avoided
0
1
# Eligible - record length is shorter than prefix length
SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌑%';
f1
@@ -366,10 +366,10 @@ f1
🌒
select @cluster_lookups;
@cluster_lookups
1
0
select @cluster_lookups_avoided;
@cluster_lookups_avoided
1
2
DROP TABLE t1;
CREATE TABLE t1(
col1 INT,
@@ -398,4 +398,60 @@ select @cluster_lookups_avoided;
@cluster_lookups_avoided
0
DROP TABLE t1;
set global innodb_prefix_index_cluster_optimization = OFF;
#
# MDEV-20464 Division by 0 in row_search_with_covering_prefix()
#
CREATE TABLE t1 (f1 INT, f2 INT AS (f1), f3 INT AS (f1), f4 INT AS (f1),
KEY (f1,f2,f3)) ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (NULL),(0);
SELECT f1, MAX(f3), COUNT(f4) FROM t1 GROUP BY f1;
f1 MAX(f3) COUNT(f4)
NULL NULL 0
0 0 1
DROP TABLE t1;
#
# MDEV-23600 Division by 0 in row_search_with_covering_prefix()
#
CREATE TABLE t(c POINT UNIQUE) ENGINE=InnoDB;
INSERT t SET c=POINT(1,1);
SELECT * FROM t WHERE c > (SELECT MAX(c) FROM t);
c
DROP TABLE t;
#
# MDEV-12486 Wrong results with innodb_prefix_index_cluster_optimization
#
CREATE TABLE wp_blogs (
blog_id bigint(20) NOT NULL auto_increment,
site_id bigint(20) NOT NULL default '0',
domain varchar(200) NOT NULL default '',
path varchar(100) NOT NULL default '',
registered datetime NOT NULL default '0000-00-00 00:00:00',
last_updated datetime NOT NULL default '0000-00-00 00:00:00',
public tinyint(2) NOT NULL default '1',
archived tinyint(2) NOT NULL default '0',
mature tinyint(2) NOT NULL default '0',
spam tinyint(2) NOT NULL default '0',
deleted tinyint(2) NOT NULL default '0',
lang_id int(11) NOT NULL default '0',
PRIMARY KEY (blog_id),
KEY domain (domain(50),path(5)),
KEY lang_id (lang_id)
) ENGINE=InnoDB DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;
INSERT INTO wp_blogs (domain, path) VALUES
('domain.no', '/fondsinvesteringer/'), ('domain.no', '/'),
('foo', 'bar'), ('bar', 'foo'), ('foo', 'foo'), ('bar', 'bar'),
('foo', 'foobar'), ('bar', 'foobar'), ('foobar', 'foobar');
SET GLOBAL innodb_prefix_index_cluster_optimization=off;
SELECT blog_id FROM wp_blogs WHERE domain IN ('domain.no')
AND path IN ( '/fondsinvesteringer/', '/' );
blog_id
2
1
SET GLOBAL innodb_prefix_index_cluster_optimization=on;
SELECT blog_id FROM wp_blogs WHERE domain IN ('domain.no')
AND path IN ( '/fondsinvesteringer/', '/' );
blog_id
2
1
DROP TABLE wp_blogs;
SET GLOBAL innodb_prefix_index_cluster_optimization = @save_opt;

View file

@@ -1,9 +1,6 @@
-- source include/have_innodb.inc
--disable_warnings
drop table if exists prefixinno;
--enable_warnings
SET @save_opt= @@GLOBAL.innodb_prefix_index_cluster_optimization;
set global innodb_prefix_index_cluster_optimization = ON;
show variables like 'innodb_prefix_index_cluster_optimization';
@@ -665,4 +662,58 @@ select @cluster_lookups;
select @cluster_lookups_avoided;
DROP TABLE t1;
set global innodb_prefix_index_cluster_optimization = OFF;
--echo #
--echo # MDEV-20464 Division by 0 in row_search_with_covering_prefix()
--echo #
CREATE TABLE t1 (f1 INT, f2 INT AS (f1), f3 INT AS (f1), f4 INT AS (f1),
KEY (f1,f2,f3)) ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (NULL),(0);
SELECT f1, MAX(f3), COUNT(f4) FROM t1 GROUP BY f1;
DROP TABLE t1;
--echo #
--echo # MDEV-23600 Division by 0 in row_search_with_covering_prefix()
--echo #
CREATE TABLE t(c POINT UNIQUE) ENGINE=InnoDB;
INSERT t SET c=POINT(1,1);
SELECT * FROM t WHERE c > (SELECT MAX(c) FROM t);
DROP TABLE t;
--echo #
--echo # MDEV-12486 Wrong results with innodb_prefix_index_cluster_optimization
--echo #
CREATE TABLE wp_blogs (
blog_id bigint(20) NOT NULL auto_increment,
site_id bigint(20) NOT NULL default '0',
domain varchar(200) NOT NULL default '',
path varchar(100) NOT NULL default '',
registered datetime NOT NULL default '0000-00-00 00:00:00',
last_updated datetime NOT NULL default '0000-00-00 00:00:00',
public tinyint(2) NOT NULL default '1',
archived tinyint(2) NOT NULL default '0',
mature tinyint(2) NOT NULL default '0',
spam tinyint(2) NOT NULL default '0',
deleted tinyint(2) NOT NULL default '0',
lang_id int(11) NOT NULL default '0',
PRIMARY KEY (blog_id),
KEY domain (domain(50),path(5)),
KEY lang_id (lang_id)
) ENGINE=InnoDB DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;
INSERT INTO wp_blogs (domain, path) VALUES
('domain.no', '/fondsinvesteringer/'), ('domain.no', '/'),
('foo', 'bar'), ('bar', 'foo'), ('foo', 'foo'), ('bar', 'bar'),
('foo', 'foobar'), ('bar', 'foobar'), ('foobar', 'foobar');
SET GLOBAL innodb_prefix_index_cluster_optimization=off;
SELECT blog_id FROM wp_blogs WHERE domain IN ('domain.no')
AND path IN ( '/fondsinvesteringer/', '/' );
SET GLOBAL innodb_prefix_index_cluster_optimization=on;
SELECT blog_id FROM wp_blogs WHERE domain IN ('domain.no')
AND path IN ( '/fondsinvesteringer/', '/' );
DROP TABLE wp_blogs;
SET GLOBAL innodb_prefix_index_cluster_optimization = @save_opt;

View file

@@ -0,0 +1,298 @@
@@ -9,7 +9,7 @@
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 8 const,const 1 Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 const 5 Using where
flush status;
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
count(*)
@@ -19,7 +19,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 1
+Handler_read_next 5
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -50,7 +50,7 @@
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 8 NULL 1 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
@@ -61,7 +61,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 1
+Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -71,7 +71,7 @@
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 8 NULL 3 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -84,7 +84,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 3
+Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -93,7 +93,7 @@
explain
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+1 SIMPLE lineitem ref i_l_shipdate i_l_shipdate 4 const 6 Using index
flush status;
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
min(l_orderkey)
@@ -103,7 +103,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -113,7 +113,7 @@
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -124,7 +124,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -134,7 +134,7 @@
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 const 5 Using where
flush status;
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
@@ -145,7 +145,7 @@
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 5
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -157,7 +157,7 @@
where l_shipdate='1992-07-01' and l_orderkey=130
or l_receiptdate='1992-07-01' and l_orderkey=5603;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 8,8 NULL 2 Using union(i_l_shipdate,i_l_receiptdate); Using where
+1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 4,4 NULL 9 Using union(i_l_shipdate,i_l_receiptdate); Using where
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -171,10 +171,10 @@
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
-Handler_read_next 2
+Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
-Handler_read_rnd 2
+Handler_read_rnd 9
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
explain
@@ -183,7 +183,7 @@
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 8,8 NULL 3 Using sort_union(i_l_shipdate,i_l_receiptdate); Using where
+1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 4,4 NULL 9 Using union(i_l_shipdate,i_l_receiptdate); Using where
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -198,10 +198,10 @@
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
-Handler_read_next 3
+Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
-Handler_read_rnd 3
+Handler_read_rnd 9
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
explain
@@ -209,7 +209,7 @@
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem index_merge PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate,i_l_receiptdate 8,8 NULL 3 Using sort_union(i_l_shipdate,i_l_receiptdate); Using where
+1 SIMPLE lineitem index_merge PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate,PRIMARY,i_l_receiptdate,PRIMARY 4,4,4,4 NULL 2 Using union(intersect(i_l_shipdate,PRIMARY),intersect(i_l_receiptdate,PRIMARY)); Using where
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
@@ -223,7 +223,7 @@
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
-Handler_read_next 3
+Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 3
@@ -233,7 +233,7 @@
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_partkey 5 NULL # Using where; Using index for group-by
+1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_partkey 5 NULL # Using where; Using index
flush status;
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
@@ -251,9 +251,9 @@
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 21
-Handler_read_last 1
-Handler_read_next 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 294
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -263,7 +263,7 @@
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range i_l_suppkey i_l_suppkey 5 NULL # Using where; Using index for group-by
+1 SIMPLE lineitem range i_l_suppkey i_l_suppkey 5 NULL # Using where; Using index
flush status;
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
@@ -273,9 +273,9 @@
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 6
-Handler_read_last 1
-Handler_read_next 0
+Handler_read_key 2
+Handler_read_last 0
+Handler_read_next 1230
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -291,7 +291,7 @@
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part range i_p_retailprice i_p_retailprice 9 NULL # Using where; Using index
1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const # Using index
-1 SIMPLE lineitem ref i_l_partkey i_l_partkey 9 dbt3_s001.part.p_partkey,dbt3_s001.orders.o_orderkey # Using index
+1 SIMPLE lineitem ref i_l_partkey i_l_partkey 5 dbt3_s001.part.p_partkey # Using where; Using index
flush status;
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
@@ -305,7 +305,7 @@
Handler_read_first 0
Handler_read_key 3
Handler_read_last 0
-Handler_read_next 3
+Handler_read_next 26
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
@@ -322,8 +322,8 @@
select * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 ALL NULL NULL NULL NULL 5 Using where
-1 SIMPLE part eq_ref i_p_size i_p_size 9 const,dbt3_s001.t0.a 1
+1 SIMPLE t0 ALL NULL NULL NULL NULL 5
+1 SIMPLE part ref i_p_size i_p_size 5 const 5 Using index condition
select * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
a p_partkey p_name p_mfgr p_brand p_type p_size p_container p_retailprice p_comment
@@ -502,7 +502,7 @@
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a and t3.pk1=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
-1 SIMPLE t3 ref PRIMARY,col1 col1 12 test.t1.a,test.t1.a,test.t1.a # Using index
+1 SIMPLE t3 ref PRIMARY,col1 col1 8 test.t1.a,test.t1.a # Using where; Using index
drop table t1,t2,t3;
#
# Bug mdev-4340: performance regression with extended_keys=on
@@ -718,13 +718,13 @@
select * from t1 force index(index_date_updated)
where index_date_updated= 10 and index_id < 800;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range index_date_updated index_date_updated 13 NULL # Using index condition
+1 SIMPLE t1 ref index_date_updated index_date_updated 5 const # Using index condition
# This used to work from the start:
explain
select * from t2 force index(index_date_updated)
where index_date_updated= 10 and index_id < 800;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range index_date_updated index_date_updated 13 NULL # Using index condition
+1 SIMPLE t2 ref index_date_updated index_date_updated 5 const # Using index condition
drop table t0,t1,t2;
#
# MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
@@ -759,13 +759,14 @@
"select_id": 1,
"table": {
"table_name": "t1",
- "access_type": "range",
+ "access_type": "ref",
"possible_keys": ["f2"],
"key": "f2",
- "key_length": "3070",
- "used_key_parts": ["f2", "pk1"],
+ "key_length": "3066",
+ "used_key_parts": ["f2"],
+ "ref": ["const"],
"rows": 1,
- "filtered": 50,
+ "filtered": 100,
"index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'",
"attached_condition": "t1.f1 <= '3'"
}
@@ -792,8 +793,8 @@
"access_type": "range",
"possible_keys": ["k1"],
"key": "k1",
- "key_length": "3011",
- "used_key_parts": ["pk1", "f2", "pk2"],
+ "key_length": "3007",
+ "used_key_parts": ["pk1", "f2"],
"rows": 1,
"filtered": 50,
"index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'",

View file

@@ -0,0 +1,5 @@
[on]
optimizer_switch=extended_keys=on
[off]
optimizer_switch=extended_keys=off

View file

@@ -1,5 +1,3 @@
DROP TABLE IF EXISTS t1,t2,t3,t4;
DROP DATABASE IF EXISTS dbt3_s001;
SET SESSION STORAGE_ENGINE='InnoDB';
set @innodb_stats_persistent_save= @@innodb_stats_persistent;
set @innodb_stats_persistent_sample_pages_save=
@@ -8,28 +6,6 @@ set global innodb_stats_persistent= 1;
set global innodb_stats_persistent_sample_pages=100;
CREATE DATABASE dbt3_s001;
use dbt3_s001;
set @save_ext_key_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 const 5 Using where
flush status;
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
count(*)
1
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
@@ -49,29 +25,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem const PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 8 const,const 1
flush status;
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
count(*)
1
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
@@ -93,29 +46,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
count(*)
1
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
@@ -137,31 +67,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
l_orderkey l_linenumber
1088 3
1217 1
1221 3
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -185,27 +90,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref i_l_shipdate i_l_shipdate 4 const 6 Using index
flush status;
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
min(l_orderkey)
130
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
id select_type table type possible_keys key key_len ref rows Extra
@@ -225,29 +109,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 const 6 Using where; Using index
flush status;
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
min(l_orderkey)
1088
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 6
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -269,29 +130,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 const 5 Using where
flush status;
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
max(l_linenumber)
2
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
@@ -313,34 +151,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey=130
or l_receiptdate='1992-07-01' and l_orderkey=5603;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 4,4 NULL 9 Using union(i_l_shipdate,i_l_receiptdate); Using where
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey=130
or l_receiptdate='1992-07-01' and l_orderkey=5603;
l_orderkey l_linenumber
130 2
5603 2
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 9
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -367,35 +177,6 @@ Handler_read_retry 0
Handler_read_rnd 2
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem index_merge i_l_shipdate,i_l_receiptdate i_l_shipdate,i_l_receiptdate 4,4 NULL 9 Using union(i_l_shipdate,i_l_receiptdate); Using where
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
l_orderkey l_linenumber
130 2
5603 2
5959 3
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 9
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -423,33 +204,6 @@ Handler_read_retry 0
Handler_read_rnd 3
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem index_merge PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate,PRIMARY,i_l_receiptdate,PRIMARY 4,4,4,4 NULL 2 Using union(intersect(i_l_shipdate,PRIMARY),intersect(i_l_receiptdate,PRIMARY)); Using where
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
l_orderkey l_linenumber
130 2
5603 2
5959 3
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
Handler_read_next 9
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 3
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
@@ -475,38 +229,6 @@ Handler_read_retry 0
Handler_read_rnd 3
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_partkey 5 NULL # Using where; Using index
flush status;
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
max(l_orderkey)
5984
5957
5892
5856
5959
5957
5794
5894
5859
5632
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 1
Handler_read_last 0
Handler_read_next 294
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
@@ -537,30 +259,6 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=off';
explain
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range i_l_suppkey i_l_suppkey 5 NULL # Using where; Using index
flush status;
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
max(l_orderkey)
5988
5984
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 2
Handler_read_last 0
Handler_read_next 1230
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
@@ -584,37 +282,6 @@ Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
create index i_p_retailprice on part(p_retailprice);
set optimizer_switch='extended_keys=off';
explain
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
lineitem use index (i_l_partkey), orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
and o_orderkey=l_orderkey and p_partkey=l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part range i_p_retailprice i_p_retailprice 9 NULL # Using where; Using index
1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const # Using index
1 SIMPLE lineitem ref i_l_partkey i_l_partkey 5 dbt3_s001.part.p_partkey # Using where; Using index
flush status;
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
lineitem use index (i_l_partkey), orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
and o_orderkey=l_orderkey and p_partkey=l_partkey;
o_orderkey p_partkey
5895 200
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 3
Handler_read_last 0
Handler_read_next 26
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
set optimizer_switch='extended_keys=on';
explain
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
@@ -651,7 +318,6 @@ Handler_read_rnd_next 0
create table t0 (a int);
insert into t0 values (1), (2), (3), (4), (5);
create index i_p_size on part(p_size);
set optimizer_switch='extended_keys=on';
explain
select * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
@@ -672,7 +338,6 @@ use test;
#
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on,semijoin=on';
SET optimizer_switch='extended_keys=on';
CREATE TABLE t1 (a int, b int) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,1), (2,2);
SELECT * FROM t1 WHERE 2 IN (SELECT MAX(s1.a) FROM t1 AS s1, t1 AS s2);
@@ -693,9 +358,7 @@ set optimizer_switch=@save_optimizer_switch;
# + extended_keys = on
# (valgrinf complains fixed by the patch for bug #914560)
#
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'derived_with_keys=on';
SET optimizer_switch = 'extended_keys=on';
CREATE TABLE t1 (a varchar(1)) ENGINE=MyISAM;
INSERT INTO t1 VALUES ('j'), ('v');
CREATE TABLE t2 (b varchar(1)) ENGINE=MyISAM;
@@ -725,8 +388,6 @@ c int NOT NULL PRIMARY KEY
INSERT INTO t2 VALUES
(10), (11), (12), (13), (14),
(15), (16), (17), (18), (19), (24);
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'extended_keys=off';
EXPLAIN
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
@@ -739,20 +400,6 @@ SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
a
24
SET optimizer_switch = 'extended_keys=on';
EXPLAIN
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t index a,b b 7 NULL 10 Using index
1 PRIMARY t1 ref b b 3 test.t.b 2 Using index; Start temporary
1 PRIMARY t2 index NULL PRIMARY 4 NULL 11 Using index; End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t.a 1 Using index
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
a
24
set optimizer_switch=@save_optimizer_switch;
DROP TABLE t1,t2;
#
# LP Bug #923236: hash join + extended_keys = on
@@ -761,12 +408,10 @@ CREATE TABLE t1 (a int) ENGINE=MyISAM;
CREATE TABLE t2 (b int) ENGINE=MyISAM;
INSERT INTO t1 (a) VALUES (4), (6);
INSERT INTO t2 (b) VALUES (0), (8);
set @save_optimizer_switch=@@optimizer_switch;
set @save_join_cache_level=@@join_cache_level;
SET join_cache_level=3;
SET optimizer_switch='join_cache_hashed=on';
SET optimizer_switch='join_cache_bka=on';
SET optimizer_switch='extended_keys=on';
EXPLAIN
SELECT * FROM t1, t2 WHERE b=a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -791,26 +436,16 @@ UNIQUE KEY uq (c2,c3),
KEY c3 (c3),
KEY c4 (c4)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;
set @save_optimizer_switch=@@optimizer_switch;
set session optimizer_switch='extended_keys=off';
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
DELETE FROM t1;
set session optimizer_switch='extended_keys=on';
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
set optimizer_switch=@save_optimizer_switch;
DROP TABLE t1;
#
# Bug mdev-4220: using ref instead of eq_ref
# with extended_keys=on
# (performance regression introduced in the patch for mdev-3851)
#
set @save_optimizer_switch=@@optimizer_switch;
create table t1 (a int not null) engine=innodb;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (
@@ -820,29 +455,6 @@ insert into t2
select
A.a + 10 * B.a, A.a + 10 * B.a, A.a + 10 * B.a
from t1 A, t1 B;
set optimizer_switch='extended_keys=off';
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL #
1 SIMPLE t2 eq_ref a a 4 test.t1.a # Using where
flush status;
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
a pk a b
0 0 0 0
1 1 1 1
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
Handler_read_key 10
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 11
set optimizer_switch='extended_keys=on';
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
id select_type table type possible_keys key key_len ref rows Extra
@@ -881,18 +493,6 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
set optimizer_switch='extended_keys=off';
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
1 SIMPLE t3 ref col1 col1 8 test.t1.a,test.t1.a # Using index
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a and t3.pk1=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
1 SIMPLE t3 ref PRIMARY,col1 col1 8 test.t1.a,test.t1.a # Using where; Using index
set optimizer_switch='extended_keys=on';
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -907,7 +507,6 @@ drop table t1,t2,t3;
#
# Bug mdev-4340: performance regression with extended_keys=on
#
set @save_optimizer_switch=@@optimizer_switch;
CREATE TABLE t1 (
page_id int(8) unsigned NOT NULL AUTO_INCREMENT,
page_namespace int(11) NOT NULL DEFAULT '0',
@@ -1007,16 +606,6 @@ INSERT INTO t3 VALUES
(89,'text-8008',''),(90,'text-9008',''),(91,'text-9',''),(92,'text-1009',''),
(93,'text-2009',''),(94,'text-3009',''),(95,'text-4009',''),(96,'text-5009',''),
(97,'text-6009',''),(98,'text-7009',''),(99,'text-8009',''),(100,'text-9009','');
set optimizer_switch='extended_keys=off';
EXPLAIN
SELECT * FROM t1, t2 IGNORE INDEX (PRIMARY), t3
WHERE page_id=rev_page AND rev_text_id=old_id AND page_namespace=4 AND page_title='Sandbox'
ORDER BY rev_timestamp ASC LIMIT 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY,name_title name_title 261 const,const 1
1 SIMPLE t2 ref page_timestamp page_timestamp 4 const 10 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.rev_text_id 1
set optimizer_switch='extended_keys=on';
EXPLAIN
SELECT * FROM t1, t2 IGNORE INDEX (PRIMARY), t3
WHERE page_id=rev_page AND rev_text_id=old_id AND page_namespace=4 AND page_title='Sandbox'
@@ -1040,7 +629,6 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
set optimizer_switch='extended_keys=on';
explain select a from t1 where b is null order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b PRIMARY 8 NULL 2 Using where
@@ -1055,14 +643,6 @@ select a from t2 where b is null order by a desc limit 2;
a
3
2
set optimizer_switch='extended_keys=off';
explain select a from t2 where b is null order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range b b 9 NULL 3 Using where; Using filesort
select a from t2 where b is null order by a desc limit 2;
a
3
2
explain select a from t2 where b is null order by a desc;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index b PRIMARY 8 NULL 3 Using where
@@ -1080,7 +660,6 @@ a
2
1
drop table t1, t2;
set optimizer_switch=@save_optimizer_switch;
#
# MDEV-10325: Queries examines all rows of a tables when it should not
#

View file

@@ -1,9 +1,4 @@
--source include/have_innodb.inc
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3,t4;
DROP DATABASE IF EXISTS dbt3_s001;
--enable_warnings
--source include/innodb_prefix_index_cluster_optimization.inc
SET SESSION STORAGE_ENGINE='InnoDB';
@@ -26,32 +21,12 @@ use dbt3_s001;
--enable_result_log
--enable_query_log
set @save_ext_key_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
flush status;
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
flush status;
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
flush status;
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
@@ -60,7 +35,6 @@ select count(*) from lineitem
where l_orderkey=130 and l_linenumber=2 and l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
@@ -69,16 +43,6 @@ select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
flush status;
select count(*) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -87,30 +51,12 @@ select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
flush status;
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
flush status;
select min(l_orderkey) from lineitem where l_shipdate='1992-07-01';
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
@@ -119,16 +65,6 @@ select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
flush status;
select min(l_orderkey) from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1001 and 2000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
@@ -137,16 +73,6 @@ select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
flush status;
select max(l_linenumber) from lineitem
where l_shipdate='1992-07-01' and l_orderkey=130;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -159,20 +85,6 @@ select l_orderkey, l_linenumber
or l_receiptdate='1992-07-01' and l_orderkey=5603;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey=130
or l_receiptdate='1992-07-01' and l_orderkey=5603;
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey=130
or l_receiptdate='1992-07-01' and l_orderkey=5603;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
@@ -185,20 +97,6 @@ select l_orderkey, l_linenumber
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
flush status;
select l_orderkey, l_linenumber
from lineitem use index (i_l_shipdate, i_l_receiptdate)
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
@@ -209,18 +107,6 @@ select l_orderkey, l_linenumber from lineitem
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
explain
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
flush status;
select l_orderkey, l_linenumber from lineitem
where l_shipdate='1992-07-01' and l_orderkey between 1 and 1000
or l_receiptdate='1992-07-01' and l_orderkey between 5001 and 6000;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
--replace_column 9 #
explain
select max(l_orderkey) from lineitem
@@ -230,27 +116,6 @@ select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
--replace_column 9 #
explain
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
flush status;
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
show status like 'handler_read%';
set optimizer_switch='extended_keys=off';
--replace_column 9 #
explain
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
flush status;
select max(l_orderkey) from lineitem
where l_suppkey in (1,4) group by l_suppkey;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
--replace_column 9 #
explain
select max(l_orderkey) from lineitem
@@ -262,23 +127,6 @@ show status like 'handler_read%';
create index i_p_retailprice on part(p_retailprice);
set optimizer_switch='extended_keys=off';
--replace_column 9 #
explain
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
lineitem use index (i_l_partkey), orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
and o_orderkey=l_orderkey and p_partkey=l_partkey;
flush status;
select o_orderkey, p_partkey
from part use index (i_p_retailprice),
lineitem use index (i_l_partkey), orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
and o_orderkey=l_orderkey and p_partkey=l_partkey;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
--replace_column 9 #
explain
select o_orderkey, p_partkey
@@ -303,8 +151,6 @@ create table t0 (a int);
insert into t0 values (1), (2), (3), (4), (5);
create index i_p_size on part(p_size);
set optimizer_switch='extended_keys=on';
explain
select * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
@@ -327,7 +173,6 @@ use test;
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on,semijoin=on';
SET optimizer_switch='extended_keys=on';
CREATE TABLE t1 (a int, b int) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,1), (2,2);
@@ -346,10 +191,7 @@ set optimizer_switch=@save_optimizer_switch;
--echo # (valgrinf complains fixed by the patch for bug #914560)
--echo #
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'derived_with_keys=on';
SET optimizer_switch = 'extended_keys=on';
CREATE TABLE t1 (a varchar(1)) ENGINE=MyISAM;
INSERT INTO t1 VALUES ('j'), ('v');
@@ -389,24 +231,12 @@ INSERT INTO t2 VALUES
(10), (11), (12), (13), (14),
(15), (16), (17), (18), (19), (24);
set @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'extended_keys=off';
EXPLAIN
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
SET optimizer_switch = 'extended_keys=on';
EXPLAIN
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
SELECT a FROM t1 AS t, t2
WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
set optimizer_switch=@save_optimizer_switch;
DROP TABLE t1,t2;
--echo #
@@ -420,13 +250,11 @@ CREATE TABLE t2 (b int) ENGINE=MyISAM;
INSERT INTO t1 (a) VALUES (4), (6);
INSERT INTO t2 (b) VALUES (0), (8);
set @save_optimizer_switch=@@optimizer_switch;
set @save_join_cache_level=@@join_cache_level;
SET join_cache_level=3;
SET optimizer_switch='join_cache_hashed=on';
SET optimizer_switch='join_cache_bka=on';
SET optimizer_switch='extended_keys=on';
EXPLAIN
SELECT * FROM t1, t2 WHERE b=a;
@@ -455,24 +283,11 @@ KEY c4 (c4)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;
set @save_optimizer_switch=@@optimizer_switch;
set session optimizer_switch='extended_keys=off';
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
DELETE FROM t1;
set session optimizer_switch='extended_keys=on';
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
INSERT INTO t1 (c2, c3, c4) VALUES (58291525, 2580, 'foobar')
ON DUPLICATE KEY UPDATE c4 = VALUES(c4);
set optimizer_switch=@save_optimizer_switch;
DROP TABLE t1;
--echo #
@@ -481,8 +296,6 @@ DROP TABLE t1;
--echo # (performance regression introduced in the patch for mdev-3851)
--echo #
set @save_optimizer_switch=@@optimizer_switch;
create table t1 (a int not null) engine=innodb;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -496,15 +309,6 @@ select
A.a + 10 * B.a, A.a + 10 * B.a, A.a + 10 * B.a
from t1 A, t1 B;
set optimizer_switch='extended_keys=off';
--replace_column 9 #
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
flush status;
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
--replace_column 9 #
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
@@ -531,15 +335,6 @@ alter table t3 add primary key (pk1, pk2);
alter table t3 add key (col1, col2);
analyze table t1,t3;
set optimizer_switch='extended_keys=off';
--replace_column 9 #
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a;
--replace_column 9 #
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a and t3.pk1=t1.a;
set optimizer_switch='extended_keys=on';
--replace_column 9 #
explain
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a;
@@ -553,8 +348,6 @@ drop table t1,t2,t3;
--echo # Bug mdev-4340: performance regression with extended_keys=on
--echo #
set @save_optimizer_switch=@@optimizer_switch;
CREATE TABLE t1 (
page_id int(8) unsigned NOT NULL AUTO_INCREMENT,
page_namespace int(11) NOT NULL DEFAULT '0',
@@ -658,13 +451,6 @@ INSERT INTO t3 VALUES
(97,'text-6009',''),(98,'text-7009',''),(99,'text-8009',''),(100,'text-9009','');
set optimizer_switch='extended_keys=off';
EXPLAIN
SELECT * FROM t1, t2 IGNORE INDEX (PRIMARY), t3
WHERE page_id=rev_page AND rev_text_id=old_id AND page_namespace=4 AND page_title='Sandbox'
ORDER BY rev_timestamp ASC LIMIT 10;
set optimizer_switch='extended_keys=on';
EXPLAIN
SELECT * FROM t1, t2 IGNORE INDEX (PRIMARY), t3
WHERE page_id=rev_page AND rev_text_id=old_id AND page_namespace=4 AND page_title='Sandbox'
@@ -685,14 +471,10 @@ insert into t2 (b) values (null), (null), (null);
analyze table t1,t2;
set optimizer_switch='extended_keys=on';
explain select a from t1 where b is null order by a desc limit 2;
select a from t1 where b is null order by a desc limit 2;
explain select a from t2 where b is null order by a desc limit 2;
select a from t2 where b is null order by a desc limit 2;
set optimizer_switch='extended_keys=off';
explain select a from t2 where b is null order by a desc limit 2;
select a from t2 where b is null order by a desc limit 2;
explain select a from t2 where b is null order by a desc;
select a from t2 where b is null order by a desc;
@@ -702,8 +484,6 @@ select a from t2 where b is null order by a desc,a,a;
drop table t1, t2;
set optimizer_switch=@save_optimizer_switch;
--echo #
--echo # MDEV-10325: Queries examines all rows of a tables when it should not
--echo #

View file

@@ -327,6 +327,24 @@ ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
DROP TABLE t1,t2;
#
# MDEV-16372 ER_BASE64_DECODE_ERROR upon replaying binary log with system table
#
FLUSH BINARY LOGS;
CREATE TABLE t1 (pk INT PRIMARY KEY);
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (1), (2) ON DUPLICATE KEY UPDATE pk= pk + 10;
FLUSH BINARY LOGS;
Proof: two subsequent patterns must be found
FOUND 1 /### UPDATE `test`.`t1`/ in mysqlbinlog.sql
FOUND 2 /### INSERT INTO `test`.`t1`/ in mysqlbinlog.sql
DROP TABLE t1;
SELECT * FROM t1;
pk
2
11
# Cleanup
DROP TABLE t1;
CREATE TABLE `t1` (
`id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT,
`is_deleted` BIT(1) DEFAULT b'0',
@@ -351,23 +369,23 @@ FLUSH BINARY LOGS;
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
# at 387
#<date> server id 1 end_log_pos 429 CRC32 XXX GTID 0-1-16
# at POS
#<date> server id 1 end_log_pos END_LOG_POS CRC32 XXX GTID D-S-N
/*!100101 SET @@session.skip_parallel_replication=0*//*!*/;
/*!100001 SET @@session.gtid_domain_id=0*//*!*/;
/*!100001 SET @@session.server_id=1*//*!*/;
/*!100001 SET @@session.gtid_seq_no=16*//*!*/;
/*!100001 SET @@session.gtid_seq_no=21*//*!*/;
START TRANSACTION
/*!*/;
# at 429
# at 543
#<date> server id 1 end_log_pos 543 CRC32 XXX Annotate_rows:
# at POS
# at POS
#<date> server id 1 end_log_pos END_LOG_POS CRC32 XXX Annotate_rows:
#Q> UPDATE t1 t1 INNER JOIN t2 t2 ON t1.ref_id = t2.id
#Q> SET t1.is_deleted = TRUE
#Q> WHERE t1.id =
#<date> server id 1 end_log_pos 594 CRC32 XXX Table_map: `test`.`t1` mapped to number 35
# at 594
#<date> server id 1 end_log_pos 643 CRC32 XXX Update_rows: table id 35 flags: STMT_END_F
#<date> server id 1 end_log_pos END_LOG_POS CRC32 XXX Table_map: `test`.`t1` mapped to number TID
# at POS
#<date> server id 1 end_log_pos END_LOG_POS CRC32 XXX Update_rows: table id TID flags: STMT_END_F
### UPDATE `test`.`t1`
### WHERE
### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
@@ -375,10 +393,10 @@ START TRANSACTION
### @2=b'1' /* BIT(1) meta=1 nullable=1 is_null=0 */
### @3=X /* TIMESTAMP(0) meta=0 nullable=0 is_null=0 */
# Number of rows: 1
# at 643
#<date> server id 1 end_log_pos 725 CRC32 XXX Query thread_id=5 exec_time=x error_code=0
# at POS
#<date> server id 1 end_log_pos END_LOG_POS CRC32 XXX Query thread_id=TID exec_time=x error_code=0
SET TIMESTAMP=X/*!*/;
SET @@session.pseudo_thread_id=5/*!*/;
SET @@session.pseudo_thread_id=TID/*!*/;
SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1/*!*/;
SET @@session.sql_mode=#/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
@ -388,8 +406,6 @@ SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
COMMIT
/*!*/;
# at 725
#<date> server id 1 end_log_pos 773 CRC32 XXX Rotate to master-bin.000004 pos: 4
DELIMITER ;
# End of log file
ROLLBACK /* added by mysqlbinlog */;

View file

@ -33,6 +33,33 @@ FLUSH BINARY LOGS;
DROP TABLE t1,t2;
--echo #
--echo # MDEV-16372 ER_BASE64_DECODE_ERROR upon replaying binary log with system table
--echo #
FLUSH BINARY LOGS;
CREATE TABLE t1 (pk INT PRIMARY KEY);
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (1), (2) ON DUPLICATE KEY UPDATE pk= pk + 10;
--let $binlog = query_get_value(SHOW MASTER STATUS, File, 1)
FLUSH BINARY LOGS;
--exec $MYSQL_BINLOG --verbose $datadir/$binlog > $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
--echo Proof: two subsequent patterns must be found
--let SEARCH_PATTERN= ### UPDATE `test`.`t1`
--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
--source include/search_pattern_in_file.inc
--let SEARCH_PATTERN= ### INSERT INTO `test`.`t1`
--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
--source include/search_pattern_in_file.inc
DROP TABLE t1;
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
SELECT * FROM t1;
--echo # Cleanup
DROP TABLE t1;
--remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
#
# MDEV-14605 ON UPDATE CURRENT_TIMESTAMP fields by multi-table UPDATE are not logged with binlog_row_image=MINIMAL
#
@ -64,11 +91,12 @@ UPDATE t1 t1 INNER JOIN t2 t2 ON t1.ref_id = t2.id
WHERE t1.id = 1;
--let $binlog = query_get_value(SHOW MASTER STATUS, File, 1)
--let $binlog_end= query_get_value(SHOW MASTER STATUS, Position, 1)
FLUSH BINARY LOGS;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--replace_regex /\d{6} *\d*:\d\d:\d\d/<date>/ /SET TIMESTAMP=\d*/SET TIMESTAMP=X/ /exec_time=\d*/exec_time=x/ /@3=\d*/@3=X/ /CRC32 0x[0-9a-f]+/CRC32 XXX/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/
--exec $MYSQL_BINLOG --verbose --verbose --base64-output=DECODE-ROWS $datadir/$binlog --start-position=$binlog_pos
--replace_regex /table id \d*/table id TID/ /mapped to number \d*/mapped to number TID/ /at \d*/at POS/ /end_log_pos \d*/end_log_pos END_LOG_POS/ /GTID \d*-\d*-\d*/GTID D-S-N/ /\d{6} *\d*:\d\d:\d\d/<date>/ /SET TIMESTAMP=\d*/SET TIMESTAMP=X/ /exec_time=\d*/exec_time=x/ /@3=\d*/@3=X/ /CRC32 0x[0-9a-f]+/CRC32 XXX/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/ /thread_id=\d*/thread_id=TID/
--exec $MYSQL_BINLOG --verbose --verbose --base64-output=DECODE-ROWS $datadir/$binlog --start-position=$binlog_pos --stop-position=$binlog_end
DROP TABLE t1,t2;

View file

@ -3432,6 +3432,22 @@ SET sort_buffer_size= @save_sort_buffer_size;
SET max_length_for_sort_data= @save_max_length_for_sort_data;
DROP TABLE t1;
#
# MDEV-23596: Assertion `tab->ref.use_count' failed in join_read_key_unlock_row
#
CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY(b));
INSERT INTO t1 VALUES (0, 1),(1, 2);
CREATE TABLE t2 SELECT * FROM t1;
EXPLAIN SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2
2 DEPENDENT SUBQUERY t1 index PRIMARY b 5 NULL 1 Using where
SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
c
1
NULL
DROP TABLE t1,t2;
# end of 10.1 tests
#
# MDEV-13994: Bad join results with orderby_uses_equalities=on
#
CREATE TABLE books (

View file

@ -2201,6 +2201,21 @@ SET sort_buffer_size= @save_sort_buffer_size;
SET max_length_for_sort_data= @save_max_length_for_sort_data;
DROP TABLE t1;
--echo #
--echo # MDEV-23596: Assertion `tab->ref.use_count' failed in join_read_key_unlock_row
--echo #
CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY(b));
INSERT INTO t1 VALUES (0, 1),(1, 2);
CREATE TABLE t2 SELECT * FROM t1;
EXPLAIN SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
DROP TABLE t1,t2;
--echo # end of 10.1 tests
--echo #
--echo # MDEV-13994: Bad join results with orderby_uses_equalities=on
--echo #

View file

@ -0,0 +1,13 @@
create table d1 (a int);
create temporary table t1 (a int);
create temporary table t2 (a int);
Got one of the listed errors
create temporary table t3 (a int) engine=Aria;
Got one of the listed errors
select * from information_schema.columns where table_schema='test';
Got one of the listed errors
flush tables;
select * from d1;
a
drop temporary table t1;
drop table d1;

View file

@ -0,0 +1,32 @@
source include/not_windows.inc;
#
# MDEV-23569 temporary tables can overwrite existing files
#
let datadir=`select @@datadir`;
create table d1 (a int);
create temporary table t1 (a int);
perl;
chdir "$ENV{MYSQL_TMP_DIR}/mysqld.1/";
for (<#sql*.MYI>) {
/^#sql(.*)_([0-9a-f]+_)([0-9a-f]+)\.MYI$/ or die;
symlink "$ENV{datadir}/test/d1.MYI", sprintf "#sql$1_$2%x.MYI", hex($3)+1;
symlink "$ENV{datadir}/test/d1.MYI", sprintf "#sql$1_$2%x.MAI", hex($3)+1;
symlink "$ENV{datadir}/test/d1.MYI", sprintf "#sql$1_$2%x.MAI", hex($3)+2;
symlink "$ENV{datadir}/test/d1.MYI", "#sql_$1_0.MAI";
}
EOF
error 1,1030;
create temporary table t2 (a int);
error 1,1030;
create temporary table t3 (a int) engine=Aria;
error 1,1030;
select * from information_schema.columns where table_schema='test';
flush tables;
select * from d1;
drop temporary table t1;
remove_files_wildcard $MYSQL_TMP_DIR/mysqld.1 *sql*;
drop table d1;

View file

@ -5356,7 +5356,5 @@ START TRANSACTION
#010909 4:46:40 server id 1 end_log_pos # CRC32 XXX Table_map: `test`.`t1dec102` mapped to number #
# at #
#010909 4:46:40 server id 1 end_log_pos # CRC32 XXX Write_rows: table id # flags: STMT_END_F
### INSERT INTO `test`.`t1dec102`
### SET
### @1=
Error: Found Old DECIMAL (mysql-4.1 or earlier). Not enough metadata to display the value.

View file

@ -449,7 +449,6 @@ START TRANSACTION
### DELETE FROM `test1`.`t1`
### WHERE
### @1=3 /* INT meta=0 nullable=1 is_null=0 */
'/*!*/;
# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
@ -1073,7 +1072,6 @@ START TRANSACTION
### DELETE FROM `test1`.`t1`
### WHERE
### @1=3 /* INT meta=0 nullable=1 is_null=0 */
'/*!*/;
# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;

View file

@ -11,8 +11,7 @@ SELECT a into @a from t;
FLUSH LOGS;
DELETE FROM t;
# Todo: MDEV-10362 to test multi-row Rows_log_event:s in verbose mode
--exec $MYSQL_BINLOG -vv --debug-binlog-row-event-max-encoded-size=256 $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
--exec $MYSQL_BINLOG --verbose --debug-binlog-row-event-max-encoded-size=256 $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
--let SEARCH_PATTERN= BINLOG @binlog_fragment_0, @binlog_fragment_1
--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql

View file

@ -456,7 +456,6 @@ START TRANSACTION
### DELETE FROM `test1`.`t1`
### WHERE
### @1=3 /* INT meta=0 nullable=1 is_null=0 */
'/*!*/;
# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;

View file

@ -16,6 +16,7 @@ MDEV-16509 : MDEV-21523 galera.MDEV-16509
MDEV-20225 : MDEV-20886 galera.MDEV-20225
MW-286 : MDEV-18464 Killing thread can cause mutex deadlock if done concurrently with Galera/replication victim kill
MW-328A : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002
MW-328B : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002
MW-329 : MDEV-19962 Galera test failure on MW-329
galera.galera_defaults : MDEV-21494 Galera test sporadic failure on galera.galera_defaults
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()

View file

@ -1,5 +1,10 @@
connection node_2;
connection node_1;
#
# test phase with cascading foreign key through 3 tables
#
connection node_1;
set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
@ -21,14 +26,15 @@ INSERT INTO grandparent VALUES (1),(2);
INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
connection node_2;
set wsrep_sync_wait=0;
DELETE FROM grandparent WHERE id = 1;
connection node_1;
SELECT COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
COUNT(*) = 0
1
SELECT COUNT(*) = 0 FROM child WHERE parent_id = 1;
COUNT(*) = 0
1
SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
COUNT(*) COUNT(*) = 0
0 1
SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
COUNT(*) COUNT(*) = 0
0 1
DROP TABLE child;
DROP TABLE parent;
DROP TABLE grandparent;

View file

@ -0,0 +1,69 @@
connection node_2;
connection node_1;
#
# test phase with foreign key of varchar type
#
connection node_1;
CREATE TABLE parent (
`id` varchar(36) COLLATE utf8_unicode_ci NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
CREATE TABLE child (
`id` int NOT NULL,
`parent_id` varchar(36) COLLATE utf8_unicode_ci DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `ipallocations_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `parent` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
INSERT INTO parent VALUES ('row one'), ('row two');
INSERT INTO child VALUES (1,'row one'), (2,'row two');
connection node_2;
DELETE FROM parent;
connection node_1;
SELECT COUNT(*), COUNT(*) = 0 FROM parent;
COUNT(*) COUNT(*) = 0
0 1
SELECT COUNT(*), COUNT(*) = 0 FROM child;
COUNT(*) COUNT(*) = 0
0 1
DROP TABLE child;
DROP TABLE parent;
#
# test phase with MM conflict in FK cascade
#
connection node_1;
set wsrep_retry_autocommit=0;
CREATE TABLE parent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
CREATE TABLE child (
id INT NOT NULL PRIMARY KEY,
j int default 0,
parent_id INT,
FOREIGN KEY (parent_id)
REFERENCES parent(id)
ON DELETE CASCADE
) ENGINE=InnoDB;
INSERT INTO parent VALUES (1);
INSERT INTO child VALUES (1,0,1);
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
connection node_2;
DELETE FROM parent;
connection node_1a;
SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
connection node_1;
update child set j=2;;
connection node_1a;
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "RESET";
connection node_1;
SELECT COUNT(*), COUNT(*) = 0 FROM parent;
COUNT(*) COUNT(*) = 0
0 1
SELECT COUNT(*), COUNT(*) = 0 FROM child;
COUNT(*) COUNT(*) = 0
0 1
DROP TABLE child;
DROP TABLE parent;

View file

@ -0,0 +1,34 @@
connection node_2;
connection node_1;
CREATE TABLE IF NOT EXISTS t1 (id int(10) not null primary key) engine=innodb;
CREATE OR REPLACE TRIGGER tr1
BEFORE INSERT ON t1 FOR EACH ROW
BEGIN
SET NEW.id = 100;
END|
INSERT INTO t1 VALUES (1);
SELECT * from t1;
id
100
CREATE OR REPLACE TRIGGER tr1
BEFORE INSERT ON t1 FOR EACH ROW
BEGIN
SET NEW.id = 200;
END|
connection node_2;
SET SESSION wsrep_sync_wait=15;
SELECT * FROM t1;
id
100
INSERT INTO t1 values (2);
SELECT * FROM t1;
id
100
200
connection node_1;
SELECT * FROM t1;
id
100
200
DROP TRIGGER tr1;
DROP TABLE t1;

View file

@ -3,7 +3,13 @@
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
--echo #
--echo # test phase with cascading foreign key through 3 tables
--echo #
--connection node_1
set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
@ -30,11 +36,17 @@ INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
--connection node_2
set wsrep_sync_wait=0;
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
DELETE FROM grandparent WHERE id = 1;
--connection node_1
SELECT COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
SELECT COUNT(*) = 0 FROM child WHERE parent_id = 1;
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
DROP TABLE child;
DROP TABLE parent;

View file

@ -0,0 +1,98 @@
--source include/galera_cluster.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--echo #
--echo # test phase with foreign key of varchar type
--echo #
--connection node_1
CREATE TABLE parent (
`id` varchar(36) COLLATE utf8_unicode_ci NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
CREATE TABLE child (
`id` int NOT NULL,
`parent_id` varchar(36) COLLATE utf8_unicode_ci DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `ipallocations_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `parent` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
INSERT INTO parent VALUES ('row one'), ('row two');
INSERT INTO child VALUES (1,'row one'), (2,'row two');
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
DELETE FROM parent;
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
SELECT COUNT(*), COUNT(*) = 0 FROM parent;
SELECT COUNT(*), COUNT(*) = 0 FROM child;
DROP TABLE child;
DROP TABLE parent;
--echo #
--echo # test phase with MM conflict in FK cascade
--echo #
--connection node_1
set wsrep_retry_autocommit=0;
CREATE TABLE parent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
CREATE TABLE child (
id INT NOT NULL PRIMARY KEY,
j int default 0,
parent_id INT,
FOREIGN KEY (parent_id)
REFERENCES parent(id)
ON DELETE CASCADE
) ENGINE=InnoDB;
INSERT INTO parent VALUES (1);
INSERT INTO child VALUES (1,0,1);
# block applier before applying
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
DELETE FROM parent;
--connection node_1a
# wait until applier has reached the sync point
SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
--connection node_1
# issue conflicting write to child table, it should fail in certification
--error ER_LOCK_DEADLOCK
--send update child set j=2;
--connection node_1a
# release the applier
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "RESET";
--connection node_1
--reap
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
SELECT COUNT(*), COUNT(*) = 0 FROM parent;
SELECT COUNT(*), COUNT(*) = 0 FROM child;
DROP TABLE child;
DROP TABLE parent;

View file

@ -0,0 +1,36 @@
--source include/galera_cluster.inc
#
# MDEV-21578 CREATE OR REPLACE TRIGGER in Galera cluster not replicating
#
CREATE TABLE IF NOT EXISTS t1 (id int(10) not null primary key) engine=innodb;
--delimiter |
CREATE OR REPLACE TRIGGER tr1
BEFORE INSERT ON t1 FOR EACH ROW
BEGIN
SET NEW.id = 100;
END|
--delimiter ;
INSERT INTO t1 VALUES (1);
SELECT * from t1;
--delimiter |
CREATE OR REPLACE TRIGGER tr1
BEFORE INSERT ON t1 FOR EACH ROW
BEGIN
SET NEW.id = 200;
END|
--delimiter ;
--connection node_2
SET SESSION wsrep_sync_wait=15;
SELECT * FROM t1;
INSERT INTO t1 values (2);
SELECT * FROM t1;
--connection node_1
SELECT * FROM t1;
DROP TRIGGER tr1;
DROP TABLE t1;

View file

@ -10,12 +10,6 @@
#
##############################################################################
galera_ipv6_mariabackup : MDEV-23573 Could not open '../galera/include/have_mariabackup.inc'
galera_ipv6_mariabackup_section : MDEV-23574 Could not open '../galera/include/have_mariabackup.inc'
galera_ipv6_mysqldump : MDEV-23576 WSREP_SST: [ERROR] rsync daemon port '16008' has been taken
galera_ipv6_rsyn : MDEV-23581 WSREP_SST: [ERROR] rsync daemon port '16008' has been taken
galera_ipv6_rsync_section : MDEV-23580 WSREP_SST: [ERROR] rsync daemon port '16008' has been taken
galera_ipv6_xtrabackup-v2 : MDEV-23575 WSREP_SST: [ERROR] innobackupex not in path
galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(}
galera_slave_options_do :MDEV-8798
galera_slave_options_ignore : MDEV-8798

View file

@ -1,20 +0,0 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
connection node_3;
connection node_1;
CREATE TABLE t1 (f1 INTEGER);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
connection node_2;
SELECT COUNT(*) = 10 FROM t1;
COUNT(*) = 10
1
Killing server ...
connection node_1;
INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
# restart
SELECT COUNT(*) = 20 FROM t1;
COUNT(*) = 20
1
DROP TABLE t1;

View file

@ -17,7 +17,6 @@ push @::global_suppressions,
qr(WSREP:.*down context.*),
qr(WSREP: Failed to send state UUID:),
qr(WSREP: last inactive check more than .* skipping check),
qr(WSREP: SQL statement was ineffective),
qr(WSREP: Releasing seqno [0-9]* before [0-9]* was assigned.),
qr|WSREP: access file\(.*gvwstate.dat\) failed\(No such file or directory\)|,
qr(WSREP: Quorum: No node with complete state),
@ -31,14 +30,34 @@ push @::global_suppressions,
qr(WSREP: user message in state LEAVING),
qr(WSREP: .* sending install message failed: Transport endpoint is not connected),
qr(WSREP: .* sending install message failed: Resource temporarily unavailable),
qr(WSREP: Sending JOIN failed: -107 \(Transport endpoint is not connected\). Will retry in new primary component.),
qr(WSREP: Maximum writeset size exceeded by .*),
qr(WSREP: transaction size exceeded.*),
qr(WSREP: RBR event .*),
qr(WSREP: Ignoring error for TO isolated action: .*),
qr(WSREP: transaction size limit .*),
qr(WSREP: rbr write fail, .*),
qr(WSREP: .*Backend not supported: foo.*),
qr(WSREP: .*Failed to initialize backend using .*),
qr(WSREP: .*Failed to open channel 'my_wsrep_cluster' at .*),
qr(WSREP: gcs connect failed: Socket type not supported),
qr(WSREP: failed to open gcomm backend connection: 110: failed to reach primary view: 110 .*),
qr(WSREP: .*Failed to open backend connection: -110 .*),
qr(WSREP: .*Failed to open channel 'my_wsrep_cluster' at .*),
qr(WSREP: gcs connect failed: Connection timed out),
qr|WSREP: wsrep::connect\(.*\) failed: 7|,
qr(WSREP: SYNC message from member .* in non-primary configuration. Ignored.),
qr(WSREP: Could not find peer:),
qr(WSREP: TO isolation failed for: .*),
qr|WSREP: gcs_caused\(\) returned .*|,
qr|WSREP: Protocol violation. JOIN message sender .* is not in state transfer \(SYNCED\). Message ignored.|,
qr|WSREP: Protocol violation. JOIN message sender .* is not in state transfer \(JOINED\). Message ignored.|,
qr|WSREP: Unsupported protocol downgrade: incremental data collection disabled. Expect abort.|,
qr(WSREP: Action message in non-primary configuration from member [0-9]*),
qr(WSREP: Last Applied Action message in non-primary configuration from member [0-9]*),
qr(WSREP: discarding established .*),
qr|WSREP: .*core_handle_uuid_msg.*|,
qr(WSREP: --wsrep-causal-reads=ON takes precedence over --wsrep-sync-wait=0. WSREP_SYNC_WAIT_BEFORE_READ is on),
qr(WSREP: JOIN message from member .* in non-primary configuration. Ignored.),
qr|WSREP: JOIN message from member .* in non-primary configuration. Ignored.|,
qr|Query apply failed:*|,
qr(WSREP: Ignoring error*),
qr(WSREP: Failed to remove page file .*),

View file

@ -1,4 +0,0 @@
!include ../galera_3nodes.cnf
[mysqld]
wsrep-causal-reads=OFF

View file

@ -1,80 +0,0 @@
#
# This test uses innobackupex to take a backup on node #2 and then restores that node from backup
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source suite/galera/include/have_mariabackup.inc
--let $galera_connection_name = node_3
--let $galera_server_number = 3
--source include/galera_connect.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--let $node_3=node_3
--source ../galera/include/auto_increment_offset_save.inc
--connection node_1
CREATE TABLE t1 (f1 INTEGER);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
--connection node_2
SELECT COUNT(*) = 10 FROM t1;
--exec rm -rf $MYSQL_TMP_DIR/innobackupex_backup
--exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-backup.log
--exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --apply-log --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-apply.log
--source ../galera/include/kill_galera.inc
--sleep 1
--connection node_1
INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
--exec rm -rf $MYSQLTEST_VARDIR/mysqld.2/data/*
--exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --copy-back --port=$NODE_MYPORT_2 --host=127.0.0.1 $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-restore.log
#
# Convert the xtrabackup_galera_info into a grastate.dat file
#
--perl
use strict;
my $xtrabackup_galera_info_file = $ENV{'MYSQL_TMP_DIR'}.'/innobackupex_backup/xtrabackup_galera_info';
open(XTRABACKUP_GALERA_INFO, $xtrabackup_galera_info_file) or die "Can not open $xtrabackup_galera_info_file: $!";
my $xtrabackup_galera_info = <XTRABACKUP_GALERA_INFO>;
my ($uuid, $seqno) = split(':', $xtrabackup_galera_info);
my $grastate_dat_file = $ENV{'MYSQLTEST_VARDIR'}.'/mysqld.2/data/grastate.dat';
die "grastate.dat already exists" if -e $grastate_dat_file;
open(GRASTATE_DAT, ">$grastate_dat_file") or die "Can not write to $grastate_dat_file: $!";
print GRASTATE_DAT "version: 2.1\n";
print GRASTATE_DAT "uuid: $uuid\n";
print GRASTATE_DAT "seqno: $seqno\n";
print GRASTATE_DAT "cert_index:\n";
exit(0);
EOF
--source include/start_mysqld.inc
--sleep 5
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
SELECT COUNT(*) = 20 FROM t1;
DROP TABLE t1;
--sleep 10
--let $galera_connection_name = node_2a
--let $galera_server_number = 2
--source include/galera_connect.inc
--let $node_2=node_2a
# Restore original auto_increment_offset values.
--source ../galera/include/auto_increment_offset_restore.inc

View file

@ -1,7 +1,8 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source include/have_innodb.inc
--source ../galera/include/have_mariabackup.inc
--source include/have_mariabackup.inc
--source include/force_restart.inc
# Confirm that initial handshake happened over ipv6

View file

@ -1,7 +1,8 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source include/have_innodb.inc
--source ../galera/include/have_mariabackup.inc
--source include/have_mariabackup.inc
--source include/force_restart.inc
# Confirm that initial handshake happened over ipv6

View file

@ -1,5 +1,6 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source include/force_restart.inc
call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to'");
call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");

View file

@ -1,5 +1,6 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source include/force_restart.inc
# Confirm that initial handshake happened over ipv6

View file

@ -1,5 +1,6 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source include/force_restart.inc
# Confirm that initial handshake happened over ipv6

View file

@ -34,6 +34,9 @@ SET SESSION wsrep_sync_wait = 0;
SET SESSION wsrep_dirty_reads = 1;
--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
--source include/wait_condition.inc
# Those statements should succeed
--error 0
@ -111,6 +114,7 @@ SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.PROCESSLIST;
# Restore cluster
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
--source include/galera_wait_ready.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';

View file

@ -747,4 +747,66 @@ ANALYZE TABLE t1, t2;
--eval $query
DROP TABLE t1, t2;
if($support_virtual_index)
{
--echo #
--echo # MDEV-20618 Assertion `btr_validate_index(index, 0, false)' failed
--echo # in row_upd_sec_index_entry
--echo #
CREATE TABLE t1 (A BIT(15), VA BIT(10) GENERATED ALWAYS AS (A),PK INT,
PRIMARY KEY (PK), UNIQUE KEY (VA));
INSERT IGNORE INTO t1 VALUES ( '\r1','a',1);
--error ER_DATA_TOO_LONG
REPLACE INTO t1 (PK) VALUES (1);
DROP TABLE t1;
--echo #
--echo # MDEV-17890 Record in index was not found on update, server crash in
--echo # row_upd_build_difference_binary or
--echo # Assertion `0' failed in row_upd_sec_index_entry
--echo #
CREATE TABLE t1 (
pk BIGINT AUTO_INCREMENT,
b BIT(15),
v BIT(10) AS (b) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(v)
);
INSERT IGNORE INTO t1 (b) VALUES (b'101110001110100'),(b'011101');
SELECT pk, b INTO OUTFILE 'load.data' FROM t1;
--error ER_DATA_TOO_LONG
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1 (pk, b);
--let $datadir= `SELECT @@datadir`
--remove_file $datadir/test/load.data
DROP TABLE t1;
--echo #
--echo # MDEV-17834 Server crashes in row_upd_build_difference_binary
--echo # on LOAD DATA into table with indexed virtual column
--echo #
CREATE TABLE t1 (
pk INT,
i TINYINT,
ts TIMESTAMP NULL,
vi TINYINT AS (i+1) PERSISTENT,
vts TIMESTAMP(5) AS (ts) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(vts)
);
INSERT IGNORE INTO t1 (pk,i) VALUES (1,127);
--write_file $MYSQLTEST_VARDIR/tmp/load.data
1 4 2019-01-01 00:00:00
EOF
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--error ER_WARN_DATA_OUT_OF_RANGE
eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
--remove_file $MYSQLTEST_VARDIR/tmp/load.data
DROP TABLE t1;
}

View file

@ -833,6 +833,56 @@ a1 a2 b
0 NULL 1
DROP TABLE t1, t2;
#
# MDEV-20618 Assertion `btr_validate_index(index, 0, false)' failed
# in row_upd_sec_index_entry
#
CREATE TABLE t1 (A BIT(15), VA BIT(10) GENERATED ALWAYS AS (A),PK INT,
PRIMARY KEY (PK), UNIQUE KEY (VA));
INSERT IGNORE INTO t1 VALUES ( '\r1','a',1);
Warnings:
Warning 1906 The value specified for generated column 'VA' in table 't1' has been ignored
Warning 1264 Out of range value for column 'VA' at row 1
REPLACE INTO t1 (PK) VALUES (1);
ERROR 22001: Data too long for column 'VA' at row 1
DROP TABLE t1;
#
# MDEV-17890 Record in index was not found on update, server crash in
# row_upd_build_difference_binary or
# Assertion `0' failed in row_upd_sec_index_entry
#
CREATE TABLE t1 (
pk BIGINT AUTO_INCREMENT,
b BIT(15),
v BIT(10) AS (b) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(v)
);
INSERT IGNORE INTO t1 (b) VALUES (b'101110001110100'),(b'011101');
Warnings:
Warning 1264 Out of range value for column 'v' at row 1
SELECT pk, b INTO OUTFILE 'load.data' FROM t1;
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1 (pk, b);
ERROR 22001: Data too long for column 'v' at row 1
DROP TABLE t1;
#
# MDEV-17834 Server crashes in row_upd_build_difference_binary
# on LOAD DATA into table with indexed virtual column
#
CREATE TABLE t1 (
pk INT,
i TINYINT,
ts TIMESTAMP NULL,
vi TINYINT AS (i+1) PERSISTENT,
vts TIMESTAMP(5) AS (ts) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(vts)
);
INSERT IGNORE INTO t1 (pk,i) VALUES (1,127);
Warnings:
Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
#
# BUG#21365158 WL8149:ASSERTION `!TABLE || (!TABLE->WRITE_SET
#

View file

@ -833,6 +833,56 @@ a1 a2 b
0 NULL 1
DROP TABLE t1, t2;
#
# MDEV-20618 Assertion `btr_validate_index(index, 0, false)' failed
# in row_upd_sec_index_entry
#
CREATE TABLE t1 (A BIT(15), VA BIT(10) GENERATED ALWAYS AS (A),PK INT,
PRIMARY KEY (PK), UNIQUE KEY (VA));
INSERT IGNORE INTO t1 VALUES ( '\r1','a',1);
Warnings:
Warning 1906 The value specified for generated column 'VA' in table 't1' has been ignored
Warning 1264 Out of range value for column 'VA' at row 1
REPLACE INTO t1 (PK) VALUES (1);
ERROR 22001: Data too long for column 'VA' at row 1
DROP TABLE t1;
#
# MDEV-17890 Record in index was not found on update, server crash in
# row_upd_build_difference_binary or
# Assertion `0' failed in row_upd_sec_index_entry
#
CREATE TABLE t1 (
pk BIGINT AUTO_INCREMENT,
b BIT(15),
v BIT(10) AS (b) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(v)
);
INSERT IGNORE INTO t1 (b) VALUES (b'101110001110100'),(b'011101');
Warnings:
Warning 1264 Out of range value for column 'v' at row 1
SELECT pk, b INTO OUTFILE 'load.data' FROM t1;
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1 (pk, b);
ERROR 22001: Data too long for column 'v' at row 1
DROP TABLE t1;
#
# MDEV-17834 Server crashes in row_upd_build_difference_binary
# on LOAD DATA into table with indexed virtual column
#
CREATE TABLE t1 (
pk INT,
i TINYINT,
ts TIMESTAMP NULL,
vi TINYINT AS (i+1) PERSISTENT,
vts TIMESTAMP(5) AS (ts) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(vts)
);
INSERT IGNORE INTO t1 (pk,i) VALUES (1,127);
Warnings:
Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
DROP VIEW IF EXISTS v1,v2;
DROP TABLE IF EXISTS t1,t2,t3;
DROP PROCEDURE IF EXISTS p1;

View file

@ -255,6 +255,9 @@ ERROR 22007: Incorrect date value: '20190132' for column `test`.`t1`.`vb` at row
SELECT * FROM t1;
a b vb
ROLLBACK;
SELECT * FROM t1;
a b vb
1 20190132 0000-00-00
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK

View file

@ -278,7 +278,6 @@ DELETE FROM t1;
INSERT INTO t1 (a,b) VALUES(1,20190123);
SELECT * FROM t1;
ROLLBACK;
# MDEV-18366 FIXME: fix the crash and enable this
# SELECT * FROM t1;
SELECT * FROM t1;
CHECK TABLE t1;
DROP TABLE t1;

View file

@ -220,3 +220,19 @@ drop table t1,t2;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails
drop table t1,t2;
ERROR 42S02: Unknown table 'test.t2'
#
# MDEV-23470 InnoDB: Failing assertion: cmp < 0 in
# row_ins_check_foreign_constraint
#
CREATE TABLE t1(f1 INT NOT NULL PRIMARY KEY, f2 INT NOT NULL)ENGINE=InnoDB;
CREATE TABLE t2(f1 VARCHAR(100), f2 INT NOT NULL,
INDEX(f2))ENGINE=InnoDB;
INSERT INTO t1 VALUES(99, 2);
ALTER TABLE t2 ADD FOREIGN KEY(f2) REFERENCES t1(f1);
SET FOREIGN_KEY_CHECKS=0;
DROP INDEX f2 ON t2;
SET FOREIGN_KEY_CHECKS=1;
INSERT INTO t2 VALUES('G', 3);
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`f2`) REFERENCES `t1` (`f1`))
DROP TABLE t2, t1;
SET FOREIGN_KEY_CHECKS=DEFAULT;

View file

@ -249,3 +249,22 @@ show create table t2;
drop table t1,t2;
--error ER_BAD_TABLE_ERROR
drop table t1,t2;
--echo #
--echo # MDEV-23470 InnoDB: Failing assertion: cmp < 0 in
--echo # row_ins_check_foreign_constraint
--echo #
CREATE TABLE t1(f1 INT NOT NULL PRIMARY KEY, f2 INT NOT NULL)ENGINE=InnoDB;
CREATE TABLE t2(f1 VARCHAR(100), f2 INT NOT NULL,
INDEX(f2))ENGINE=InnoDB;
INSERT INTO t1 VALUES(99, 2);
ALTER TABLE t2 ADD FOREIGN KEY(f2) REFERENCES t1(f1);
SET FOREIGN_KEY_CHECKS=0;
DROP INDEX f2 ON t2;
SET FOREIGN_KEY_CHECKS=1;
--error ER_NO_REFERENCED_ROW_2
INSERT INTO t2 VALUES('G', 3);
DROP TABLE t2, t1;
SET FOREIGN_KEY_CHECKS=DEFAULT;

View file

@ -7,6 +7,8 @@ SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
connection slave;
include/stop_slave.inc
SET @@GLOBAL.replicate_do_table="";
SET @@GLOBAL.replicate_ignore_table="";
SET @@GLOBAL.replicate_do_table="test.t1,test.t2,test.t3";
SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
include/start_slave.inc

View file

@ -7,6 +7,8 @@ SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
connection slave;
include/stop_slave.inc
SET @@GLOBAL.replicate_wild_do_table="";
SET @@GLOBAL.replicate_wild_ignore_table="";
SET @@GLOBAL.replicate_wild_do_table="test.a%";
SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
include/start_slave.inc

View file

@ -51,6 +51,8 @@ SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
connection slave;
source include/stop_slave.inc;
SET @@GLOBAL.replicate_do_table="";
SET @@GLOBAL.replicate_ignore_table="";
SET @@GLOBAL.replicate_do_table="test.t1,test.t2,test.t3";
SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
source include/start_slave.inc;

View file

@ -13,6 +13,8 @@ SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
connection slave;
source include/stop_slave.inc;
SET @@GLOBAL.replicate_wild_do_table="";
SET @@GLOBAL.replicate_wild_ignore_table="";
SET @@GLOBAL.replicate_wild_do_table="test.a%";
SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
source include/start_slave.inc;

View file

@ -476,8 +476,7 @@ select pk, col_bit+0, vcol_bit+0 from t1;
pk col_bit+0 vcol_bit+0
99 10000 1023
REPLACE LOW_PRIORITY INTO `t1` (`pk`) VALUES (99);
Warnings:
Warning 1264 Out of range value for column 'vcol_bit' at row 1
ERROR 22001: Data too long for column 'vcol_bit' at row 1
drop table t1;
#
# MDEV-17837 REPLACE on table with virtual_field can cause crash in set_ok_status()
@ -496,8 +495,7 @@ INSERT IGNORE INTO t1 (pk,i) VALUES (1,127);
Warnings:
Warning 1264 Out of range value for column 'vi' at row 1
REPLACE INTO t1 (pk,i) VALUES (1,2);
Warnings:
Warning 1264 Out of range value for column 'vi' at row 1
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
SET @sql_mode=@old_sql_mode;
#

View file

@ -448,6 +448,7 @@ replace INTO `t1` (`pk`,col_bit) VALUES (99,1000);
select pk, col_bit+0, vcol_bit+0 from t1;
replace INTO `t1` (`pk`,col_bit) VALUES (99,10000);
select pk, col_bit+0, vcol_bit+0 from t1;
--error ER_DATA_TOO_LONG
REPLACE LOW_PRIORITY INTO `t1` (`pk`) VALUES (99);
drop table t1;
@ -468,6 +469,7 @@ CREATE TABLE t1 (
INSERT INTO t1 (pk,i) VALUES (1,1);
TRUNCATE TABLE t1;
INSERT IGNORE INTO t1 (pk,i) VALUES (1,127);
--error ER_WARN_DATA_OUT_OF_RANGE
REPLACE INTO t1 (pk,i) VALUES (1,2);
DROP TABLE t1;
SET @sql_mode=@old_sql_mode;

View file

@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates
Copyright (c) 2010, 2015, MariaDB
Copyright (c) 2010, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -814,7 +814,8 @@ int _my_b_cache_read(IO_CACHE *info, uchar *Buffer, size_t Count)
info->read_pos=info->buffer+Count;
info->read_end=info->buffer+length;
info->pos_in_file=pos_in_file;
memcpy(Buffer, info->buffer, Count);
if (Count)
memcpy(Buffer, info->buffer, Count);
DBUG_RETURN(0);
}
@ -1315,7 +1316,8 @@ static int _my_b_cache_read_r(IO_CACHE *cache, uchar *Buffer, size_t Count)
DBUG_RETURN(1);
}
cnt= (len > Count) ? Count : len;
memcpy(Buffer, cache->read_pos, cnt);
if (cnt)
memcpy(Buffer, cache->read_pos, cnt);
Count -= cnt;
Buffer+= cnt;
left_length+= cnt;
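
The two hunks above (like the similar ones in the allocator, debug-sync and network files further down) guard memcpy() so it is never called with a zero length: a zero-length request can legitimately arrive together with a null or not-yet-valid source pointer, and passing a null pointer to memcpy() is undefined behaviour even when the count is 0. A minimal standalone sketch of the same defensive pattern, with illustrative names only (not the server's IO_CACHE code):

// Why the "if (Count) memcpy(...)" guard matters: skip the call entirely
// for zero-length copies so a null cache position is never handed to memcpy().
#include <cassert>
#include <cstddef>
#include <cstring>

static void copy_from_cache(char *dst, const char *cache_pos, size_t count)
{
  if (count)                       // guard: no call at all for count == 0
    memcpy(dst, cache_pos, count);
}

int main()
{
  char out[8];
  const char *empty_cache = nullptr;     // nothing buffered yet
  copy_from_cache(out, empty_cache, 0);  // well-defined: memcpy() not reached
  char buffered[4] = {'a', 'b', 'c', 'd'};
  copy_from_cache(out, buffered, sizeof buffered);
  assert(out[0] == 'a' && out[3] == 'd');
  return 0;
}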

View file

@ -1,5 +1,6 @@
/*
Copyright (c) 2000, 2010, Oracle and/or its affiliates
Copyright (c) 2010, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -479,7 +480,8 @@ char *strmake_root(MEM_ROOT *root, const char *str, size_t len)
char *pos;
if ((pos=alloc_root(root,len+1)))
{
memcpy(pos,str,len);
if (len)
memcpy(pos,str,len);
pos[len]=0;
}
return pos;

View file

@ -1,6 +1,6 @@
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
# Copyright (C) 2017-2019 MariaDB
# Copyright (C) 2017-2020 MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -359,7 +359,7 @@ read_cnf()
iopts=$(parse_cnf sst inno-backup-opts "")
iapts=$(parse_cnf sst inno-apply-opts "")
impts=$(parse_cnf sst inno-move-opts "")
stimeout=$(parse_cnf sst sst-initial-timeout 100)
stimeout=$(parse_cnf sst sst-initial-timeout 300)
ssyslog=$(parse_cnf sst sst-syslog 0)
ssystag=$(parse_cnf mysqld_safe syslog-tag "${SST_SYSLOG_TAG:-}")
ssystag+="-"
@ -620,7 +620,8 @@ recv_joiner()
popd 1>/dev/null
if [[ ${RC[0]} -eq 124 ]];then
wsrep_log_error "Possible timeout in receving first data from donor in gtid stage"
wsrep_log_error "Possible timeout in receiving first data from "
"donor in gtid stage: exit codes: ${RC[@]}"
exit 32
fi

View file

@ -1,4 +1,5 @@
/* Copyright (c) 2009, 2013, Oracle and/or its affiliates.
Copyright (c) 2013, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -319,7 +320,8 @@ static char *debug_sync_bmove_len(char *to, char *to_end,
DBUG_ASSERT(to_end);
DBUG_ASSERT(!length || from);
set_if_smaller(length, (size_t) (to_end - to));
memcpy(to, from, length);
if (length)
memcpy(to, from, length);
return (to + length);
}

View file

@ -3878,7 +3878,7 @@ bool Log_event::print_base64(IO_CACHE* file,
ev->need_flashback_review= need_flashback_review;
if (print_event_info->verbose)
{
if (ev->print_verbose(file, print_event_info))
if (ev->print_verbose(&print_event_info->tail_cache, print_event_info))
goto err;
}
else
@ -3902,22 +3902,9 @@ bool Log_event::print_base64(IO_CACHE* file,
}
#else
if (print_event_info->verbose)
{
/*
Verbose event printout can't start before encoded data
got enquoted. This is done at this point though multi-row
statement remain vulnerable.
TODO: fix MDEV-10362 to remove this workaround.
*/
if (print_event_info->base64_output_mode !=
BASE64_OUTPUT_DECODE_ROWS)
my_b_printf(file, "'%s\n", print_event_info->delimiter);
error= ev->print_verbose(file, print_event_info);
}
error= ev->print_verbose(&print_event_info->tail_cache, print_event_info);
else
{
ev->count_row_events(print_event_info);
}
#endif
delete ev;
if (unlikely(error))
@ -12033,7 +12020,7 @@ bool copy_cache_to_file_wrapped(IO_CACHE *body,
FILE *file,
bool do_wrap,
const char *delimiter,
bool is_verbose)
bool is_verbose /*TODO: remove */)
{
const my_off_t cache_size= my_b_tell(body);
@ -12066,8 +12053,7 @@ bool copy_cache_to_file_wrapped(IO_CACHE *body,
my_fprintf(file, fmt_frag, 1);
if (my_b_copy_to_file(body, file, SIZE_T_MAX))
goto err;
if (!is_verbose)
my_fprintf(file, fmt_delim, delimiter);
my_fprintf(file, fmt_delim, delimiter);
my_fprintf(file, fmt_binlog2, delimiter);
}
@ -12076,8 +12062,7 @@ bool copy_cache_to_file_wrapped(IO_CACHE *body,
my_fprintf(file, str_binlog);
if (my_b_copy_to_file(body, file, SIZE_T_MAX))
goto err;
if (!is_verbose)
my_fprintf(file, fmt_delim, delimiter);
my_fprintf(file, fmt_delim, delimiter);
}
reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE);
@ -12163,7 +12148,6 @@ bool copy_cache_to_string_wrapped(IO_CACHE *cache,
goto err;
str += (add_to_len= uint32(cache->end_of_file - (cache_size/2 + 1)));
to->length += add_to_len;
if (!is_verbose)
{
str += (add_to_len= sprintf(str , fmt_delim, delimiter));
to->length += add_to_len;
@ -12179,7 +12163,6 @@ bool copy_cache_to_string_wrapped(IO_CACHE *cache,
goto err;
str += cache->end_of_file;
to->length += (size_t)cache->end_of_file;
if (!is_verbose)
to->length += sprintf(str , fmt_delim, delimiter);
}
@ -12227,6 +12210,7 @@ bool Rows_log_event::print_helper(FILE *file,
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
IO_CACHE *const tail= &print_event_info->tail_cache;
#ifdef WHEN_FLASHBACK_REVIEW_READY
IO_CACHE *const sql= &print_event_info->review_sql_cache;
#endif
@ -12257,7 +12241,8 @@ bool Rows_log_event::print_helper(FILE *file,
if (copy_event_cache_to_file_and_reinit(head, file) ||
copy_cache_to_file_wrapped(body, file, do_print_encoded,
print_event_info->delimiter,
print_event_info->verbose))
print_event_info->verbose) ||
copy_event_cache_to_file_and_reinit(tail, file))
goto err;
}
else
@ -12275,6 +12260,11 @@ bool Rows_log_event::print_helper(FILE *file,
return 1;
output_buf.append(tmp_str.str, tmp_str.length);
my_free(tmp_str.str);
if (copy_event_cache_to_string_and_reinit(tail, &tmp_str))
return 1;
output_buf.append(tmp_str.str, tmp_str.length);
my_free(tmp_str.str);
#ifdef WHEN_FLASHBACK_REVIEW_READY
if (copy_event_cache_to_string_and_reinit(sql, &tmp_str))
return 1;
@ -15097,6 +15087,7 @@ st_print_event_info::st_print_event_info()
base64_output_mode=BASE64_OUTPUT_UNSPEC;
open_cached_file(&head_cache, NULL, NULL, 0, flags);
open_cached_file(&body_cache, NULL, NULL, 0, flags);
open_cached_file(&tail_cache, NULL, NULL, 0, flags);
#ifdef WHEN_FLASHBACK_REVIEW_READY
open_cached_file(&review_sql_cache, NULL, NULL, 0, flags);
#endif

View file

@ -889,6 +889,7 @@ typedef struct st_print_event_info
*/
IO_CACHE head_cache;
IO_CACHE body_cache;
IO_CACHE tail_cache;
#ifdef WHEN_FLASHBACK_REVIEW_READY
/* Storing the SQL for reviewing */
IO_CACHE review_sql_cache;
@ -899,6 +900,7 @@ typedef struct st_print_event_info
~st_print_event_info() {
close_cached_file(&head_cache);
close_cached_file(&body_cache);
close_cached_file(&tail_cache);
#ifdef WHEN_FLASHBACK_REVIEW_READY
close_cached_file(&review_sql_cache);
#endif
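
The tail_cache member added above pairs with the log_event.cc changes earlier in this diff: verbose row annotations are written into a third cache and flushed only after the head and body caches, so the wrapped BINLOG statement is never interleaved with "###" commentary. A minimal standalone sketch of that ordering, using plain std::string buffers rather than the real IO_CACHE API (names are illustrative):

// Sketch of the intended flush order for a verbose row event:
// header comments, then the contiguous base64 BINLOG statement, then the
// decoded "###" annotations collected separately in a tail buffer.
#include <iostream>
#include <string>

struct PrintBuffers {
  std::string head;   // "# at ..." style comments
  std::string body;   // BINLOG 'base64...' statement
  std::string tail;   // verbose ### pseudo-SQL annotations
};

static void flush_row_event(const PrintBuffers &b, std::ostream &out)
{
  out << b.head;   // event header first
  out << b.body;   // encoded statement stays contiguous and replayable
  out << b.tail;   // commentary follows, never splitting the body
}

int main()
{
  PrintBuffers b;
  b.head = "# at 429\n";
  b.body = "BINLOG '...base64 payload...'/*!*/;\n";
  b.tail = "### UPDATE `test`.`t1`\n### WHERE\n###   @1=1\n";
  flush_row_event(b, std::cout);
  return 0;
}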

View file

@ -1848,6 +1848,7 @@ bool Old_rows_log_event::print_helper(FILE *file,
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
IO_CACHE *const tail= &print_event_info->tail_cache;
bool do_print_encoded=
print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
print_event_info->base64_output_mode != BASE64_OUTPUT_NEVER &&
@ -1867,8 +1868,9 @@ bool Old_rows_log_event::print_helper(FILE *file,
{
if (copy_event_cache_to_file_and_reinit(head, file) ||
copy_cache_to_file_wrapped(body, file, do_print_encoded,
print_event_info->delimiter,
print_event_info->verbose))
print_event_info->delimiter,
print_event_info->verbose) ||
copy_event_cache_to_file_and_reinit(tail, file))
goto err;
}
return 0;

View file

@ -3837,7 +3837,8 @@ rpl_make_log_name(const char *opt,
const char *ext)
{
DBUG_ENTER("rpl_make_log_name");
DBUG_PRINT("enter", ("opt: %s, def: %s, ext: %s", opt, def, ext));
DBUG_PRINT("enter", ("opt: %s, def: %s, ext: %s", opt ? opt : "(null)",
def, ext));
char buff[FN_REFLEN];
const char *base= opt ? opt : def;
unsigned int options=

View file

@ -607,7 +607,8 @@ net_write_buff(NET *net, const uchar *packet, size_t len)
return net_real_write(net, packet, len) ? 1 : 0;
/* Send out rest of the blocks as full sized blocks */
}
memcpy((char*) net->write_pos,packet,len);
if (len)
memcpy((char*) net->write_pos,packet,len);
net->write_pos+= len;
return 0;
}

View file

@ -8195,13 +8195,15 @@ SEL_TREE *Item_bool_func::get_full_func_mm_tree(RANGE_OPT_PARAM *param,
table_map param_comp= ~(param->prev_tables | param->read_tables |
param->current_table);
#ifdef HAVE_SPATIAL
Field::geometry_type sav_geom_type;
const bool geometry= field_item->field->type() == MYSQL_TYPE_GEOMETRY;
if (geometry)
Field::geometry_type sav_geom_type= Field::GEOM_GEOMETRY, *geom_type=
field_item->field->type() == MYSQL_TYPE_GEOMETRY
? &(static_cast<Field_geom*>(field_item->field))->geom_type
: NULL;
if (geom_type)
{
sav_geom_type= ((Field_geom*) field_item->field)->geom_type;
sav_geom_type= *geom_type;
/* We have to be able to store all sorts of spatial features here */
((Field_geom*) field_item->field)->geom_type= Field::GEOM_GEOMETRY;
*geom_type= Field::GEOM_GEOMETRY;
}
#endif /*HAVE_SPATIAL*/
@ -8232,9 +8234,9 @@ SEL_TREE *Item_bool_func::get_full_func_mm_tree(RANGE_OPT_PARAM *param,
}
#ifdef HAVE_SPATIAL
if (geometry)
if (geom_type)
{
((Field_geom*) field_item->field)->geom_type= sav_geom_type;
*geom_type= sav_geom_type;
}
#endif /*HAVE_SPATIAL*/
DBUG_RETURN(ftree);
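
A small illustrative sketch of the save/override/restore pattern used in the hunks above, with generic types standing in for the server's Field_geom class: keeping a pointer to the member removes the separate bool flag and the repeated cast on the restore path. This is an assumed simplification, not the actual optimizer code:

// Temporarily widen a member through a saved pointer, then restore it.
#include <cassert>

enum GeomType { GEOM_POINT, GEOM_GEOMETRY };

struct GeomField { GeomType geom_type = GEOM_POINT; };

static void with_generic_geometry(GeomField *field_or_null)
{
  GeomType saved = GEOM_GEOMETRY;
  GeomType *slot = field_or_null ? &field_or_null->geom_type : nullptr;
  if (slot) {
    saved = *slot;          // remember the original value
    *slot = GEOM_GEOMETRY;  // temporarily accept any spatial feature
  }
  /* ... work that needs the widened type would run here ... */
  if (slot)
    *slot = saved;          // restore exactly what was there before
}

int main()
{
  GeomField f;
  with_generic_geometry(&f);
  assert(f.geom_type == GEOM_POINT); // original value restored
  with_generic_geometry(nullptr);    // non-geometry case: nothing touched
  return 0;
}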

View file

@ -349,16 +349,22 @@ Rpl_filter::set_do_table(const char* table_spec)
int status;
if (do_table_inited)
my_hash_reset(&do_table);
status= parse_filter_rule(table_spec, &Rpl_filter::add_do_table);
if (!do_table.records)
{
my_hash_free(&do_table);
do_table_inited= 0;
}
status= parse_filter_rule(table_spec, &Rpl_filter::add_do_table);
if (do_table_inited && status)
{
if (!do_table.records)
{
my_hash_free(&do_table);
do_table_inited= 0;
}
}
return status;
}
@ -369,16 +375,22 @@ Rpl_filter::set_ignore_table(const char* table_spec)
int status;
if (ignore_table_inited)
my_hash_reset(&ignore_table);
status= parse_filter_rule(table_spec, &Rpl_filter::add_ignore_table);
if (!ignore_table.records)
{
my_hash_free(&ignore_table);
ignore_table_inited= 0;
}
status= parse_filter_rule(table_spec, &Rpl_filter::add_ignore_table);
if (ignore_table_inited && status)
{
if (!ignore_table.records)
{
my_hash_free(&ignore_table);
ignore_table_inited= 0;
}
}
return status;
}
@ -411,14 +423,20 @@ Rpl_filter::set_wild_do_table(const char* table_spec)
int status;
if (wild_do_table_inited)
{
free_string_array(&wild_do_table);
wild_do_table_inited= 0;
}
status= parse_filter_rule(table_spec, &Rpl_filter::add_wild_do_table);
if (!wild_do_table.elements)
if (wild_do_table_inited && status)
{
delete_dynamic(&wild_do_table);
wild_do_table_inited= 0;
if (!wild_do_table.elements)
{
delete_dynamic(&wild_do_table);
wild_do_table_inited= 0;
}
}
return status;
@ -431,14 +449,20 @@ Rpl_filter::set_wild_ignore_table(const char* table_spec)
int status;
if (wild_ignore_table_inited)
{
free_string_array(&wild_ignore_table);
wild_ignore_table_inited= 0;
}
status= parse_filter_rule(table_spec, &Rpl_filter::add_wild_ignore_table);
if (!wild_ignore_table.elements)
if (wild_ignore_table_inited && status)
{
delete_dynamic(&wild_ignore_table);
wild_ignore_table_inited= 0;
if (!wild_ignore_table.elements)
{
delete_dynamic(&wild_ignore_table);
wild_ignore_table_inited= 0;
}
}
return status;
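
A behavioural sketch of what the reordered set_*_table() functions above, together with the rpl_filter test changes earlier in this diff, are meant to guarantee: each assignment fully replaces the previous filter, and an empty specification leaves the filter empty. std::set stands in for the server's HASH/DYNAMIC_ARRAY structures; this is an assumption-laden illustration, not the replication filter implementation:

// Replace-not-append semantics: clear the old entries, then parse the new spec.
#include <cassert>
#include <set>
#include <sstream>
#include <string>

static void set_do_table(std::set<std::string> &filter, const std::string &spec)
{
  filter.clear();                 // drop whatever was configured before
  std::stringstream ss(spec);
  std::string entry;
  while (std::getline(ss, entry, ','))
    if (!entry.empty())
      filter.insert(entry);
}

int main()
{
  std::set<std::string> do_table;
  set_do_table(do_table, "test.t1,test.t2,test.t3");
  assert(do_table.size() == 3);
  set_do_table(do_table, "");     // an empty spec now yields an empty filter
  assert(do_table.empty());
  return 0;
}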

View file

@ -1815,15 +1815,13 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
if (table->vfield)
{
my_bool abort_on_warning= thd->abort_on_warning;
/*
We have not yet called update_virtual_fields(VOL_UPDATE_FOR_READ)
in handler methods for the just read row in record[1].
*/
table->move_fields(table->field, table->record[1], table->record[0]);
thd->abort_on_warning= 0;
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE);
thd->abort_on_warning= abort_on_warning;
if (table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE))
goto err;
table->move_fields(table->field, table->record[0], table->record[1]);
}
if (info->handle_duplicates == DUP_UPDATE)

View file

@ -23512,6 +23512,9 @@ check_reverse_order:
else if (select && select->quick)
select->quick->need_sorted_output();
tab->read_record.unlock_row= (tab->type == JT_EQ_REF) ?
join_read_key_unlock_row : rr_unlock_row;
} // QEP has been modified
/*

View file

@ -388,10 +388,10 @@ void print_sjm(SJ_MATERIALIZATION_INFO *sjm)
/*
Debugging help: force List<...>::elem function not be removed as unused.
*/
Item* (List<Item>:: *dbug_list_item_elem_ptr)(uint)= &List<Item>::elem;
Item_equal* (List<Item_equal>:: *dbug_list_item_equal_elem_ptr)(uint)=
Item* (List<Item>::*dbug_list_item_elem_ptr)(uint)= &List<Item>::elem;
Item_equal* (List<Item_equal>::*dbug_list_item_equal_elem_ptr)(uint)=
&List<Item_equal>::elem;
TABLE_LIST* (List<TABLE_LIST>:: *dbug_list_table_list_elem_ptr)(uint) =
TABLE_LIST* (List<TABLE_LIST>::*dbug_list_table_list_elem_ptr)(uint) =
&List<TABLE_LIST>::elem;
#endif

View file

@ -2671,7 +2671,14 @@ int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len)
definer_host.length= 0;
}
stmt_query.append(STRING_WITH_LEN("CREATE "));
const LEX_CSTRING command[2]=
{{ C_STRING_WITH_LEN("CREATE ") },
{ C_STRING_WITH_LEN("CREATE OR REPLACE ") }};
if (thd->lex->create_info.or_replace())
stmt_query.append(command[1]);
else
stmt_query.append(command[0]);
append_definer(thd, &stmt_query, &definer_user, &definer_host);

View file

@ -104,6 +104,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "srv0mon.h"
#include "srv0srv.h"
#include "srv0start.h"
#include "rem0rec.h"
#ifdef UNIV_DEBUG
#include "trx0purge.h"
#endif /* UNIV_DEBUG */
@ -5647,14 +5648,13 @@ innobase_vcol_build_templ(
mysql_row_templ_t* templ,
ulint col_no)
{
if (col->is_virtual()) {
templ->is_virtual = true;
templ->col_no = col_no;
templ->col_no = col_no;
templ->is_virtual = col->is_virtual();
if (templ->is_virtual) {
templ->clust_rec_field_no = ULINT_UNDEFINED;
templ->rec_field_no = col->ind;
} else {
templ->is_virtual = false;
templ->col_no = col_no;
templ->clust_rec_field_no = dict_col_get_clust_pos(
col, clust_index);
ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED);
@ -20635,64 +20635,53 @@ innobase_get_field_from_update_vector(
Allocate a heap and record for calculating virtual fields
Used mainly for virtual fields in indexes
@param[in] thd MariaDB THD
@param[in] index Index in use
@param[out] heap Heap that holds temporary row
@param[in,out] table MariaDB table
@param[out] record Pointer to allocated MariaDB record
@param[out] storage Internal storage for blobs etc
@param[in] thd MariaDB THD
@param[in] index Index in use
@param[out] heap Heap that holds temporary row
@param[in,out] table MariaDB table
@param[out] record Pointer to allocated MariaDB record
@param[out] storage Internal storage for blobs etc
@retval false on success
@retval true on malloc failure or failed to open the maria table
@retval true on success
@retval false on malloc failure or failed to open the maria table
for purge thread.
*/
bool innobase_allocate_row_for_vcol(
THD * thd,
dict_index_t* index,
mem_heap_t** heap,
TABLE** table,
byte** record,
VCOL_STORAGE** storage)
bool innobase_allocate_row_for_vcol(THD *thd, dict_index_t *index,
mem_heap_t **heap, TABLE **table,
VCOL_STORAGE *storage)
{
TABLE *maria_table;
String *blob_value_storage;
if (!*table)
*table= innodb_find_table_for_vc(thd, index->table);
TABLE *maria_table;
String *blob_value_storage;
if (!*table)
*table = innodb_find_table_for_vc(thd, index->table);
/* For purge thread, there is a possibility that the table could have
been dropped, corrupted or become inaccessible. */
if (!*table)
return true;
maria_table= *table;
if (!*heap && !(*heap= mem_heap_create(srv_page_size)))
{
*storage= 0;
return TRUE;
}
*record= static_cast<byte*>(mem_heap_alloc(*heap,
maria_table->s->reclength));
*storage= static_cast<VCOL_STORAGE*>
(mem_heap_alloc(*heap, sizeof(**storage)));
blob_value_storage= static_cast<String*>
(mem_heap_alloc(*heap,
maria_table->s->virtual_not_stored_blob_fields *
sizeof(String)));
if (!*record || !*storage || !blob_value_storage)
{
*storage= 0;
return TRUE;
}
(*storage)->maria_table= maria_table;
(*storage)->innobase_record= *record;
(*storage)->maria_record= maria_table->field[0]->record_ptr();
(*storage)->blob_value_storage= blob_value_storage;
/* For purge thread, there is a possibility that the table could have
been dropped, corrupted or become inaccessible. */
if (!*table)
return false;
maria_table = *table;
if (!*heap && !(*heap = mem_heap_create(srv_page_size)))
return false;
maria_table->move_fields(maria_table->field, *record,
(*storage)->maria_record);
maria_table->remember_blob_values(blob_value_storage);
uchar *record = static_cast<byte *>(mem_heap_alloc(*heap,
maria_table->s->reclength));
return FALSE;
size_t len = maria_table->s->virtual_not_stored_blob_fields * sizeof(String);
blob_value_storage = static_cast<String *>(mem_heap_alloc(*heap, len));
if (!record || !blob_value_storage)
return false;
storage->maria_table = maria_table;
storage->innobase_record = record;
storage->maria_record = maria_table->field[0]->record_ptr();
storage->blob_value_storage = blob_value_storage;
maria_table->move_fields(maria_table->field, record, storage->maria_record);
maria_table->remember_blob_values(blob_value_storage);
return true;
}
@ -20707,6 +20696,13 @@ void innobase_free_row_for_vcol(VCOL_STORAGE *storage)
}
void innobase_report_computed_value_failed(dtuple_t *row)
{
ib::error() << "Compute virtual column values failed for "
<< rec_printer(row).str();
}
/** Get the computed value by supplying the base column values.
@param[in,out] row the data row
@param[in] col virtual column
@ -20834,13 +20830,6 @@ innobase_get_computed_value(
dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set);
if (ret != 0) {
// FIXME: Why this error message is macro-hidden?
#ifdef INNODB_VIRTUAL_DEBUG
ib::warn() << "Compute virtual column values failed ";
fputs("InnoDB: Cannot compute value for following record ",
stderr);
dtuple_print(stderr, row);
#endif /* INNODB_VIRTUAL_DEBUG */
DBUG_RETURN(NULL);
}

View file

@ -1283,16 +1283,17 @@ struct rec_offsets_print
@param[in,out] o output stream
@param[in] r record to display
@return the output stream */
ATTRIBUTE_COLD
std::ostream&
operator<<(std::ostream& o, const rec_offsets_print& r);
# ifndef DBUG_OFF
/** Pretty-printer of records and tuples */
class rec_printer : public std::ostringstream {
public:
/** Construct a pretty-printed record.
@param rec record with header
@param offsets rec_get_offsets(rec, ...) */
ATTRIBUTE_COLD
rec_printer(const rec_t* rec, const rec_offs* offsets)
:
std::ostringstream ()
@ -1306,6 +1307,7 @@ public:
@param rec record, possibly lacking header
@param info rec_get_info_bits(rec)
@param offsets rec_get_offsets(rec, ...) */
ATTRIBUTE_COLD
rec_printer(const rec_t* rec, ulint info, const rec_offs* offsets)
:
std::ostringstream ()
@ -1315,6 +1317,7 @@ public:
/** Construct a pretty-printed tuple.
@param tuple data tuple */
ATTRIBUTE_COLD
rec_printer(const dtuple_t* tuple)
:
std::ostringstream ()
@ -1325,6 +1328,7 @@ public:
/** Construct a pretty-printed tuple.
@param field array of data tuple fields
@param n number of fields */
ATTRIBUTE_COLD
rec_printer(const dfield_t* field, ulint n)
:
std::ostringstream ()
@ -1341,7 +1345,7 @@ private:
/** Assignment operator */
rec_printer& operator=(const rec_printer& other);
};
# endif /* !DBUG_OFF */
# ifdef UNIV_DEBUG
/** Read the DB_TRX_ID of a clustered index record.

View file

@ -848,6 +848,8 @@ struct VCOL_STORAGE
byte *innobase_record;
byte *maria_record;
String *blob_value_storage;
VCOL_STORAGE(): maria_table(NULL), innobase_record(NULL),
maria_record(NULL), blob_value_storage(NULL) {}
};
/**
@ -870,12 +872,48 @@ bool innobase_allocate_row_for_vcol(
dict_index_t* index,
mem_heap_t** heap,
TABLE** table,
byte** record,
VCOL_STORAGE** storage);
VCOL_STORAGE* storage);
/** Free memory allocated by innobase_allocate_row_for_vcol() */
void innobase_free_row_for_vcol(VCOL_STORAGE *storage);
class ib_vcol_row
{
VCOL_STORAGE storage;
public:
mem_heap_t *heap;
ib_vcol_row(mem_heap_t *heap) : heap(heap) {}
byte *record(THD *thd, dict_index_t *index, TABLE **table)
{
if (!storage.innobase_record)
{
bool ok = innobase_allocate_row_for_vcol(thd, index, &heap, table,
&storage);
if (!ok)
return NULL;
}
return storage.innobase_record;
};
~ib_vcol_row()
{
if (heap)
{
if (storage.innobase_record)
innobase_free_row_for_vcol(&storage);
mem_heap_free(heap);
}
}
};
/** Report virtual value computation failure in ib::error
@param[in] row the data row
*/
ATTRIBUTE_COLD
void innobase_report_computed_value_failed(dtuple_t *row);
/** Get the computed value by supplying the base column values.
@param[in,out] row the data row
@param[in] col virtual column

View file

@ -29,6 +29,7 @@ Created 9/30/1995 Heikki Tuuri
#ifdef HAVE_LINUX_LARGE_PAGES
# include "mysqld.h"
#endif
#include "my_valgrind.h"
/* FreeBSD for example has only MAP_ANON, Linux has MAP_ANONYMOUS and
MAP_ANON but MAP_ANON is marked as deprecated */

View file

@ -879,16 +879,15 @@ row_ins_invalidate_query_cache(
@param[in] index clustered index of child table
@param[in] node parent update node
@param[in] foreign foreign key information
@param[out] err error code. */
@return error code. */
static
void
dberr_t
row_ins_foreign_fill_virtual(
upd_node_t* cascade,
const rec_t* rec,
dict_index_t* index,
upd_node_t* node,
dict_foreign_t* foreign,
dberr_t* err)
dict_foreign_t* foreign)
{
THD* thd = current_thd;
row_ext_t* ext;
@ -897,10 +896,7 @@ row_ins_foreign_fill_virtual(
const rec_offs* offsets =
rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &cascade->heap);
mem_heap_t* v_heap = NULL;
TABLE* mysql_table= NULL;
VCOL_STORAGE* vcol_storage= NULL;
byte* record;
upd_t* update = cascade->update;
ulint n_v_fld = index->table->n_v_def;
ulint n_diff;
@ -920,12 +916,10 @@ row_ins_foreign_fill_virtual(
innobase_init_vc_templ(index->table);
}
if (innobase_allocate_row_for_vcol(thd, index, &v_heap,
&mysql_table,
&record, &vcol_storage)) {
if (v_heap) mem_heap_free(v_heap);
*err = DB_OUT_OF_MEMORY;
goto func_exit;
ib_vcol_row vc(NULL);
uchar *record = vc.record(thd, index, &mysql_table);
if (!record) {
return DB_OUT_OF_MEMORY;
}
for (ulint i = 0; i < n_v_fld; i++) {
@ -941,12 +935,11 @@ row_ins_foreign_fill_virtual(
dfield_t* vfield = innobase_get_computed_value(
update->old_vrow, col, index,
&v_heap, update->heap, NULL, thd, mysql_table,
&vc.heap, update->heap, NULL, thd, mysql_table,
record, NULL, NULL, NULL);
if (vfield == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
goto func_exit;
return DB_COMPUTE_VALUE_FAILED;
}
upd_field = upd_get_nth_field(update, n_diff);
@ -971,13 +964,12 @@ row_ins_foreign_fill_virtual(
dfield_t* new_vfield = innobase_get_computed_value(
update->old_vrow, col, index,
&v_heap, update->heap, NULL, thd,
&vc.heap, update->heap, NULL, thd,
mysql_table, record, NULL,
node->update, foreign);
if (new_vfield == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
goto func_exit;
return DB_COMPUTE_VALUE_FAILED;
}
dfield_copy(&(upd_field->new_val), new_vfield);
@ -987,14 +979,7 @@ row_ins_foreign_fill_virtual(
}
update->n_fields = n_diff;
*err = DB_SUCCESS;
func_exit:
if (v_heap) {
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(v_heap);
}
return DB_SUCCESS;
}
#ifdef WITH_WSREP
@ -1280,9 +1265,9 @@ row_ins_foreign_check_on_constraint(
if (foreign->v_cols != NULL
&& foreign->v_cols->size() > 0) {
row_ins_foreign_fill_virtual(
err = row_ins_foreign_fill_virtual(
cascade, clust_rec, clust_index,
node, foreign, &err);
node, foreign);
if (err != DB_SUCCESS) {
goto nonstandard_exit_func;
@ -1318,9 +1303,9 @@ row_ins_foreign_check_on_constraint(
node, foreign, tmp_heap, trx);
if (foreign->v_cols && !foreign->v_cols->empty()) {
row_ins_foreign_fill_virtual(
err = row_ins_foreign_fill_virtual(
cascade, clust_rec, clust_index,
node, foreign, &err);
node, foreign);
if (err != DB_SUCCESS) {
goto nonstandard_exit_func;
@ -1378,21 +1363,20 @@ row_ins_foreign_check_on_constraint(
btr_pcur_store_position(cascade->pcur, mtr);
}
#ifdef WITH_WSREP
err = wsrep_append_foreign_key(trx, foreign, clust_rec, clust_index,
FALSE, WSREP_SERVICE_KEY_EXCLUSIVE);
if (err != DB_SUCCESS) {
ib::info() << "WSREP: foreign key append failed: " << err;
goto nonstandard_exit_func;
}
#endif /* WITH_WSREP */
mtr_commit(mtr);
ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON);
cascade->state = UPD_NODE_UPDATE_CLUSTERED;
#ifdef WITH_WSREP
err = wsrep_append_foreign_key(trx, foreign, cascade->pcur->old_rec,
clust_index,
FALSE, WSREP_SERVICE_KEY_EXCLUSIVE);
if (err != DB_SUCCESS) {
fprintf(stderr,
"WSREP: foreign key append failed: %d\n", err);
} else
#endif /* WITH_WSREP */
err = row_update_cascade_for_mysql(thr, cascade,
foreign->foreign_table);
@ -1912,6 +1896,39 @@ exit_func:
DBUG_RETURN(err);
}
/** Sets the values of the dtuple fields in ref_entry from the values of
foreign columns in entry.
@param[in] foreign foreign key constraint
@param[in] index clustered index
@param[in] entry tuple of clustered index
@param[out] ref_entry tuple of foreign columns
@return true if all foreign key fields are present in the clustered index */
static
bool row_ins_foreign_index_entry(dict_foreign_t *foreign,
const dict_index_t *index,
const dtuple_t *entry,
dtuple_t *ref_entry)
{
for (ulint i= 0; i < foreign->n_fields; i++)
{
for (ulint j= 0; j < index->n_fields; j++)
{
const char *col_name= dict_table_get_col_name(
index->table, dict_index_get_nth_col_no(index, j));
if (0 == innobase_strcasecmp(col_name, foreign->foreign_col_names[i]))
{
dfield_copy(&ref_entry->fields[i], &entry->fields[j]);
goto got_match;
}
}
return false;
got_match:
continue;
}
return true;
}
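row_ins_foreign_index_entry() above matches each foreign-key column to a
clustered-index field by case-insensitive name and copies the corresponding
value into ref_entry, failing if any column has no match. A minimal standalone
sketch of that matching step, with plain std containers standing in for
dict_index_t and dtuple_t (illustrative names only):

#include <algorithm>
#include <cctype>
#include <cstddef>
#include <string>
#include <vector>

// Case-insensitive name comparison, standing in for innobase_strcasecmp().
static bool eq_ci(const std::string &a, const std::string &b) {
  return a.size() == b.size() &&
         std::equal(a.begin(), a.end(), b.begin(), [](char x, char y) {
           return std::tolower((unsigned char) x) == std::tolower((unsigned char) y);
         });
}

// Copy, for every foreign-key column, the matching clustered-index field
// value into ref_entry; return false if a column is missing from the index.
static bool build_ref_tuple(const std::vector<std::string> &index_cols,
                            const std::vector<int> &entry_values,
                            const std::vector<std::string> &fk_cols,
                            std::vector<int> &ref_entry) {
  ref_entry.clear();
  for (const std::string &fk : fk_cols) {
    bool found = false;
    for (std::size_t j = 0; j < index_cols.size(); j++)
      if (eq_ci(index_cols[j], fk)) {
        ref_entry.push_back(entry_values[j]);   // analogue of dfield_copy()
        found = true;
        break;
      }
    if (!found)
      return false;                             // DB_NO_REFERENCED_ROW path
  }
  return true;
}

int main() {
  std::vector<int> ref;
  bool ok = build_ref_tuple({"id", "Parent_Id", "note"}, {1, 42, 7},
                            {"parent_id"}, ref);
  return ok && ref[0] == 42 ? 0 : 1;
}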
/***************************************************************//**
Checks if foreign key constraints fail for an index entry. If index
is not mentioned in any constraint, this function does nothing,
@ -1930,9 +1947,10 @@ row_ins_check_foreign_constraints(
que_thr_t* thr) /*!< in: query thread */
{
dict_foreign_t* foreign;
dberr_t err;
dberr_t err = DB_SUCCESS;
trx_t* trx;
ibool got_s_lock = FALSE;
mem_heap_t* heap = NULL;
DBUG_ASSERT(index->is_primary() == pk);
@ -1942,13 +1960,36 @@ row_ins_check_foreign_constraints(
"foreign_constraint_check_for_ins");
for (dict_foreign_set::iterator it = table->foreign_set.begin();
it != table->foreign_set.end();
err == DB_SUCCESS && it != table->foreign_set.end();
++it) {
foreign = *it;
if (foreign->foreign_index == index
|| (pk && !foreign->foreign_index)) {
dtuple_t* ref_tuple = entry;
if (UNIV_UNLIKELY(!foreign->foreign_index)) {
/* Change primary key entry to
foreign key index entry */
if (!heap) {
heap = mem_heap_create(1000);
} else {
mem_heap_empty(heap);
}
ref_tuple = dtuple_create(
heap, foreign->n_fields);
dtuple_set_n_fields_cmp(
ref_tuple, foreign->n_fields);
if (!row_ins_foreign_index_entry(
foreign, index, entry, ref_tuple)) {
err = DB_NO_REFERENCED_ROW;
break;
}
}
dict_table_t* ref_table = NULL;
dict_table_t* referenced_table
= foreign->referenced_table;
@ -1976,7 +2017,7 @@ row_ins_check_foreign_constraints(
table from being dropped while the check is running. */
err = row_ins_check_foreign_constraint(
TRUE, foreign, table, entry, thr);
TRUE, foreign, table, ref_tuple, thr);
if (referenced_table) {
foreign->foreign_table->dec_fk_checks();
@ -1989,15 +2030,14 @@ row_ins_check_foreign_constraints(
if (ref_table != NULL) {
dict_table_close(ref_table, FALSE, FALSE);
}
if (err != DB_SUCCESS) {
return(err);
}
}
}
return(DB_SUCCESS);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return err;
}
/***************************************************************//**

View file

@ -520,8 +520,7 @@ row_merge_buf_add(
ulint bucket = 0;
doc_id_t write_doc_id;
ulint n_row_added = 0;
VCOL_STORAGE* vcol_storage= 0;
byte* record;
VCOL_STORAGE vcol_storage;
DBUG_ENTER("row_merge_buf_add");
if (buf->n_tuples >= buf->max_tuples) {
@ -555,23 +554,16 @@ row_merge_buf_add(
for (i = 0; i < n_fields; i++, field++, ifield++) {
ulint len;
const dict_col_t* col;
ulint col_no;
ulint fixed_len;
const dfield_t* row_field;
col = ifield->col;
const dict_v_col_t* v_col = NULL;
if (col->is_virtual()) {
v_col = reinterpret_cast<const dict_v_col_t*>(col);
}
col_no = dict_col_get_no(col);
const dict_col_t* const col = ifield->col;
const dict_v_col_t* const v_col = col->is_virtual()
? reinterpret_cast<const dict_v_col_t*>(col)
: NULL;
/* Process the Doc ID column */
if (*doc_id > 0
&& col_no == index->table->fts->doc_col
&& !col->is_virtual()) {
if (!v_col && *doc_id
&& col->ind == index->table->fts->doc_col) {
fts_write_doc_id((byte*) &write_doc_id, *doc_id);
/* Note: field->data now points to a value on the
@ -590,12 +582,15 @@ row_merge_buf_add(
field->type.len = ifield->col->len;
} else {
/* Use callback to get the virtual column value */
if (col->is_virtual()) {
if (v_col) {
dict_index_t* clust_index
= dict_table_get_first_index(new_table);
if (!vcol_storage &&
innobase_allocate_row_for_vcol(trx->mysql_thd, clust_index, v_heap, &my_table, &record, &vcol_storage)) {
if (!vcol_storage.innobase_record &&
!innobase_allocate_row_for_vcol(
trx->mysql_thd, clust_index,
v_heap, &my_table,
&vcol_storage)) {
*err = DB_OUT_OF_MEMORY;
goto error;
}
@ -603,8 +598,8 @@ row_merge_buf_add(
row_field = innobase_get_computed_value(
row, v_col, clust_index,
v_heap, NULL, ifield, trx->mysql_thd,
my_table, record, old_table, NULL,
NULL);
my_table, vcol_storage.innobase_record,
old_table, NULL, NULL);
if (row_field == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
@ -612,7 +607,8 @@ row_merge_buf_add(
}
dfield_copy(field, row_field);
} else {
row_field = dtuple_get_nth_field(row, col_no);
row_field = dtuple_get_nth_field(row,
col->ind);
dfield_copy(field, row_field);
}
@ -718,7 +714,7 @@ row_merge_buf_add(
} else if (!ext) {
} else if (dict_index_is_clust(index)) {
/* Flag externally stored fields. */
const byte* buf = row_ext_lookup(ext, col_no,
const byte* buf = row_ext_lookup(ext, col->ind,
&len);
if (UNIV_LIKELY_NULL(buf)) {
ut_a(buf != field_ref_zero);
@ -729,9 +725,9 @@ row_merge_buf_add(
len = dfield_get_len(field);
}
}
} else if (!col->is_virtual()) {
} else if (!v_col) {
/* Only non-virtual column are stored externally */
const byte* buf = row_ext_lookup(ext, col_no,
const byte* buf = row_ext_lookup(ext, col->ind,
&len);
if (UNIV_LIKELY_NULL(buf)) {
ut_a(buf != field_ref_zero);
@ -846,13 +842,13 @@ row_merge_buf_add(
}
end:
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
if (vcol_storage.innobase_record)
innobase_free_row_for_vcol(&vcol_storage);
DBUG_RETURN(n_row_added);
error:
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
if (vcol_storage.innobase_record)
innobase_free_row_for_vcol(&vcol_storage);
DBUG_RETURN(0);
}

View file

@ -156,11 +156,15 @@ fields are compared with collation!
must be protected by a page s-latch
@param[in] clust_index clustered index
@param[in] thr query thread
@return TRUE if the secondary record is equal to the corresponding
fields in the clustered record, when compared with collation;
FALSE if not equal or if the clustered record has been marked for deletion */
@retval DB_COMPUTE_VALUE_FAILED in case of virtual column value computation
failure.
@retval DB_SUCCESS_LOCKED_REC if the secondary record is equal to the
corresponding fields in the clustered record, when compared with
collation;
@retval DB_SUCCESS if not equal or if the clustered record has been marked
for deletion */
static
ibool
dberr_t
row_sel_sec_rec_is_for_clust_rec(
const rec_t* sec_rec,
dict_index_t* sec_index,
@ -178,9 +182,6 @@ row_sel_sec_rec_is_for_clust_rec(
rec_offs sec_offsets_[REC_OFFS_SMALL_SIZE];
rec_offs* clust_offs = clust_offsets_;
rec_offs* sec_offs = sec_offsets_;
ibool is_equal = TRUE;
VCOL_STORAGE* vcol_storage= 0;
byte* record;
rec_offs_init(clust_offsets_);
rec_offs_init(sec_offsets_);
@ -195,10 +196,11 @@ row_sel_sec_rec_is_for_clust_rec(
it is not visible in the read view. Besides,
if there are any externally stored columns,
some of them may have already been purged. */
return(FALSE);
return DB_SUCCESS;
}
heap = mem_heap_create(256);
ib_vcol_row vc(heap);
clust_offs = rec_get_offsets(clust_rec, clust_index, clust_offs,
true, ULINT_UNDEFINED, &heap);
@ -227,16 +229,9 @@ row_sel_sec_rec_is_for_clust_rec(
dfield_t* vfield;
row_ext_t* ext;
if (!vcol_storage)
{
TABLE *mysql_table= thr->prebuilt->m_mysql_table;
innobase_allocate_row_for_vcol(thr_get_trx(thr)->mysql_thd,
clust_index,
&heap,
&mysql_table,
&record,
&vcol_storage);
}
byte *record = vc.record(thr_get_trx(thr)->mysql_thd,
clust_index,
&thr->prebuilt->m_mysql_table);
v_col = reinterpret_cast<const dict_v_col_t*>(col);
@ -253,6 +248,10 @@ row_sel_sec_rec_is_for_clust_rec(
thr->prebuilt->m_mysql_table,
record, NULL, NULL, NULL);
if (vfield == NULL) {
innobase_report_computed_value_failed(row);
return DB_COMPUTE_VALUE_FAILED;
}
clust_len = vfield->len;
clust_field = static_cast<byte*>(vfield->data);
} else {
@ -286,7 +285,7 @@ row_sel_sec_rec_is_for_clust_rec(
sec_field, sec_len,
ifield->prefix_len,
clust_index->table)) {
goto inequal;
return DB_SUCCESS;
}
continue;
@ -321,28 +320,19 @@ row_sel_sec_rec_is_for_clust_rec(
rtr_read_mbr(sec_field, &sec_mbr);
if (!MBR_EQUAL_CMP(&sec_mbr, &tmp_mbr)) {
is_equal = FALSE;
goto func_exit;
return DB_SUCCESS;
}
} else {
if (0 != cmp_data_data(col->mtype, col->prtype,
clust_field, len,
sec_field, sec_len)) {
inequal:
is_equal = FALSE;
goto func_exit;
return DB_SUCCESS;
}
}
}
func_exit:
if (UNIV_LIKELY_NULL(heap)) {
if (UNIV_LIKELY_NULL(vcol_storage))
innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(heap);
}
return(is_equal);
return DB_SUCCESS_LOCKED_REC;
}
/*********************************************************************//**
@ -908,7 +898,7 @@ row_sel_get_clust_rec(
dict_index_t* index;
rec_t* clust_rec;
rec_t* old_vers;
dberr_t err;
dberr_t err = DB_SUCCESS;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
@ -950,7 +940,7 @@ row_sel_get_clust_rec(
clustered index record did not exist in the read view of
trx. */
goto func_exit;
goto err_exit;
}
offsets = rec_get_offsets(clust_rec, index, offsets, true,
@ -1013,7 +1003,7 @@ row_sel_get_clust_rec(
clust_rec = old_vers;
if (clust_rec == NULL) {
goto func_exit;
goto err_exit;
}
}
@ -1030,13 +1020,14 @@ row_sel_get_clust_rec(
visit through secondary index records that would not really
exist in our snapshot. */
if ((old_vers
|| rec_get_deleted_flag(rec, dict_table_is_comp(
plan->table)))
&& !row_sel_sec_rec_is_for_clust_rec(rec, plan->index,
clust_rec, index,
thr)) {
goto func_exit;
if (old_vers || rec_get_deleted_flag(rec, dict_table_is_comp(
plan->table))) {
err = row_sel_sec_rec_is_for_clust_rec(rec,
plan->index, clust_rec,
index, thr);
if (err != DB_SUCCESS_LOCKED_REC) {
goto err_exit;
}
}
}
@ -1049,7 +1040,6 @@ row_sel_get_clust_rec(
row_sel_fetch_columns(index, clust_rec, offsets,
UT_LIST_GET_FIRST(plan->columns));
*out_rec = clust_rec;
func_exit:
err = DB_SUCCESS;
err_exit:
if (UNIV_LIKELY_NULL(heap)) {
@ -3455,10 +3445,18 @@ Row_sel_get_clust_rec_for_mysql::operator()(
|| trx->isolation_level <= TRX_ISO_READ_UNCOMMITTED
|| dict_index_is_spatial(sec_index)
|| rec_get_deleted_flag(rec, dict_table_is_comp(
sec_index->table)))
&& !row_sel_sec_rec_is_for_clust_rec(
rec, sec_index, clust_rec, clust_index, thr)) {
clust_rec = NULL;
sec_index->table)))) {
err = row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
clust_rec, clust_index, thr);
switch (err) {
case DB_SUCCESS:
clust_rec = NULL;
break;
case DB_SUCCESS_LOCKED_REC:
break;
default:
goto err_exit;
}
}
err = DB_SUCCESS;
@ -4082,6 +4080,10 @@ bool row_search_with_covering_prefix(
const dict_index_t* index = prebuilt->index;
ut_ad(!dict_index_is_clust(index));
if (dict_index_is_spatial(index)) {
return false;
}
if (!srv_prefix_index_cluster_optimization) {
return false;
}
@ -4092,9 +4094,16 @@ bool row_search_with_covering_prefix(
return false;
}
/* We can avoid a clustered index lookup if
all of the following hold:
(1) all columns are in the secondary index
(2) for every column that is indexed only by a prefix,
the stored value is shorter than the prefix length
This optimization can avoid many IOs for certain schemas. */
for (ulint i = 0; i < prebuilt->n_template; i++) {
mysql_row_templ_t* templ = prebuilt->mysql_template + i;
ulint j = templ->rec_prefix_field_no;
ut_ad(!templ->mbminlen == !templ->mbmaxlen);
/** Condition (1) : is the field in the index. */
if (j == ULINT_UNDEFINED) {
@ -4104,33 +4113,29 @@ bool row_search_with_covering_prefix(
/** Condition (2): if this is a prefix index, the
row's value must be shorter than the prefix length. */
if (!templ->rec_field_is_prefix) {
if (!templ->rec_field_is_prefix
|| rec_offs_nth_sql_null(offsets, j)) {
continue;
}
ulint rec_size = rec_offs_nth_size(offsets, j);
const dict_field_t* field = dict_index_get_nth_field(index, j);
ulint max_chars = field->prefix_len / templ->mbmaxlen;
ut_a(field->prefix_len > 0);
if (rec_size < max_chars) {
/* Record in bytes shorter than the index
prefix length in char. */
if (!field->prefix_len) {
continue;
}
if (rec_size * templ->mbminlen >= field->prefix_len) {
const ulint rec_size = rec_offs_nth_size(offsets, j);
if (rec_size >= field->prefix_len) {
/* The byte length of the record is not
shorter than the index prefix, so the
indexed value may have been truncated. */
return false;
}
size_t num_chars = rec_field_len_in_chars(
field->col, j, rec, offsets);
if (num_chars >= max_chars) {
if (templ->mbminlen != templ->mbmaxlen
&& rec_field_len_in_chars(field->col, j, rec, offsets)
>= field->prefix_len / templ->mbmaxlen) {
/* The number of characters in the record reaches
or exceeds the index prefix character limit. */
return false;
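In the rewritten covering-prefix check above, a prefix-indexed column value
read from the secondary index can be used as-is only if it is shorter than the
index prefix both in bytes and, for variable-width character sets, in
characters; otherwise the clustered record must be fetched. A simplified
standalone sketch of that per-field decision (illustrative names; prefix_len
is in bytes, as in dict_field_t::prefix_len):

#include <cstddef>
#include <cstdio>

// Return true if the stored secondary-index value certainly covers the full
// column value, so the clustered index lookup can be skipped for this field.
static bool prefix_covers_field(std::size_t rec_bytes, std::size_t rec_chars,
                                std::size_t prefix_len, std::size_t mbminlen,
                                std::size_t mbmaxlen) {
  if (rec_bytes >= prefix_len)
    return false;                 // the value may have been truncated
  if (mbminlen != mbmaxlen        // variable-width charset: check characters
      && rec_chars >= prefix_len / mbmaxlen)
    return false;
  return true;
}

int main() {
  // utf8 column (1..3 bytes per character) indexed with a 9-byte prefix,
  // i.e. 3 characters:
  std::printf("%d\n", prefix_covers_field(6, 2, 9, 1, 3)); // 1: 2 chars fit
  std::printf("%d\n", prefix_covers_field(9, 3, 9, 1, 3)); // 0: may be cut off
}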

View file

@ -1045,10 +1045,6 @@ row_upd_build_difference_binary(
for purge/mvcc purpose) */
if (n_v_fld > 0) {
row_ext_t* ext;
mem_heap_t* v_heap = NULL;
byte* record;
VCOL_STORAGE* vcol_storage;
THD* thd;
if (trx == NULL) {
@ -1059,9 +1055,8 @@ row_upd_build_difference_binary(
ut_ad(!update->old_vrow);
innobase_allocate_row_for_vcol(thd, index, &v_heap,
&mysql_table,
&record, &vcol_storage);
ib_vcol_row vc(NULL);
uchar *record = vc.record(thd, index, &mysql_table);
for (ulint i = 0; i < n_v_fld; i++) {
const dict_v_col_t* col
@ -1079,10 +1074,9 @@ row_upd_build_difference_binary(
dfield_t* vfield = innobase_get_computed_value(
update->old_vrow, col, index,
&v_heap, heap, NULL, thd, mysql_table, record,
&vc.heap, heap, NULL, thd, mysql_table, record,
NULL, NULL, NULL);
if (vfield == NULL) {
if (v_heap) mem_heap_free(v_heap);
*error = DB_COMPUTE_VALUE_FAILED;
return(NULL);
}
@ -1103,12 +1097,6 @@ row_upd_build_difference_binary(
upd_field_set_v_field_no(uf, i, index);
}
}
if (v_heap) {
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(v_heap);
}
}
update->n_fields = n_diff;
@ -2099,23 +2087,19 @@ row_upd_eval_new_vals(
@param[in,out] node row update node
@param[in] update an update vector if it is update
@param[in] thd mysql thread handle
@param[in,out] mysql_table mysql table object */
@param[in,out] mysql_table mysql table object
@return true on success,
false if virtual column value computation fails. */
static
void
bool
row_upd_store_v_row(
upd_node_t* node,
const upd_t* update,
THD* thd,
TABLE* mysql_table)
{
mem_heap_t* heap = NULL;
dict_index_t* index = dict_table_get_first_index(node->table);
byte* record= 0;
VCOL_STORAGE *vcol_storage= 0;
if (!update)
innobase_allocate_row_for_vcol(thd, index, &heap, &mysql_table,
&record, &vcol_storage);
ib_vcol_row vc(NULL);
for (ulint col_no = 0; col_no < dict_table_get_n_v_cols(node->table);
col_no++) {
@ -2165,33 +2149,37 @@ row_upd_store_v_row(
dfield_dup(dfield, node->heap);
}
} else {
uchar *record = vc.record(thd, index,
&mysql_table);
/* Need to compute, this happens when
deleting row */
innobase_get_computed_value(
node->row, col, index,
&heap, node->heap, NULL,
thd, mysql_table, record, NULL,
NULL, NULL);
dfield_t* vfield =
innobase_get_computed_value(
node->row, col, index,
&vc.heap, node->heap,
NULL, thd, mysql_table,
record, NULL, NULL,
NULL);
if (vfield == NULL) {
return false;
}
}
}
}
}
if (heap) {
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(heap);
}
return true;
}
/** Stores to the heap the row on which the node->pcur is positioned.
@param[in] node row update node
@param[in] thd mysql thread handle
@param[in,out] mysql_table NULL, or mysql table object when
user thread invokes dml */
user thread invokes dml
@return false if virtual column value computation fails
true otherwise. */
static
void
bool
row_upd_store_row(
upd_node_t* node,
THD* thd,
@ -2235,8 +2223,12 @@ row_upd_store_row(
NULL, NULL, NULL, ext, node->heap);
if (node->table->n_v_cols) {
row_upd_store_v_row(node, node->is_delete ? NULL : node->update,
bool ok = row_upd_store_v_row(node,
node->is_delete ? NULL : node->update,
thd, mysql_table);
if (!ok) {
return false;
}
}
if (node->is_delete == PLAIN_DELETE) {
@ -2251,6 +2243,7 @@ row_upd_store_row(
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return true;
}
/***********************************************************//**
@ -2978,9 +2971,12 @@ row_upd_del_mark_clust_rec(
/* Store row because we have to build also the secondary index
entries */
row_upd_store_row(node, trx->mysql_thd,
if (!row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt && thr->prebuilt->table == node->table
? thr->prebuilt->m_mysql_table : NULL);
? thr->prebuilt->m_mysql_table : NULL)) {
err = DB_COMPUTE_VALUE_FAILED;
return err;
}
/* Mark the clustered index record deleted; we do not have to check
locks, because we assume that we have an x-lock on the record */
@ -3198,8 +3194,11 @@ row_upd_clust_step(
goto exit_func;
}
row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL);
if (!row_upd_store_row(node, trx->mysql_thd,
thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL)) {
err = DB_COMPUTE_VALUE_FAILED;
goto exit_func;
}
if (row_upd_changes_ord_field_binary(index, node->update, thr,
node->row, node->ext)) {

View file

@ -442,9 +442,11 @@ row_vers_impl_x_locked(
@param[in] clust_index clustered index
@param[in] index the secondary index
@param[in] heap heap used to build virtual dtuple
@param[in,out] vcol_info virtual column information. */
@param[in,out] vcol_info virtual column information.
@return true on success,
false if virtual column value computation fails */
static
void
bool
row_vers_build_clust_v_col(
dtuple_t* row,
dict_index_t* clust_index,
@ -452,11 +454,8 @@ row_vers_build_clust_v_col(
mem_heap_t* heap,
purge_vcol_info_t* vcol_info)
{
mem_heap_t* local_heap = NULL;
VCOL_STORAGE *vcol_storage= NULL;
THD* thd= current_thd;
TABLE* maria_table= 0;
byte* record= 0;
ut_ad(dict_index_has_virtual(index));
ut_ad(index->table == clust_index->table);
@ -466,15 +465,13 @@ row_vers_build_clust_v_col(
maria_table = vcol_info->table();
}
innobase_allocate_row_for_vcol(thd, index,
&local_heap,
&maria_table,
&record,
&vcol_storage);
ib_vcol_row vc(NULL);
byte *record = vc.record(thd, index, &maria_table);
if (vcol_info && !vcol_info->table()) {
vcol_info->set_table(maria_table);
goto func_exit;
// wait for second fetch
return true;
}
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
@ -487,19 +484,18 @@ row_vers_build_clust_v_col(
col = reinterpret_cast<const dict_v_col_t*>(
ind_field->col);
innobase_get_computed_value(
row, col, clust_index, &local_heap,
dfield_t *vfield = innobase_get_computed_value(
row, col, clust_index, &vc.heap,
heap, NULL, thd, maria_table, record, NULL,
NULL, NULL);
if (vfield == NULL) {
innobase_report_computed_value_failed(row);
ut_ad(0);
return false;
}
}
}
func_exit:
if (local_heap) {
if (vcol_storage)
innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(local_heap);
}
return true;
}
/** Build latest virtual column data from undo log
@ -834,8 +830,11 @@ row_vers_build_cur_vrow(
mtr->commit();
}
row_vers_build_clust_v_col(
bool res = row_vers_build_clust_v_col(
row, clust_index, index, heap, vcol_info);
if (!res) {
return NULL;
}
if (vcol_info != NULL && vcol_info->is_first_fetch()) {
return NULL;
@ -956,10 +955,14 @@ row_vers_old_has_index_entry(
mtr->commit();
}
row_vers_build_clust_v_col(
bool res = row_vers_build_clust_v_col(
row, clust_index, index, heap,
vcol_info);
if (!res) {
goto unsafe_to_purge;
}
if (vcol_info && vcol_info->is_first_fetch()) {
goto unsafe_to_purge;
}

View file

@ -318,7 +318,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
{
options|= HA_OPTION_TMP_TABLE;
tmp_table= TRUE;
create_mode|= O_NOFOLLOW;
create_mode|= O_NOFOLLOW | (internal_table ? 0 : O_EXCL);
/* "CREATE TEMPORARY" tables are not crash-safe (dropped at restart) */
ci->transactional= FALSE;
flags&= ~HA_CREATE_PAGE_CHECKSUM;
@ -891,8 +891,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MARIA_NAME_IEXT);
fn_format(kfilename, name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
fn_format(kfilename, name, "", MARIA_NAME_IEXT, MY_UNPACK_FILENAME |
(internal_table ? 0 : MY_RETURN_REAL_PATH) |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
/*
Replace the current file.

View file

@ -184,7 +184,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
if (flags & HA_CREATE_TMP_TABLE)
{
options|= HA_OPTION_TMP_TABLE;
create_mode|= O_NOFOLLOW;
create_mode|= O_NOFOLLOW | (internal_table ? 0 : O_EXCL);
}
if (flags & HA_CREATE_CHECKSUM || (options & HA_OPTION_CHECKSUM))
{
@ -619,8 +619,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MI_NAME_IEXT);
fn_format(kfilename, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
fn_format(kfilename, name, "", MI_NAME_IEXT, MY_UNPACK_FILENAME |
(internal_table ? 0 : MY_RETURN_REAL_PATH) |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
/* Replace the current file */
create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;