Merge remote-tracking branch 'origin/bb-10.2-ext' into 10.3

Marko Mäkelä 2017-08-09 12:59:03 +03:00
commit 620ba97cfc
310 changed files with 15963 additions and 3908 deletions
.travis.compiler.sh
.travis.yml
debian
extra
mysql-test
sql
storage/innobase


@ -8,8 +8,8 @@ if [[ "${TRAVIS_OS_NAME}" == 'linux' ]]; then
CMAKE_OPT="${CMAKE_OPT} -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
fi
case ${GCC_VERSION} in
5) CXX=clang++-3.9 ;;
6) CXX=clang++-4.0 ;;
5) CXX=clang++-4.0 ;;
6) CXX=clang++-5.0 ;;
esac
export CXX CC=${CXX/++/}
elif [[ "${CXX}" == 'g++' ]]; then


@ -136,18 +136,17 @@ addons:
apt:
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty
- llvm-toolchain-trusty-3.9
- llvm-toolchain-trusty-4.0
- sourceline: 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main'
packages: # make sure these match the build requirements
- gcc-5
- g++-5
- gcc-6
- g++-6
- clang-3.9
- llvm-3.9-dev
- clang-4.0
- llvm-4.0-dev
- clang-5.0
- llvm-5.0-dev
- libasan0
- bison
- chrpath


@ -10,6 +10,7 @@ usr/share/mysql/estonian
usr/share/mysql/french
usr/share/mysql/german
usr/share/mysql/greek
usr/share/mysql/hindi
usr/share/mysql/hungarian
usr/share/mysql/italian
usr/share/mysql/japanese


@ -79,7 +79,6 @@ IF(WITH_INNOBASE_STORAGE_ENGINE)
../storage/innobase/ut/ut0ut.cc
../storage/innobase/buf/buf0buf.cc
../storage/innobase/page/page0zip.cc
../storage/innobase/os/os0file.cc
../storage/innobase/fil/fil0crypt.cc
)

File diff suppressed because it is too large.


@ -1,60 +0,0 @@
# include/wait_innodb_all_purged.inc
#
# SUMMARY
#
# Waits until all InnoDB undo records have been purged, or the operation times out.
#
# USAGE
#
# --source include/wait_innodb_all_purged.inc
#
--source include/have_innodb.inc
if (`select version() like '%debug%'`) {
--disable_query_log
let $wait_counter_init= 300;
if ($wait_timeout)
{
let $wait_counter_init= `SELECT $wait_timeout * 10`;
}
# Reset $wait_timeout so that its value won't be used on subsequent
# calls, and default will be used instead.
let $wait_timeout= 0;
let $wait_counter= $wait_counter_init;
# Keep track of how many times the wait condition is tested
let $wait_condition_reps= 0;
let $prev_trx_age= 0;
while ($wait_counter)
{
let $trx_age = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME = 'INNODB_PURGE_TRX_ID_AGE';`;
if ($trx_age != $prev_trx_age)
{
let $wait_counter= $wait_counter_init;
let $prev_trx_age= $trx_age;
}
let $success= `SELECT $trx_age < 1`;
inc $wait_condition_reps;
if ($success)
{
let $wait_counter= 0;
}
if (!$success)
{
set global innodb_purge_run_now=ON;
real_sleep 0.1;
dec $wait_counter;
}
}
if (!$success)
{
echo Timeout in wait_innodb_all_purged.inc for INNODB_PURGE_TRX_ID_AGE = $trx_age;
}
--enable_query_log
}
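
The callers changed later in this commit switch to include/wait_all_purged.inc, whose echoed line "InnoDB 0 transactions not purged" appears in the updated .result files below. As a minimal sketch of that style of polling helper (illustrative only, assuming the history list length is exposed through INFORMATION_SCHEMA.GLOBAL_STATUS as INNODB_HISTORY_LIST_LENGTH; the shipped include may read the value differently):

# Minimal polling sketch, not the actual include/wait_all_purged.inc.
let $wait_counter= 300;
while ($wait_counter)
{
  # Assumption: history list length is available as a global status variable.
  let $remaining= `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
  WHERE VARIABLE_NAME = 'INNODB_HISTORY_LIST_LENGTH'`;
  if (!$remaining)
  {
    let $wait_counter= 0;
  }
  if ($remaining)
  {
    real_sleep 0.1;
    dec $wait_counter;
  }
}
echo InnoDB $remaining transactions not purged;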


@ -230,7 +230,7 @@ insert into t2 (a) values (1023);
do (f2(23));
Warnings:
Error 1062 Duplicate entry '23' for key 'a'
Note 4070 At line 4 in test.f2
Note 4091 At line 4 in test.f2
select * from t2;
a
1023


@ -160,7 +160,7 @@ Note 1050 Table 'v1' already exists
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # VER


@ -55,5 +55,5 @@ id
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
DROP TABLE t1;


@ -209,10 +209,10 @@ Note 1051 Unknown table 'test.table1'
Note 1051 Unknown table 'test.table2'
DROP VIEW IF EXISTS view1,view2,view3,view4;
Warnings:
Note 4068 Unknown VIEW: 'test.view1'
Note 4068 Unknown VIEW: 'test.view2'
Note 4068 Unknown VIEW: 'test.view3'
Note 4068 Unknown VIEW: 'test.view4'
Note 4089 Unknown VIEW: 'test.view1'
Note 4089 Unknown VIEW: 'test.view2'
Note 4089 Unknown VIEW: 'test.view3'
Note 4089 Unknown VIEW: 'test.view4'
# Test error message when trigger does not find table
CREATE TABLE table1(a int);


@ -356,6 +356,12 @@ json_keys('foo')
NULL
Warnings:
Warning 4038 Syntax error in JSON text in argument 1 to function 'json_keys' at position 1
select json_keys('{"a":{"c":1, "d":2}, "b":2, "c":1, "a":3, "b":1, "c":2}');
json_keys('{"a":{"c":1, "d":2}, "b":2, "c":1, "a":3, "b":1, "c":2}')
["a", "b", "c"]
select json_keys('{"c1": "value 1", "c1": "value 2"}');
json_keys('{"c1": "value 1", "c1": "value 2"}')
["c1"]
SET @j = '["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]';
select json_search(@j, 'one', 'abc');
json_search(@j, 'one', 'abc')
@ -642,6 +648,33 @@ SELECT JSON_KEYS(f) FROM t1 ORDER BY 1;
JSON_KEYS(f)
NULL
DROP TABLE t1;
SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' );
JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' )
NULL
SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*]' );
JSON_EXTRACT( '{"foo":"bar"}', '$[*]' )
NULL
select JSON_EXTRACT('{"name":"value"}', '$.name') = 'value';
JSON_EXTRACT('{"name":"value"}', '$.name') = 'value'
1
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = true;
JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = true
0
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'true'
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = false;
JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = false
1
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'true'
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = 1;
JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = 1
0
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'true'
select JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"');
JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"')
"\u00f6"
#
# Start of 10.3 tests
#


@ -590,7 +590,7 @@ DROP PROCEDURE p1;
SHOW WARNINGS;
Level Code Message
Error 54321 MESSAGE_TEXT text
Note 4070 At line 16 in test.p1
Note 4091 At line 16 in test.p1
CREATE PROCEDURE p1()
BEGIN
DECLARE var INT;


@ -61,6 +61,34 @@ POINT(102 0.5)
SELECT st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}'));
st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}'))
GEOMETRYCOLLECTION(POINT(102 0.5))
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',5));
ERROR HY000: Incorrect option value: '5' for function ST_GeometryFromJSON
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',1));
ERROR 22023: Invalid GIS data provided to function ST_GeometryFromJSON.
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2));
ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2))
POINT(5.3 15)
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3));
ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3))
POINT(5.3 15)
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4));
ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4))
POINT(5.3 15)
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2);
ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2)
{"type": "Point", "coordinates": [5.36, 7.27]}
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1);
ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1)
{"type": "Point", "coordinates": [5.4, 7.3]}
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10);
ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10)
{"type": "Point", "coordinates": [5.363, 7.266]}
SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1);
ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1)
{"bbox": [10, 11, 10, 11], "type": "Point", "coordinates": [10, 11]}
SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5);
ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5)
{"bbox": [10, 11, 10, 11], "type": "Point", "coordinates": [10, 11]}
#
# End of 10.2 tests
#


@ -1428,7 +1428,7 @@ Warnings:
Note 1305 FUNCTION test.test_function does not exist
drop view if exists v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
create table test (col1 varchar(30));
create function test_function() returns varchar(30)
begin


@ -415,7 +415,7 @@ select @@profiling;
drop table if exists t1, t2, t3;
drop view if exists v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
drop function if exists f1;
set session profiling = OFF;
set global profiling_history_size= @start_value;


@ -1715,7 +1715,7 @@ show warnings $$
Level Code Message
Warning 1012 Raising a warning
Error 5555 RESIGNAL to not found
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1740,7 +1740,7 @@ show warnings $$
Level Code Message
Warning 1012 Raising a warning
Error 5555 RESIGNAL to error
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1789,7 +1789,7 @@ show warnings $$
Level Code Message
Error 1012 Raising a not found
Error 5555 RESIGNAL to not found
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1814,7 +1814,7 @@ show warnings $$
Level Code Message
Error 1012 Raising a not found
Error 5555 RESIGNAL to error
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1863,7 +1863,7 @@ show warnings $$
Level Code Message
Error 1012 Raising an error
Error 5555 RESIGNAL to not found
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1888,7 +1888,7 @@ show warnings $$
Level Code Message
Error 1012 Raising an error
Error 5555 RESIGNAL to error
Note 4070 At line 9 in test.test_resignal
Note 4091 At line 9 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1931,7 +1931,7 @@ show warnings $$
Level Code Message
Warning 1264 Out of range value for column 'a' at row 1
Error 5555 RESIGNAL to a not found
Note 4070 At line 8 in test.test_resignal
Note 4091 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -1953,7 +1953,7 @@ show warnings $$
Level Code Message
Warning 1264 Out of range value for column 'a' at row 1
Error 5555 RESIGNAL to an error
Note 4070 At line 8 in test.test_resignal
Note 4091 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -2004,7 +2004,7 @@ show warnings $$
Level Code Message
Error 1329 No data - zero rows fetched, selected, or processed
Error 5555 RESIGNAL to a not found
Note 4070 At line 10 in test.test_resignal
Note 4091 At line 10 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -2030,7 +2030,7 @@ show warnings $$
Level Code Message
Error 1329 No data - zero rows fetched, selected, or processed
Error 5555 RESIGNAL to an error
Note 4070 At line 10 in test.test_resignal
Note 4091 At line 10 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -2073,7 +2073,7 @@ show warnings $$
Level Code Message
Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to a not found
Note 4070 At line 8 in test.test_resignal
Note 4091 At line 8 in test.test_resignal
drop procedure test_resignal $$
create procedure test_resignal()
begin
@ -2095,7 +2095,7 @@ show warnings $$
Level Code Message
Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to an error
Note 4070 At line 8 in test.test_resignal
Note 4091 At line 8 in test.test_resignal
drop procedure test_resignal $$
#
# More complex cases
@ -2142,7 +2142,7 @@ ERROR 42000: Hi, I am a useless error message
show warnings $$
Level Code Message
Error 9999 Hi, I am a useless error message
Note 4070 At line 7 in test.peter_p2
Note 4091 At line 7 in test.peter_p2
drop procedure peter_p1 $$
drop procedure peter_p2 $$
CREATE PROCEDURE peter_p1 ()
@ -2198,16 +2198,16 @@ Level Code Message
Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 1232 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 9999 Variable 'sql_mode' can't be set to the value of 'NULL'
Note 4070 At line 8 in test.peter_p1
Note 4091 At line 8 in test.peter_p1
ERROR 42000: Hi, I am a useless error message
show warnings $$
Level Code Message
Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 1232 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 9999 Variable 'sql_mode' can't be set to the value of 'NULL'
Note 4070 At line 8 in test.peter_p1
Note 4091 At line 8 in test.peter_p1
Error 9999 Hi, I am a useless error message
Note 4070 At line 10 in test.peter_p2
Note 4091 At line 10 in test.peter_p2
drop procedure peter_p1 $$
drop procedure peter_p2 $$
drop procedure if exists peter_p3 $$
@ -2225,7 +2225,7 @@ show warnings $$
Level Code Message
Error 1 Original
Error 2 Original
Note 4070 At line 4 in test.peter_p3
Note 4091 At line 4 in test.peter_p3
drop procedure peter_p3 $$
drop table t_warn;
drop table t_cursor;


@ -79,23 +79,23 @@ show warnings;
Level Code Message
Error 1051 Unknown table 'demo.oops_it_is_not_here'
Error 1644 Oops in proc_9
Note 4070 At line 4 in demo.proc_9
Note 4091 At line 4 in demo.proc_9
Error 1644 Oops in proc_8
Note 4070 At line 4 in demo.proc_8
Note 4091 At line 4 in demo.proc_8
Error 1644 Oops in proc_7
Note 4070 At line 4 in demo.proc_7
Note 4091 At line 4 in demo.proc_7
Error 1644 Oops in proc_6
Note 4070 At line 4 in demo.proc_6
Note 4091 At line 4 in demo.proc_6
Error 1644 Oops in proc_5
Note 4070 At line 4 in demo.proc_5
Note 4091 At line 4 in demo.proc_5
Error 1644 Oops in proc_4
Note 4070 At line 4 in demo.proc_4
Note 4091 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
Note 4070 At line 4 in demo.proc_3
Note 4091 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
Note 4070 At line 4 in demo.proc_2
Note 4091 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
Note 4070 At line 4 in demo.proc_1
Note 4091 At line 4 in demo.proc_1
SET @@session.max_error_count = 5;
SELECT @@session.max_error_count;
@@session.max_error_count
@ -104,11 +104,11 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
Note 4070 At line 4 in demo.proc_3
Note 4091 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
Note 4070 At line 4 in demo.proc_2
Note 4091 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
Note 4070 At line 4 in demo.proc_1
Note 4091 At line 4 in demo.proc_1
SET @@session.max_error_count = 7;
SELECT @@session.max_error_count;
@@session.max_error_count
@ -117,13 +117,13 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
Note 4070 At line 4 in demo.proc_4
Note 4091 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
Note 4070 At line 4 in demo.proc_3
Note 4091 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
Note 4070 At line 4 in demo.proc_2
Note 4091 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
Note 4070 At line 4 in demo.proc_1
Note 4091 At line 4 in demo.proc_1
SET @@session.max_error_count = 9;
SELECT @@session.max_error_count;
@@session.max_error_count
@ -132,15 +132,15 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
Note 4070 At line 4 in demo.proc_5
Note 4091 At line 4 in demo.proc_5
Error 1644 Oops in proc_4
Note 4070 At line 4 in demo.proc_4
Note 4091 At line 4 in demo.proc_4
Error 1644 Oops in proc_3
Note 4070 At line 4 in demo.proc_3
Note 4091 At line 4 in demo.proc_3
Error 1644 Oops in proc_2
Note 4070 At line 4 in demo.proc_2
Note 4091 At line 4 in demo.proc_2
Error 1644 Oops in proc_1
Note 4070 At line 4 in demo.proc_1
Note 4091 At line 4 in demo.proc_1
drop database demo;
SET @@global.max_error_count = @start_global_value;
SELECT @@global.max_error_count;


@ -1990,8 +1990,8 @@ Warning 1264 Out of range value for column 'a' at row 1
Note 1292 Truncated incorrect INTEGER value: '222222 '
Warning 1264 Out of range value for column 'b' at row 1
Error 1048 Column 'c' cannot be null
Note 4070 At line 6 in test.t1_bi
Note 4070 At line 2 in test.p1
Note 4091 At line 6 in test.t1_bi
Note 4091 At line 2 in test.p1
DROP TABLE t1;
DROP TABLE t2;


@ -3,7 +3,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
drop view if exists view_t1;
Warnings:
Note 4068 Unknown VIEW: 'test.view_t1'
Note 4089 Unknown VIEW: 'test.view_t1'
SET sql_mode=ONLY_FULL_GROUP_BY;
CREATE TABLE t1 (
pk INT,


@ -3211,7 +3211,7 @@ drop procedure bug10961|
DROP PROCEDURE IF EXISTS bug6866|
DROP VIEW IF EXISTS tv|
Warnings:
Note 4068 Unknown VIEW: 'test.tv'
Note 4089 Unknown VIEW: 'test.tv'
DROP TABLE IF EXISTS tt1,tt2,tt3|
Warnings:
Note 1051 Unknown table 'test.tt1'
@ -7823,7 +7823,7 @@ ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show warnings;
Level Code Message
Error 1062 Duplicate entry '2' for key 'PRIMARY'
Note 4070 At line 5 in test.p1
Note 4091 At line 5 in test.p1
select * from t1;
id
1


@ -5236,7 +5236,7 @@ CREATE TABLE t4 (i4 INT);
INSERT INTO t4 VALUES (1),(2);
DROP VIEW IF EXISTS v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
CREATE VIEW v1 AS select coalesce(j1,i3) AS v1_field1 from t2 join t3 left join t1 on ( i1 = i2 );
CREATE VIEW v2 AS select v1_field1 from t4 join v1;
prepare my_stmt from "select v1_field1 from v2";


@ -353,7 +353,7 @@ ERROR 23000: Duplicate entry '11' for key 'a'
SHOW WARNINGS;
Level Code Message
Note 4070 At line 4 in test.f1
Note 4091 At line 4 in test.f1
Error 1062 Duplicate entry '11' for key 'a'
DROP TABLE t1;


@ -3174,6 +3174,18 @@ Nth_value(i,1) OVER()
1
DROP TABLE t1;
#
# A regression after MDEV-13351:
# MDEV-13374 : Server crashes in first_linear_tab / st_select_lex::set_explain_type
# upon UNION with aggregate function
#
CREATE TABLE t1 (i INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1),(2);
SELECT i AS fld FROM t1 UNION SELECT COUNT(*) AS fld FROM t1;
fld
1
2
DROP TABLE t1;
#
# Start of 10.3 tests
#
#


@ -176,4 +176,14 @@ ERROR 23000: Duplicate entry '4' for key 'PRIMARY'
# There must be no UPDATE query event;
include/show_binlog_events.inc
drop table t1, t2;
*** MDEV-11937: InnoDB flushes redo log too often ***
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
SET @old_flush = @@GLOBAL.innodb_flush_log_at_trx_commit;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
SELECT IF(@num_sync < 100*1.5, "OK",
CONCAT("ERROR: More than 1 fsync per commit (saw ", @num_sync/100, ")")) AS status;
status
OK
DROP TABLE t1;
SET GLOBAL innodb_flush_log_at_trx_commit=@old_flush;
End of tests


@ -172,4 +172,33 @@ source include/show_binlog_events.inc;
# cleanup bug#27716
drop table t1, t2;
--echo *** MDEV-11937: InnoDB flushes redo log too often ***
# Count number of log fsyncs reported by InnoDB per commit.
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
SET @old_flush = @@GLOBAL.innodb_flush_log_at_trx_commit;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
--let $syncs1 = query_get_value(SHOW STATUS LIKE 'Innodb_os_log_fsyncs', Value, 1)
--let $ROWS = 100
--disable_query_log
let $count = $ROWS;
while ($count) {
eval INSERT INTO t1 VALUES ($count);
dec $count;
}
--let $syncs2 = query_get_value(SHOW STATUS LIKE 'Innodb_os_log_fsyncs', Value, 1)
eval SET @num_sync = $syncs2 - $syncs1;
--enable_query_log
# Allow a bit of slack, in case some background process or something
# is introducing a few more syncs.
eval SELECT IF(@num_sync < $ROWS*1.5, "OK",
CONCAT("ERROR: More than 1 fsync per commit (saw ", @num_sync/$ROWS, ")")) AS status;
DROP TABLE t1;
SET GLOBAL innodb_flush_log_at_trx_commit=@old_flush;
--echo End of tests
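
For reference, the assertion above is plain arithmetic; a hedged manual version of the same check, using the Innodb_os_log_fsyncs counter the test already reads (the @before/@after user variables are illustrative, not part of the test):

-- Illustrative manual version of the fsync-per-commit check.
SELECT VARIABLE_VALUE INTO @before FROM INFORMATION_SCHEMA.GLOBAL_STATUS
  WHERE VARIABLE_NAME = 'INNODB_OS_LOG_FSYNCS';
-- ... run 100 autocommitted single-row INSERTs with innodb_flush_log_at_trx_commit=1 ...
SELECT VARIABLE_VALUE INTO @after FROM INFORMATION_SCHEMA.GLOBAL_STATUS
  WHERE VARIABLE_NAME = 'INNODB_OS_LOG_FSYNCS';
-- After MDEV-11937 roughly one redo log fsync per commit is expected, so the
-- delta should stay below 100 * 1.5; the 1.5 slack absorbs occasional
-- background flushes, as the test comment notes.
SELECT (@after - @before) / 100 AS fsyncs_per_commit;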


@ -1,3 +1,4 @@
SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
# Create and populate tables
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
@ -5,17 +6,31 @@ CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ROW_FOR
CREATE TABLE t3 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=NO;
CREATE TABLE t4 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB PAGE_COMPRESSED=1;
CREATE TABLE t5 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB PAGE_COMPRESSED=1 ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
# Write file to make mysql-test-run.pl expect the "crash", but don't
# start it until it's told to
# We give 30 seconds to do a clean shutdown because we do not want
# to redo apply the pages of t1.ibd at the time of recovery.
# We want SQL to initiate the first access to t1.ibd.
# Wait until disconnected.
CREATE TABLE t6 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
# Run innochecksum on t1
# Run innochecksum on t2
# Run innochecksum on t3
# Run innochecksum on t4
# Run innochecksum on t4
# Write file to make mysql-test-run.pl start up the server again
# Cleanup
DROP TABLE t1, t2, t3, t4, t5;
# Run innochecksum on t5
# Run innochecksum on t6
# Backup tables before corrupting
# Corrupt FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
# Run innochecksum on t2
# Run innochecksum on t3
# no encryption corrupting the field should not have effect
# Run innochecksum on t6
# no encryption corrupting the field should not have effect
# Restore the original tables
# Corrupt FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION+4 (post encryption checksum)
# Run innochecksum on t2
# Run innochecksum on t3
# Run innochecksum on t6
# no encryption corrupting the field should not have effect
# Restore the original tables
# Corrupt FIL_DATA+10 (data)
# Run innochecksum on t2
# Run innochecksum on t3
# Run innochecksum on t6
# Restore the original tables
DROP TABLE t1, t2, t3, t4, t5, t6;


@ -25,6 +25,7 @@ CREATE TEMPORARY TABLE t LIKE t0;
INSERT INTO t VALUES
(NULL,1,1,'private','secret'),(NULL,2,2,'sacred','success'),
(NULL,3,3,'story','secure'),(NULL,4,4,'security','sacrament');
SET GLOBAL innodb_change_buffering=none;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
INSERT INTO t0
SELECT NULL, t1.col_int, t1.col_int_key, t1.col_char, t1.col_char_key


@ -2,19 +2,20 @@
# MDEV-8773: InnoDB innochecksum does not work with encrypted or page compressed tables
#
# Don't test under embedded
--source include/innodb_page_size_small.inc
# Don't test under embedded as we restart server
-- source include/not_embedded.inc
# Require InnoDB
-- source include/have_innodb.inc
-- source include/have_file_key_management_plugin.inc
-- source include/innodb_page_size_small.inc
if (!$INNOCHECKSUM) {
--echo Need innochecksum binary
--die Need innochecksum binary
}
let $innodb_compression_algorithm_orig=`SELECT @@innodb_compression_algorithm`;
SET GLOBAL innodb_file_per_table = ON;
# zlib
set global innodb_compression_algorithm = 1;
@ -24,9 +25,11 @@ CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ROW_FOR
CREATE TABLE t3 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=NO;
CREATE TABLE t4 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB PAGE_COMPRESSED=1;
CREATE TABLE t5 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB PAGE_COMPRESSED=1 ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
CREATE TABLE t6 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
--disable_query_log
--let $i = 1000
begin;
while ($i)
{
INSERT INTO t1 (b) VALUES (REPEAT('abcdefghijklmnopqrstuvwxyz', 100));
@ -36,6 +39,8 @@ INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t3 SELECT * FROM t1;
INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t1;
INSERT INTO t6 SELECT * FROM t1;
commit;
--enable_query_log
let $MYSQLD_DATADIR=`select @@datadir`;
@ -44,18 +49,12 @@ let t2_IBD = $MYSQLD_DATADIR/test/t2.ibd;
let t3_IBD = $MYSQLD_DATADIR/test/t3.ibd;
let t4_IBD = $MYSQLD_DATADIR/test/t4.ibd;
let t5_IBD = $MYSQLD_DATADIR/test/t5.ibd;
let t6_IBD = $MYSQLD_DATADIR/test/t6.ibd;
--echo # Write file to make mysql-test-run.pl expect the "crash", but don't
--echo # start it until it's told to
--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
let MYSQLD_DATADIR=`select @@datadir`;
--echo # We give 30 seconds to do a clean shutdown because we do not want
--echo # to redo apply the pages of t1.ibd at the time of recovery.
--echo # We want SQL to initiate the first access to t1.ibd.
shutdown_server 30;
--echo # Wait until disconnected.
--source include/wait_until_disconnected.inc
--source include/shutdown_mysqld.inc
--echo # Run innochecksum on t1
-- disable_result_log
@ -77,17 +76,193 @@ shutdown_server 30;
--exec $INNOCHECKSUM $t4_IBD
--echo # Run innochecksum on t5
--exec $INNOCHECKSUM $t5_IBD
--echo # Run innochecksum on t6
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
--echo # Write file to make mysql-test-run.pl start up the server again
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--enable_reconnect
--source include/wait_until_connected_again.inc
--echo # Backup tables before corrupting
--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t1.ibd.backup
--copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t2.ibd.backup
--copy_file $MYSQLD_DATADIR/test/t3.ibd $MYSQLD_DATADIR/test/t3.ibd.backup
--copy_file $MYSQLD_DATADIR/test/t4.ibd $MYSQLD_DATADIR/test/t4.ibd.backup
--copy_file $MYSQLD_DATADIR/test/t5.ibd $MYSQLD_DATADIR/test/t5.ibd.backup
--copy_file $MYSQLD_DATADIR/test/t6.ibd $MYSQLD_DATADIR/test/t6.ibd.backup
--echo # Cleanup
DROP TABLE t1, t2, t3, t4, t5;
#
# MDEV-11939: innochecksum mistakes a file for an encrypted one
#
# reset system
--disable_query_log
EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algorithm_orig;
--enable_query_log
--echo # Corrupt FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
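# Note on the offsets used by the corruption blocks (illustrative, assuming the
# standard InnoDB FIL page layout): each write targets page 3 of the tablespace
# (the clustered index root page), at byte 26 within the page, which is
# FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, the key version field on encrypted
# pages. The later blocks use +30 (key version + 4, the post-encryption
# checksum) and +48 (FIL_PAGE_DATA = 38 plus 10 bytes into the page payload).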
perl;
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t1.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 26, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t2.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 26, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t3.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 26, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t6.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 26, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
EOF
-- disable_result_log
--error 1
--exec $INNOCHECKSUM $t1_IBD
--echo # Run innochecksum on t2
--error 1
--exec $INNOCHECKSUM $t2_IBD
--echo # Run innochecksum on t3
--echo # no encryption corrupting the field should not have effect
--exec $INNOCHECKSUM $t3_IBD
--echo # Run innochecksum on t6
--echo # no encryption corrupting the field should not have effect
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
--echo # Restore the original tables
--remove_file $MYSQLD_DATADIR/test/t1.ibd
--remove_file $MYSQLD_DATADIR/test/t2.ibd
--remove_file $MYSQLD_DATADIR/test/t3.ibd
--remove_file $MYSQLD_DATADIR/test/t4.ibd
--remove_file $MYSQLD_DATADIR/test/t5.ibd
--remove_file $MYSQLD_DATADIR/test/t6.ibd
--copy_file $MYSQLD_DATADIR/test/t1.ibd.backup $MYSQLD_DATADIR/test/t1.ibd
--copy_file $MYSQLD_DATADIR/test/t2.ibd.backup $MYSQLD_DATADIR/test/t2.ibd
--copy_file $MYSQLD_DATADIR/test/t3.ibd.backup $MYSQLD_DATADIR/test/t3.ibd
--copy_file $MYSQLD_DATADIR/test/t4.ibd.backup $MYSQLD_DATADIR/test/t4.ibd
--copy_file $MYSQLD_DATADIR/test/t5.ibd.backup $MYSQLD_DATADIR/test/t5.ibd
--copy_file $MYSQLD_DATADIR/test/t6.ibd.backup $MYSQLD_DATADIR/test/t6.ibd
--echo # Corrupt FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION+4 (post encryption checksum)
perl;
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t1.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 30, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t2.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 30, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t3.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 30, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t6.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 30, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
EOF
-- disable_result_log
--error 1
--exec $INNOCHECKSUM $t1_IBD
--echo # Run innochecksum on t2
--error 1
--exec $INNOCHECKSUM $t2_IBD
--echo # Run innochecksum on t3
--error 1
--exec $INNOCHECKSUM $t3_IBD
--echo # Run innochecksum on t6
--echo # no encryption corrupting the field should not have effect
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
--echo # Restore the original tables
--remove_file $MYSQLD_DATADIR/test/t1.ibd
--remove_file $MYSQLD_DATADIR/test/t2.ibd
--remove_file $MYSQLD_DATADIR/test/t3.ibd
--remove_file $MYSQLD_DATADIR/test/t4.ibd
--remove_file $MYSQLD_DATADIR/test/t5.ibd
--remove_file $MYSQLD_DATADIR/test/t6.ibd
--copy_file $MYSQLD_DATADIR/test/t1.ibd.backup $MYSQLD_DATADIR/test/t1.ibd
--copy_file $MYSQLD_DATADIR/test/t2.ibd.backup $MYSQLD_DATADIR/test/t2.ibd
--copy_file $MYSQLD_DATADIR/test/t3.ibd.backup $MYSQLD_DATADIR/test/t3.ibd
--copy_file $MYSQLD_DATADIR/test/t4.ibd.backup $MYSQLD_DATADIR/test/t4.ibd
--copy_file $MYSQLD_DATADIR/test/t5.ibd.backup $MYSQLD_DATADIR/test/t5.ibd
--copy_file $MYSQLD_DATADIR/test/t6.ibd.backup $MYSQLD_DATADIR/test/t6.ibd
--echo # Corrupt FIL_DATA+10 (data)
perl;
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t1.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 48, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t2.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 48, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t3.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 48, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}/test/t6.ibd") or die "open";
binmode FILE;
seek(FILE, $ENV{'INNODB_PAGE_SIZE'} * 3 + 48, SEEK_SET) or die "seek";
print FILE pack("H*", "c00lcafedeadb017");
close FILE or die "close";
EOF
-- disable_result_log
--error 1
--exec $INNOCHECKSUM $t1_IBD
--echo # Run innochecksum on t2
--error 1
--exec $INNOCHECKSUM $t2_IBD
--echo # Run innochecksum on t3
--error 1
--exec $INNOCHECKSUM $t3_IBD
--echo # Run innochecksum on t6
--error 1
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
--echo # Restore the original tables
--move_file $MYSQLD_DATADIR/test/t1.ibd.backup $MYSQLD_DATADIR/test/t1.ibd
--move_file $MYSQLD_DATADIR/test/t2.ibd.backup $MYSQLD_DATADIR/test/t2.ibd
--move_file $MYSQLD_DATADIR/test/t3.ibd.backup $MYSQLD_DATADIR/test/t3.ibd
--move_file $MYSQLD_DATADIR/test/t4.ibd.backup $MYSQLD_DATADIR/test/t4.ibd
--move_file $MYSQLD_DATADIR/test/t5.ibd.backup $MYSQLD_DATADIR/test/t5.ibd
--move_file $MYSQLD_DATADIR/test/t6.ibd.backup $MYSQLD_DATADIR/test/t6.ibd
--source include/start_mysqld.inc
DROP TABLE t1, t2, t3, t4, t5, t6;


@ -32,6 +32,11 @@ INSERT INTO t VALUES
(NULL,1,1,'private','secret'),(NULL,2,2,'sacred','success'),
(NULL,3,3,'story','secure'),(NULL,4,4,'security','sacrament');
# Prevent change buffering of key(col_char_key), so that
# after the restart, the data ('secret','success','secure','sacrament')
# cannot be emitted to the unencrypted redo log by change buffer merge.
SET GLOBAL innodb_change_buffering=none;
# Force a redo log flush at the next commit.
SET GLOBAL innodb_flush_log_at_trx_commit=1;
INSERT INTO t0


@ -4314,7 +4314,7 @@ CREATE VIEW v2 AS Select * from test.v1;
ERROR 42S02: Table 'test.v1' doesn't exist
DROP VIEW IF EXISTS v2;
Warnings:
Note 4068 Unknown VIEW: 'test.v2'
Note 4089 Unknown VIEW: 'test.v2'
Testcase 3.3.1.25
--------------------------------------------------------------------------------
@ -7566,7 +7566,7 @@ Call sp1() ;
ERROR 42000: PROCEDURE test.sp1 does not exist
Drop view if exists test.v1 ;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
Drop procedure sp1 ;
ERROR 42000: PROCEDURE test.sp1 does not exist
@ -21312,7 +21312,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
Testcase 3.3.1.68
--------------------------------------------------------------------------------


@ -4315,7 +4315,7 @@ CREATE VIEW v2 AS Select * from test.v1;
ERROR 42S02: Table 'test.v1' doesn't exist
DROP VIEW IF EXISTS v2;
Warnings:
Note 4068 Unknown VIEW: 'test.v2'
Note 4089 Unknown VIEW: 'test.v2'
Testcase 3.3.1.25
--------------------------------------------------------------------------------
@ -7567,7 +7567,7 @@ Call sp1() ;
ERROR 42000: PROCEDURE test.sp1 does not exist
Drop view if exists test.v1 ;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
Drop procedure sp1 ;
ERROR 42000: PROCEDURE test.sp1 does not exist
@ -21314,7 +21314,7 @@ CREATE VIEW v1 AS SELECT f1 FROM t1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v1;
Warnings:
Note 4068 Unknown VIEW: 'test.v1'
Note 4089 Unknown VIEW: 'test.v1'
Testcase 3.3.1.68
--------------------------------------------------------------------------------


@ -1,5 +1,7 @@
set default_storage_engine=innodb;
set @old_dbug=@@global.debug_dbug;
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
CREATE TABLE `t` (
`a` BLOB,
`b` BLOB,
@ -12,9 +14,7 @@ INSERT INTO t VALUES (REPEAT('a', 16000), REPEAT('b', 16000), DEFAULT, "mm", 2);
CREATE INDEX idx ON t(c(100));
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t SET a = REPEAT('m', 16000) WHERE a like "aaa%";
select sleep(3);
sleep(3)
0
InnoDB 0 transactions not purged
SET global debug_dbug=@old_dbug;
DROP TABLE t;
CREATE TABLE t (
@ -29,9 +29,7 @@ INSERT INTO t VALUES (REPEAT('a', 100), REPEAT('b', 100), DEFAULT, "mm", 2);
CREATE INDEX idx ON t(c(100));
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t SET a = REPEAT('m', 100) WHERE a like "aaa%";
select sleep(3);
sleep(3)
0
InnoDB 0 transactions not purged
SET global debug_dbug=@old_dbug;
DROP TABLE t;
CREATE TABLE t1 (
@ -52,9 +50,7 @@ insert into t1 values(4, 18, default);
CREATE INDEX idx ON t1(x);
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t1 SET id = 10 WHERE id = 1;
select sleep(3);
sleep(3)
0
InnoDB 0 transactions not purged
SET global debug_dbug=@old_dbug;
DROP TABLE t1;
connect con1,localhost,root,,;
@ -80,7 +76,7 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted';
# enable purge
COMMIT;
# wait for purge to process the deleted records.
Timeout in wait_innodb_all_purged.inc for INNODB_PURGE_TRX_ID_AGE = 4
InnoDB 0 transactions not purged
SET DEBUG_SYNC= 'now SIGNAL purged';
connection default;
/* connection default */ ALTER TABLE t1 ADD COLUMN c INT GENERATED ALWAYS AS(a+b), ADD INDEX idx (c), ALGORITHM=INPLACE, LOCK=SHARED;
@ -119,6 +115,7 @@ INSERT INTO t1(a, b) VALUES (8, 8);
# enable purge
COMMIT;
# wait for purge to process the deleted/updated records.
InnoDB 1 transactions not purged
SET DEBUG_SYNC= 'now SIGNAL purged';
disconnect con1;
connection default;
@ -141,20 +138,26 @@ DROP TABLE t0, t1;
create table t (a blob, b blob, c blob as (concat(a,b)), h varchar(10), index (c(100)));
insert t(a,b,h) values (repeat('g', 16000), repeat('x', 16000), "kk");
insert t(a,b,h) values (repeat('a', 16000), repeat('b', 16000), "mm");
set global innodb_purge_stop_now = 1;
set global debug_dbug="+d,ib_purge_virtual_index_callback";
connect prevent_purge, localhost, root;
start transaction with consistent snapshot;
connection default;
update t set a = repeat('m', 16000) where a like "aaa%";
connect con1, localhost, root;
lock table t write;
disconnect prevent_purge;
connection default;
set global innodb_purge_run_now=1;
select variable_value>1 from information_schema.global_status where variable_name='innodb_purge_trx_id_age';
variable_value>1
1
disconnect con1;
start transaction with consistent snapshot;
commit;
InnoDB 0 transactions not purged
select variable_value>1 from information_schema.global_status where variable_name='innodb_purge_trx_id_age';
variable_value>1
0
set global debug_dbug=@old_dbug;
drop table t;
set debug_sync=reset;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;


@ -1,3 +1,5 @@
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
#
# Bug#21869656 UNDO LOG DOES NOT CONTAIN ENOUGH INFORMATION
# ON INDEXED VIRTUAL COLUMNS
@ -21,6 +23,7 @@ connection con1;
COMMIT;
UPDATE t1 SET a=1;
connection default;
InnoDB 0 transactions not purged
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@ -119,6 +122,7 @@ connection con1;
COMMIT;
disconnect con1;
connection default;
InnoDB 0 transactions not purged
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@ -138,3 +142,4 @@ CREATE TABLE t1 (a VARCHAR(30), b INT, a2 VARCHAR(30) GENERATED ALWAYS AS (a) VI
CREATE INDEX idx ON t1(a2(10), b, a2(20));
ERROR 42S21: Duplicate column name 'a2'
DROP TABLE t1;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;


@ -5,6 +5,9 @@
set default_storage_engine=innodb;
set @old_dbug=@@global.debug_dbug;
# Ensure that the history list length will actually be decremented by purge.
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
CREATE TABLE `t` (
`a` BLOB,
@ -21,11 +24,10 @@ CREATE INDEX idx ON t(c(100));
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t SET a = REPEAT('m', 16000) WHERE a like "aaa%";
select sleep(3);
--source ../../innodb/include/wait_all_purged.inc
SET global debug_dbug=@old_dbug;
DROP TABLE t;
CREATE TABLE t (
a TINYBLOB,
b TINYBLOB,
@ -41,7 +43,7 @@ CREATE INDEX idx ON t(c(100));
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t SET a = REPEAT('m', 100) WHERE a like "aaa%";
select sleep(3);
--source ../../innodb/include/wait_all_purged.inc
SET global debug_dbug=@old_dbug;
DROP TABLE t;
@ -68,7 +70,7 @@ CREATE INDEX idx ON t1(x);
SET global debug_dbug="+d,ib_purge_virtual_index_callback";
UPDATE t1 SET id = 10 WHERE id = 1;
select sleep(3);
--source ../../innodb/include/wait_all_purged.inc
SET global debug_dbug=@old_dbug;
DROP TABLE t1;
@ -109,7 +111,7 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted';
COMMIT;
--echo # wait for purge to process the deleted records.
--source include/wait_innodb_all_purged.inc
--source ../../innodb/include/wait_all_purged.inc
SET DEBUG_SYNC= 'now SIGNAL purged';
@ -154,7 +156,7 @@ INSERT INTO t1(a, b) VALUES (8, 8);
COMMIT;
--echo # wait for purge to process the deleted/updated records.
--source include/wait_innodb_all_purged.inc
--source ../../innodb/include/wait_all_purged.inc
SET DEBUG_SYNC= 'now SIGNAL purged';
@ -175,20 +177,24 @@ DROP TABLE t0, t1;
create table t (a blob, b blob, c blob as (concat(a,b)), h varchar(10), index (c(100)));
insert t(a,b,h) values (repeat('g', 16000), repeat('x', 16000), "kk");
insert t(a,b,h) values (repeat('a', 16000), repeat('b', 16000), "mm");
set global innodb_purge_stop_now = 1;
set global debug_dbug="+d,ib_purge_virtual_index_callback";
connect(prevent_purge, localhost, root);
start transaction with consistent snapshot;
connection default;
update t set a = repeat('m', 16000) where a like "aaa%";
connect(con1, localhost, root);
lock table t write;
disconnect prevent_purge;
connection default;
set global innodb_purge_run_now=1;
sleep 3;
select variable_value>1 from information_schema.global_status where variable_name='innodb_purge_trx_id_age';
disconnect con1;
sleep 3;
start transaction with consistent snapshot;
commit;
--source ../../innodb/include/wait_all_purged.inc
select variable_value>1 from information_schema.global_status where variable_name='innodb_purge_trx_id_age';
set global debug_dbug=@old_dbug;
drop table t;
--source include/wait_until_count_sessions.inc
set debug_sync=reset;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
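
The prevent_purge connection introduced in this hunk replaces the innodb_purge_stop_now/innodb_purge_run_now debug toggles that the old version of the test relied on. A condensed, commented sketch of the pattern (connection name and include as used in the test; the comments are explanatory only):

connect (prevent_purge, localhost, root);
# An open consistent-snapshot read view pins the oldest view that purge may
# use, so undo records created after this point cannot be purged yet.
start transaction with consistent snapshot;
connection default;
# ... perform the UPDATE whose old row versions the test needs to keep around ...
disconnect prevent_purge;
# With the read view gone, purge can catch up; wait until it has.
--source include/wait_all_purged.inc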


@ -1,6 +1,10 @@
--source include/have_innodb.inc
--source include/count_sessions.inc
# Ensure that the history list length will actually be decremented by purge.
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
--echo #
--echo # Bug#21869656 UNDO LOG DOES NOT CONTAIN ENOUGH INFORMATION
--echo # ON INDEXED VIRTUAL COLUMNS
@ -32,8 +36,7 @@ COMMIT;
UPDATE t1 SET a=1;
connection default;
# wait for purge to process the update_undo record (in debug builds)
--source include/wait_innodb_all_purged.inc
--source ../../innodb/include/wait_all_purged.inc
CHECK TABLE t1;
SELECT b1 FROM t1;
@ -118,8 +121,7 @@ COMMIT;
disconnect con1;
connection default;
# wait for purge to process the update_undo record (in debug builds)
--source include/wait_innodb_all_purged.inc
--source ../../innodb/include/wait_all_purged.inc
CHECK TABLE t1;
SELECT b1 FROM t1;
@ -136,3 +138,4 @@ CREATE INDEX idx ON t1(a2(10), b, a2(20));
DROP TABLE t1;
--source include/wait_until_count_sessions.inc
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;


@ -6,7 +6,6 @@
#
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_innodb_16k.inc
# turn on flags
@ -47,17 +46,17 @@ INFORMATION_SCHEMA.INNODB_BUFFER_PAGE s2
where s1.SPACE = s2.SPACE AND NAME like 'test/tab1%'
and PAGE_TYPE = "INDEX" order by PAGE_NUMBER, NUMBER_RECORDS;
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 12;
delete from tab1 where a = 13;
delete from tab1 where a = 14;
delete from tab1 where a = 5;
delete from tab1 where a = 6;
delete from tab1 where a = 7;
set global innodb_purge_run_now=ON;
commit;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
# not merged yet
# | 1,2,3,4 | 8,9,10,11 |
@ -72,34 +71,27 @@ INFORMATION_SCHEMA.INNODB_BUFFER_PAGE s2
where s1.SPACE = s2.SPACE AND NAME like 'test/tab1%'
and PAGE_TYPE = "INDEX" order by PAGE_NUMBER, NUMBER_RECORDS;
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 11;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 10;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=35 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 9;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=25 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics


@ -7,7 +7,6 @@
#
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_innodb_16k.inc
# turn on flags
@ -79,7 +78,7 @@ INFORMATION_SCHEMA.INNODB_BUFFER_PAGE s2
where s1.SPACE = s2.SPACE AND NAME like 'test/tab1%'
and PAGE_TYPE = "INDEX" order by PAGE_NUMBER, NUMBER_RECORDS;
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 33;
delete from tab1 where a = 34;
delete from tab1 where a = 35;
@ -100,10 +99,10 @@ delete from tab1 where a = 18;
delete from tab1 where a = 19;
delete from tab1 where a = 20;
delete from tab1 where a = 21;
set global innodb_purge_run_now=ON;
commit;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
# secondary index is not merged yet
# | 1,..,11 | 22,..,32 |
@ -119,33 +118,27 @@ where s1.SPACE = s2.SPACE AND NAME like 'test/tab1%'
and PAGE_TYPE = "INDEX" order by PAGE_NUMBER, NUMBER_RECORDS;
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 32;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 31;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=45 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 30;
set global innodb_purge_run_now=ON;
# wait for purge view progress (records are deleted actually by purge)
--source include/wait_innodb_all_purged.inc
--source include/wait_all_purged.inc
--echo # check page merge happens (MERGE_THRESHOLD=40 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics


@ -6,7 +6,6 @@
#
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_innodb_16k.inc
# turn on flags


@ -1,3 +1,5 @@
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
CREATE TABLE tab(a BIGINT PRIMARY KEY,c1 TINYTEXT,c2 TEXT,c3 MEDIUMTEXT,
c4 TINYBLOB,c5 BLOB,c6 MEDIUMBLOB,c7 LONGBLOB) ENGINE=InnoDB;
CREATE INDEX index1 ON tab(c1(255)) COMMENT 'Check index level merge MERGE_THRESHOLD=51';
@ -205,14 +207,15 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 7
5 7
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 12;
delete from tab1 where a = 13;
delete from tab1 where a = 14;
delete from tab1 where a = 5;
delete from tab1 where a = 6;
delete from tab1 where a = 7;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -228,27 +231,24 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 4
5 4
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 11;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 10;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=35 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 2
index_page_merge_successful 2
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 9;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=25 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -288,14 +288,15 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 7
5 7
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 12;
delete from tab1 where a = 13;
delete from tab1 where a = 14;
delete from tab1 where a = 5;
delete from tab1 where a = 6;
delete from tab1 where a = 7;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -311,27 +312,24 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 4
5 4
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 11;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 10;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=35 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 9;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=25 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -371,14 +369,15 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 7
5 7
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 12;
delete from tab1 where a = 13;
delete from tab1 where a = 14;
delete from tab1 where a = 5;
delete from tab1 where a = 6;
delete from tab1 where a = 7;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -394,27 +393,24 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 4
5 4
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 11;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 10;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=35 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 9;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=25 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -459,14 +455,15 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 7
5 7
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 12;
delete from tab1 where a = 13;
delete from tab1 where a = 14;
delete from tab1 where a = 5;
delete from tab1 where a = 6;
delete from tab1 where a = 7;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -483,27 +480,24 @@ PAGE_NUMBER NUMBER_RECORDS
3 2
4 4
5 4
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 11;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 10;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=35 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 9;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=25 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -872,7 +866,7 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 21
28 21
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 33;
delete from tab1 where a = 34;
delete from tab1 where a = 35;
@ -893,7 +887,8 @@ delete from tab1 where a = 18;
delete from tab1 where a = 19;
delete from tab1 where a = 20;
delete from tab1 where a = 21;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -910,27 +905,24 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 11
28 11
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 32;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 31;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=45 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 2
index_page_merge_successful 2
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 30;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=40 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1000,7 +992,7 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 21
28 21
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 33;
delete from tab1 where a = 34;
delete from tab1 where a = 35;
@ -1021,7 +1013,8 @@ delete from tab1 where a = 18;
delete from tab1 where a = 19;
delete from tab1 where a = 20;
delete from tab1 where a = 21;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1038,27 +1031,24 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 11
28 11
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 32;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 31;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=45 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 30;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=40 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1128,7 +1118,7 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 21
28 21
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 33;
delete from tab1 where a = 34;
delete from tab1 where a = 35;
@ -1149,7 +1139,8 @@ delete from tab1 where a = 18;
delete from tab1 where a = 19;
delete from tab1 where a = 20;
delete from tab1 where a = 21;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1166,27 +1157,24 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 11
28 11
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 32;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 31;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=45 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 30;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=40 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1257,7 +1245,7 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 21
28 21
set global innodb_purge_stop_now=ON;
begin;
delete from tab1 where a = 33;
delete from tab1 where a = 34;
delete from tab1 where a = 35;
@ -1278,7 +1266,8 @@ delete from tab1 where a = 18;
delete from tab1 where a = 19;
delete from tab1 where a = 20;
delete from tab1 where a = 21;
set global innodb_purge_run_now=ON;
commit;
InnoDB 0 transactions not purged
# check page merge happens (nothing is expected)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1295,27 +1284,24 @@ PAGE_NUMBER NUMBER_RECORDS
4 2
27 11
28 11
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 32;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=50 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 0
index_page_merge_successful 0
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 31;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=45 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
name count_reset
index_page_merge_attempts 1
index_page_merge_successful 1
set global innodb_purge_stop_now=ON;
delete from tab1 where a = 30;
set global innodb_purge_run_now=ON;
InnoDB 0 transactions not purged
# check page merge happens (MERGE_THRESHOLD=40 causes merge here)
SELECT name,count_reset FROM information_schema.innodb_metrics
WHERE name like 'index_page_merge_%';
@ -1323,3 +1309,4 @@ name count_reset
index_page_merge_attempts 2
index_page_merge_successful 2
DROP TABLE tab1;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
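For reference, the MERGE_THRESHOLD values exercised by these results are declared through index comments; a minimal sketch of the mechanism (table and index names here are illustrative, not taken from the test):
-- A page in idx_b becomes a merge candidate when it falls below 45% full
-- (the built-in default threshold is 50).
CREATE TABLE t_demo (a INT PRIMARY KEY, b VARCHAR(255)) ENGINE=InnoDB;
CREATE INDEX idx_b ON t_demo(b) COMMENT 'MERGE_THRESHOLD=45';
-- The counters checked throughout the result file:
SELECT name, count_reset FROM information_schema.innodb_metrics
WHERE name LIKE 'index_page_merge_%';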

View file

@ -1,5 +1,3 @@
DROP TABLE if exists t1;
DROP TABLE if exists t2;
Testing tables with large records
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
INSERT INTO t1 VALUES (1, REPEAT('A', 256), REPEAT('B', 256));
@ -13,6 +11,7 @@ INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
SET GLOBAL innodb_fast_shutdown = 0;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
@ -57,6 +56,7 @@ insert into t1 values (204, REPEAT('A', 256), REPEAT('B', 256));
DROP TABLE t1;
Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
SET GLOBAL innodb_fast_shutdown = 0;
optimize table t2;
Table Op Msg_type Msg_text
test.t2 optimize status OK

View file

@ -11,9 +11,11 @@
# Check actual behavior for table, partitioned table and temporary table
# #############################################################
--source include/have_innodb_16k.inc
--source include/have_debug.inc
--source include/have_partition.inc
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
# Check index merge threshold by create index on all datatypes
CREATE TABLE tab(a BIGINT PRIMARY KEY,c1 TINYTEXT,c2 TEXT,c3 MEDIUMTEXT,
@ -187,3 +189,4 @@ CREATE INDEX index1 ON tab1(b(750)) COMMENT 'MERGE_THRESHOLD=45';
--source suite/innodb/include/innodb_merge_threshold_secondary.inc
DROP TABLE tab1;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;

View file

@ -4,11 +4,6 @@
--source include/not_embedded.inc
--source include/have_innodb_16k.inc
--disable_warnings
DROP TABLE if exists t1;
DROP TABLE if exists t2;
--enable_warnings
--echo Testing tables with large records
# Create table.
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), c VARCHAR(256), KEY SECOND(a, b,c)) ENGINE=INNODB;
@ -36,7 +31,7 @@ while ($size)
}
--enable_query_log
--source include/wait_innodb_all_purged.inc
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t1;
@ -135,7 +130,7 @@ while ($size)
}
--enable_query_log
--source include/wait_innodb_all_purged.inc
SET GLOBAL innodb_fast_shutdown = 0;
--source include/restart_mysqld.inc
optimize table t2;
@ -206,5 +201,3 @@ if ($second_before == $second_after) {
}
DROP TABLE t2;
--source include/wait_innodb_all_purged.inc

View file

@ -31,6 +31,7 @@ allow-mismatches 0
write crc32
page-type-summary FALSE
page-type-dump MYSQLTEST_VARDIR/tmp/dump.txt
per-page-details FALSE
log (No default value)
leaf FALSE
merge 0
@ -41,7 +42,7 @@ innochecksum Ver #.#.#
Copyright (c) YEAR, YEAR , Oracle, MariaDB Corporation Ab and others.
InnoDB offline file checksum utility.
Usage: innochecksum [-c] [-s <start page>] [-e <end page>] [-p <page>] [-v] [-a <allow mismatches>] [-n] [-C <strict-check>] [-w <write>] [-S] [-D <page type dump>] [-l <log>] [-e] <filename or [-]>
Usage: innochecksum [-c] [-s <start page>] [-e <end page>] [-p <page>] [-i] [-v] [-a <allow mismatches>] [-n] [-C <strict-check>] [-w <write>] [-S] [-D <page type dump>] [-l <log>] [-l] [-m <merge pages>] <filename or [-]>
-?, --help Displays this help and exits.
-I, --info Synonym for --help.
-V, --version Displays version information and exits.
@ -62,8 +63,10 @@ Usage: innochecksum [-c] [-s <start page>] [-e <end page>] [-p <page>] [-v] [-a
Display a count of each page type in a tablespace.
-D, --page-type-dump=name
Dump the page type info for each page in a tablespace.
-i, --per-page-details
Print out per-page detail information.
-l, --log=name log output.
-e, --leaf Examine leaf index pages
-f, --leaf Examine leaf index pages
-m, --merge=# leaf page count if merge given number of consecutive
pages
@ -81,6 +84,7 @@ allow-mismatches 0
write crc32
page-type-summary FALSE
page-type-dump (No default value)
per-page-details FALSE
log (No default value)
leaf FALSE
merge 0

View file

@ -1,5 +1,6 @@
# Set the environmental variables
call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts");
call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to.*");
[1]: Further Test are for rewrite checksum (innodb|crc32|none) for all ibd file & start the server.
CREATE TABLE tab1 (pk INTEGER NOT NULL PRIMARY KEY,
linestring_key GEOMETRY NOT NULL,
@ -105,6 +106,7 @@ File::tab#.ibd
# Page compressed page
# Page compressed encrypted page
# Other type of page
===============================================
Additional information:
Undo page type: # insert, # update, # other
@ -139,6 +141,7 @@ File::tab#.ibd
# Page compressed page
# Page compressed encrypted page
# Other type of page
===============================================
Additional information:
Undo page type: # insert, # update, # other
@ -160,14 +163,14 @@ Filename::tab#.ibd
==============================================================================
PAGE_NO | PAGE_TYPE | EXTRA INFO
==============================================================================
#:: # | File Space Header | -
#:: # | Insert Buffer Bitmap | -
#:: # | Inode page | -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Freshly allocated page | -
#::# | File Space Header | -
#::# | Insert Buffer Bitmap | -
#::# | Inode page | -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Freshly allocated page | -
# Variables used by page type dump for ibdata1
Variables (--variable-name=value)
@ -184,6 +187,7 @@ allow-mismatches 0
write crc32
page-type-summary FALSE
page-type-dump MYSQLTEST_VARDIR/tmp/dump.txt
per-page-details FALSE
log (No default value)
leaf FALSE
merge 0
@ -194,14 +198,14 @@ Filename::tab#.ibd
==============================================================================
PAGE_NO | PAGE_TYPE | EXTRA INFO
==============================================================================
#:: # | File Space Header | -
#:: # | Insert Buffer Bitmap | -
#:: # | Inode page | -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Index page | index id=#, page level=# leaf #, No. of records=#, garbage=#, n_recs=#, -
#:: # | Freshly allocated page | -
#::# | File Space Header | -
#::# | Insert Buffer Bitmap | -
#::# | Inode page | -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Index page | index id=#, page level=#, No. of records=#, garbage=#, -
#::# | Freshly allocated page | -
[6]: check the valid lower bound values for option
# allow-mismatches,page,start-page,end-page
[9]: check the both short and long options "page" and "start-page" when

View file

@ -15,7 +15,9 @@
let MYSQLD_BASEDIR= `SELECT @@basedir`;
let MYSQLD_DATADIR= `SELECT @@datadir`;
let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err;
call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts");
call mtr.add_suppression("InnoDB: innodb_checksum_algorithm is set to.*");
--echo [1]: Further Test are for rewrite checksum (innodb|crc32|none) for all ibd file & start the server.

View file

@ -25,7 +25,7 @@ ID NAME DESCRIPTION MAX_MONTH_NAME_LENGTH MAX_DAY_NAME_LENGTH DECIMAL_POINT THOU
22 gl_ES Galician - Galician 8 8 , english
23 gu_IN Gujarati - India 10 8 . , english
24 he_IL Hebrew - Israel 7 5 . , english
25 hi_IN Hindi - India 7 9 . , english
25 hi_IN Hindi - India 7 9 . , hindi
26 hr_HR Croatian - Croatia 8 11 , english
27 hu_HU Hungarian - Hungary 10 9 , . hungarian
28 id_ID Indonesian - Indonesia 9 6 , . english
@ -138,7 +138,7 @@ Id Name Description Error_Message_Language
22 gl_ES Galician - Galician english
23 gu_IN Gujarati - India english
24 he_IL Hebrew - Israel english
25 hi_IN Hindi - India english
25 hi_IN Hindi - India hindi
26 hr_HR Croatian - Croatia english
27 hu_HU Hungarian - Hungary hungarian
28 id_ID Indonesian - Indonesia english

View file

@ -99,7 +99,7 @@ DROP VIEW v1;
ERROR 42S02: Unknown VIEW: 'test.v1'
DROP VIEW IF EXISTS v2;
Warnings:
Note 4068 Unknown VIEW: 'test.v2'
Note 4089 Unknown VIEW: 'test.v2'
# Syncing slave with master
connection slave;
SELECT * FROM v1;

View file

@ -128,7 +128,7 @@ show warnings;
Level Code Message
Error 1062 Duplicate entry '20' for key 'a'
Warning 1196 Some non-transactional changed tables couldn't be rolled back
Note 4070 At line 4 in mysqltest1.foo4
Note 4091 At line 4 in mysqltest1.foo4
select * from t2;
a
20
@ -291,7 +291,7 @@ end|
do fn1(100);
Warnings:
Error 1062 Duplicate entry '100' for key 'a'
Note 4070 At line 3 in mysqltest1.fn1
Note 4091 At line 3 in mysqltest1.fn1
Warning 1196 Some non-transactional changed tables couldn't be rolled back
select fn1(20);
ERROR 23000: Duplicate entry '20' for key 'a'

View file

@ -212,7 +212,7 @@ ERROR 42S02: 'test.t1' is not a SEQUENCE
drop table t1;
alter sequence if exists t1 minvalue=100;
Warnings:
Note 4067 Unknown SEQUENCE: 'test.t1'
Note 4088 Unknown SEQUENCE: 'test.t1'
alter sequence t1 minvalue=100;
ERROR 42S02: Table 'test.t1' doesn't exist
create sequence t1;

View file

@ -165,7 +165,7 @@ drop sequence t1;
ERROR 42S02: 'test.t1' is not a SEQUENCE
drop sequence if exists t1;
Warnings:
Note 4067 Unknown SEQUENCE: 'test.t1'
Note 4088 Unknown SEQUENCE: 'test.t1'
create sequence t1 start with 10 maxvalue=9;
ERROR HY000: Sequence 'test.t1' values are conflicting
create sequence t1 minvalue= 100 maxvalue=10;
@ -377,7 +377,7 @@ key key1 (next_not_cached_value)
ERROR HY000: Sequence 'test.t1' table structure is invalid (Sequence tables cannot have any keys)
drop sequence if exists t1;
Warnings:
Note 4067 Unknown SEQUENCE: 'test.t1'
Note 4088 Unknown SEQUENCE: 'test.t1'
create sequence t1;
create sequence t2;
create table t3 (a int) engine=myisam;
@ -387,8 +387,8 @@ CREATE SEQUENCE s1;
drop sequence s1;
drop sequence if exists t1,t2,t3,t4;
Warnings:
Note 4067 Unknown SEQUENCE: 'test.t3'
Note 4067 Unknown SEQUENCE: 'test.t4'
Note 4088 Unknown SEQUENCE: 'test.t3'
Note 4088 Unknown SEQUENCE: 'test.t4'
drop table if exists t1,t2,t3;
Warnings:
Note 1051 Unknown table 'test.t1'
@ -414,9 +414,9 @@ CREATE TABLE t2 (a int);
CREATE SEQUENCE s1;
drop sequence if exists t1,t2,s1,s2;
Warnings:
Note 4067 Unknown SEQUENCE: 'test.t1'
Note 4067 Unknown SEQUENCE: 'test.t2'
Note 4067 Unknown SEQUENCE: 'test.s2'
Note 4088 Unknown SEQUENCE: 'test.t1'
Note 4088 Unknown SEQUENCE: 'test.t2'
Note 4088 Unknown SEQUENCE: 'test.s2'
drop table if exists t1,t2;
CREATE TEMPORARY SEQUENCE s1;
DROP SEQUENCE s1;

View file

@ -138,6 +138,11 @@ select json_keys('{"a":{"c":1, "d":2}, "b":2}');
select json_keys('{"a":{"c":1, "d":2}, "b":2}', "$.a");
select json_keys('{"a":{"c":1, "d":2}, "b":2}', "$.b");
select json_keys('foo');
#
# mdev-12789 JSON_KEYS returns duplicate keys twice
#
select json_keys('{"a":{"c":1, "d":2}, "b":2, "c":1, "a":3, "b":1, "c":2}');
select json_keys('{"c1": "value 1", "c1": "value 2"}');
SET @j = '["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]';
select json_search(@j, 'one', 'abc');
@ -296,6 +301,22 @@ INSERT INTO t1 VALUES (0);
SELECT JSON_KEYS(f) FROM t1 ORDER BY 1;
DROP TABLE t1;
#
# MDEV-12324 Wrong result (phantom array value) on JSON_EXTRACT.
#
SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' );
SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*]' );
#
# MDEV-12604 Comparison of JSON_EXTRACT result differs with Mysql.
#
select JSON_EXTRACT('{"name":"value"}', '$.name') = 'value';
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = true;
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = false;
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = 1;
select JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"');
--echo #
--echo # Start of 10.3 tests
--echo #

View file

@ -23,6 +23,23 @@ SELECT st_astext(st_geomfromgeojson('{"type""point"}'));
SELECT st_astext(st_geomfromgeojson('{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] } }'));
SELECT st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}'));
--error ER_WRONG_VALUE_FOR_TYPE
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',5));
--error ER_GIS_INVALID_DATA
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',1));
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2));
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3));
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4));
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2);
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1);
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10);
SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1);
SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5);
--echo #
--echo # End of 10.2 tests
--echo #

View file

@ -1954,6 +1954,17 @@ UNION ALL
;
DROP TABLE t1;
--echo #
--echo # A regression after MDEV-13351:
--echo # MDEV-13374 : Server crashes in first_linear_tab / st_select_lex::set_explain_type
--echo # upon UNION with aggregate function
--echo #
CREATE TABLE t1 (i INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1),(2);
SELECT i AS fld FROM t1 UNION SELECT COUNT(*) AS fld FROM t1;
DROP TABLE t1;
--echo #
--echo # Start of 10.3 tests
--echo #

View file

@ -3361,6 +3361,12 @@ void handler::print_error(int error, myf errflag)
DBUG_ENTER("handler::print_error");
DBUG_PRINT("enter",("error: %d",error));
if (ha_thd()->transaction_rollback_request)
{
/* Ensure this becomes a true error */
errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO);
}
int textno= -1; // impossible value
switch (error) {
case EACCES:
@ -3506,10 +3512,14 @@ void handler::print_error(int error, myf errflag)
textno=ER_LOCK_TABLE_FULL;
break;
case HA_ERR_LOCK_DEADLOCK:
textno=ER_LOCK_DEADLOCK;
/* cannot continue. the statement was already aborted in the engine */
SET_FATAL_ERROR;
break;
{
String str, full_err_msg(ER_DEFAULT(ER_LOCK_DEADLOCK), system_charset_info);
get_error_message(error, &str);
full_err_msg.append(str);
my_printf_error(ER_LOCK_DEADLOCK, "%s", errflag, full_err_msg.c_ptr_safe());
DBUG_VOID_RETURN;
}
case HA_ERR_READ_ONLY_TRANSACTION:
textno=ER_READ_ONLY_TRANSACTION;
break;

View file

@ -545,6 +545,16 @@ bool Arg_comparator::set_cmp_func_string()
if (owner->agg_arg_charsets_for_comparison(&m_compare_collation, a, b))
return true;
}
if ((*a)->is_json_type() ^ (*b)->is_json_type())
{
Item **j_item= (*a)->is_json_type() ? a : b;
Item *uf= new(thd->mem_root) Item_func_json_unquote(thd, *j_item);
if (!uf || uf->fix_fields(thd, &uf))
return 1;
*j_item= uf;
}
a= cache_converted_constant(thd, a, &a_cache, compare_type_handler());
b= cache_converted_constant(thd, b, &b_cache, compare_type_handler());
return false;
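Seen from SQL, this unquoting step is what makes the comparisons added in func_json.test above succeed; a short sketch, with the results being the ones expected after this change:
-- Exactly one operand is a JSON value, so it is implicitly unquoted first.
SELECT JSON_EXTRACT('{"name":"value"}', '$.name') = 'value';  -- expected 1
SELECT JSON_EXTRACT('{"asdf":true}', '$."asdf"') = true;      -- expected 1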

View file

@ -122,13 +122,27 @@ String *Item_func_geometry_from_json::val_str(String *str)
Geometry_buffer buffer;
String *js= args[0]->val_str_ascii(&tmp_js);
uint32 srid= 0;
longlong options= 0;
json_engine_t je;
if ((null_value= args[0]->null_value))
return 0;
if ((arg_count == 2) && !args[1]->null_value)
srid= (uint32)args[1]->val_int();
if (arg_count > 1 && !args[1]->null_value)
{
options= args[1]->val_int();
if (options > 4 || options < 1)
{
String *sv= args[1]->val_str(&tmp_js);
my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0),
"option", sv->c_ptr(), "ST_GeometryFromJSON");
null_value= 1;
return 0;
}
}
if ((arg_count == 3) && !args[2]->null_value)
srid= (uint32)args[2]->val_int();
str->set_charset(&my_charset_bin);
if (str->reserve(SRID_SIZE, 512))
@ -139,7 +153,7 @@ String *Item_func_geometry_from_json::val_str(String *str)
json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
(const uchar *) js->end());
if ((null_value= !Geometry::create_from_json(&buffer, &je, str)))
if ((null_value= !Geometry::create_from_json(&buffer, &je, options==1, str)))
{
int code= 0;
@ -154,6 +168,9 @@ String *Item_func_geometry_from_json::val_str(String *str)
case Geometry::GEOJ_POLYGON_NOT_CLOSED:
code= ER_GEOJSON_NOT_CLOSED;
break;
case Geometry::GEOJ_DIMENSION_NOT_SUPPORTED:
my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON");
break;
default:
report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN);
return NULL;
@ -233,6 +250,8 @@ String *Item_func_as_geojson::val_str_ascii(String *str)
DBUG_ASSERT(fixed == 1);
String arg_val;
String *swkb= args[0]->val_str(&arg_val);
uint max_dec= FLOATING_POINT_DECIMALS;
longlong options= 0;
Geometry_buffer buffer;
Geometry *geom= NULL;
const char *dummy;
@ -242,12 +261,41 @@ String *Item_func_as_geojson::val_str_ascii(String *str)
!(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length())))))
return 0;
if (arg_count > 1)
{
max_dec= (uint) args[1]->val_int();
if (args[1]->null_value)
max_dec= FLOATING_POINT_DECIMALS;
if (arg_count > 2)
{
options= args[2]->val_int();
if (args[2]->null_value)
options= 0;
}
}
str->length(0);
str->set_charset(&my_charset_latin1);
if ((null_value= geom->as_json(str, FLOATING_POINT_DECIMALS, &dummy)))
if (str->reserve(1, 512))
return 0;
str->qs_append('{');
if (options & 1)
{
if (geom->bbox_as_json(str) || str->append(", ", 2))
goto error;
}
if ((geom->as_json(str, max_dec, &dummy) || str->append("}", 1)))
goto error;
return str;
error:
null_value= 1;
return 0;
}
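In SQL terms (mirroring the gis-json.test additions above): the option argument of ST_GeomFromGeoJSON must lie in 1..4, option 1 rejects coordinates with a third dimension, and options 2..4 appear to accept them while ignoring the extra coordinate; the second argument of ST_AsGeoJSON bounds the number of decimal digits. A sketch:
-- Rejected: option out of range.
SELECT ST_AsText(ST_GeomFromGeoJSON('{"type": "Point", "coordinates": [5.3, 15.0, 4.3]}', 5));
-- Accepted: the third coordinate is dropped.
SELECT ST_AsText(ST_GeomFromGeoJSON('{"type": "Point", "coordinates": [5.3, 15.0, 4.3]}', 2));
-- Output rounded to 2 decimal digits.
SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'), 2);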

View file

@ -587,24 +587,40 @@ void Item_func_json_unquote::fix_length_and_dec()
}
String *Item_func_json_unquote::val_str(String *str)
String *Item_func_json_unquote::read_json(json_engine_t *je)
{
String *js= args[0]->val_json(&tmp_s);
json_engine_t je;
int c_len;
if ((null_value= args[0]->null_value))
return NULL;
return 0;
json_scan_start(&je, js->charset(),(const uchar *) js->ptr(),
json_scan_start(je, js->charset(),(const uchar *) js->ptr(),
(const uchar *) js->ptr() + js->length());
je.value_type= (enum json_value_types) -1; /* To report errors right. */
je->value_type= (enum json_value_types) -1; /* To report errors right. */
if (json_read_value(&je))
if (json_read_value(je))
goto error;
if (je.value_type != JSON_VALUE_STRING)
return js;
error:
if (je->value_type == JSON_VALUE_STRING)
report_json_error(js, je, 0);
return js;
}
String *Item_func_json_unquote::val_str(String *str)
{
json_engine_t je;
int c_len;
String *js;
if (!(js= read_json(&je)))
return NULL;
if (je.s.error || je.value_type != JSON_VALUE_STRING)
return js;
str->length(0);
@ -621,13 +637,86 @@ String *Item_func_json_unquote::val_str(String *str)
return str;
error:
if (je.value_type == JSON_VALUE_STRING)
report_json_error(js, &je, 0);
/* We just return the argument's value in the case of error. */
report_json_error(js, &je, 0);
return js;
}
double Item_func_json_unquote::val_real()
{
json_engine_t je;
double d= 0.0;
String *js;
if ((js= read_json(&je)) != NULL)
{
switch (je.value_type)
{
case JSON_VALUE_NUMBER:
{
char *end;
int err;
d= my_strntod(je.s.cs, (char *) je.value, je.value_len, &end, &err);
break;
}
case JSON_VALUE_TRUE:
d= 1.0;
break;
case JSON_VALUE_STRING:
{
char *end;
int err;
d= my_strntod(js->charset(), (char *) js->ptr(), js->length(),
&end, &err);
break;
}
default:
break;
};
}
return d;
}
longlong Item_func_json_unquote::val_int()
{
json_engine_t je;
longlong i= 0;
String *js;
if ((js= read_json(&je)) != NULL)
{
switch (je.value_type)
{
case JSON_VALUE_NUMBER:
{
char *end;
int err;
i= my_strntoll(je.s.cs, (char *) je.value, je.value_len, 10,
&end, &err);
break;
}
case JSON_VALUE_TRUE:
i= 1;
break;
case JSON_VALUE_STRING:
{
char *end;
int err;
i= my_strntoll(js->charset(), (char *) js->ptr(), js->length(), 10,
&end, &err);
break;
}
default:
break;
};
}
return i;
}
static int alloc_tmp_paths(THD *thd, uint n_paths,
json_path_with_flags **paths,String **tmp_paths)
{
@ -1397,6 +1486,8 @@ void Item_func_json_array::fix_length_and_dec()
ulonglong char_length= 2;
uint n_arg;
result_limit= 0;
if (arg_count == 0)
{
collation.set(&my_charset_utf8_general_ci);
@ -1413,7 +1504,6 @@ void Item_func_json_array::fix_length_and_dec()
fix_char_length_ulonglong(char_length);
tmp_val.set_charset(collation.collation);
result_limit= 0;
}
@ -2692,6 +2782,41 @@ void Item_func_json_keys::fix_length_and_dec()
}
/*
This function is for Item_func_json_keys::val_str() exclusively.
It relies on the fact that the resulting string has a specific format:
["key1", "key2"...]
*/
static int check_key_in_list(String *res,
const uchar *key, int key_len)
{
const uchar *c= (const uchar *) res->ptr() + 2; /* beginning '["' */
const uchar *end= (const uchar *) res->end() - 1; /* ending '"' */
while (c < end)
{
int n_char;
for (n_char=0; c[n_char] != '"' && n_char < key_len; n_char++)
{
if (c[n_char] != key[n_char])
break;
}
if (c[n_char] == '"')
{
if (n_char == key_len)
return 1;
}
else
{
while (c[n_char] != '"')
n_char++;
}
c+= n_char + 4; /* skip ', "' */
}
return 0;
}
String *Item_func_json_keys::val_str(String *str)
{
json_engine_t je;
@ -2748,6 +2873,7 @@ skip_search:
while (json_scan_next(&je) == 0 && je.state != JST_OBJ_END)
{
const uchar *key_start, *key_end;
int key_len;
switch (je.state)
{
@ -2757,13 +2883,19 @@ skip_search:
{
key_end= je.s.c_str;
} while (json_read_keyname_chr(&je) == 0);
if (je.s.error ||
(n_keys > 0 && str->append(", ", 2)) ||
if (je.s.error)
goto err_return;
key_len= key_end - key_start;
if (!check_key_in_list(str, key_start, key_len))
{
if ((n_keys > 0 && str->append(", ", 2)) ||
str->append("\"", 1) ||
append_simple(str, key_start, key_end - key_start) ||
append_simple(str, key_start, key_len) ||
str->append("\"", 1))
goto err_return;
n_keys++;
n_keys++;
}
break;
case JST_OBJ_START:
case JST_ARRAY_START:

View file

@ -125,12 +125,14 @@ class Item_func_json_unquote: public Item_str_func
{
protected:
String tmp_s;
String *read_json(json_engine_t *je);
public:
Item_func_json_unquote(THD *thd, Item *s): Item_str_func(thd, s) {}
const char *func_name() const { return "json_unquote"; }
void fix_length_and_dec();
String *val_str(String *);
double val_real();
longlong val_int();
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
{ return get_item_copy<Item_func_json_unquote>(thd, mem_root, this); }
};

View file

@ -38,6 +38,7 @@ russian
czech
french
serbian
hindi
)
SET(files

File diff suppressed because it is too large Load diff

sql/sp.cc
View file

@ -711,6 +711,23 @@ Sp_handler::db_find_routine(THD *thd,
}
int
Sp_handler::db_find_and_cache_routine(THD *thd,
const Database_qualified_name *name,
sp_head **sp) const
{
int rc= db_find_routine(thd, name, sp);
if (rc == SP_OK)
{
sp_cache_insert(get_cache(thd), *sp);
DBUG_PRINT("info", ("added new: 0x%lx, level: %lu, flags %x",
(ulong) sp[0], sp[0]->m_recursion_level,
sp[0]->m_flags));
}
return rc;
}
/**
Silence DEPRECATED SYNTAX warnings when loading a stored procedure
into the cache.
@ -1335,6 +1352,26 @@ done:
}
static bool
append_suid(String *buf, enum_sp_suid_behaviour suid)
{
return suid == SP_IS_NOT_SUID &&
buf->append(STRING_WITH_LEN(" SQL SECURITY INVOKER\n"));
}
static bool
append_comment(String *buf, const LEX_CSTRING &comment)
{
if (!comment.length)
return false;
if (buf->append(STRING_WITH_LEN(" COMMENT ")))
return true;
append_unescaped(buf, comment.str, comment.length);
return buf->append('\n');
}
/**
Delete the record for the stored routine object from mysql.proc
and do binary logging.
@ -1711,6 +1748,85 @@ Sp_handler::sp_show_create_routine(THD *thd,
}
/*
In case of recursions, we create multiple copies of the same SP.
This method checks the current recursion depth.
If the recursion limit is exceeded, it raises an error
and returns NULL.
Otherwise, depending on the current recursion level, it:
- either returns the original SP,
- or makes and returns a new clone of the SP.
*/
sp_head *
Sp_handler::sp_clone_and_link_routine(THD *thd,
const Database_qualified_name *name,
sp_head *sp) const
{
DBUG_ENTER("sp_link_routine");
ulong level;
sp_head *new_sp;
LEX_CSTRING returns= empty_clex_str;
/*
String buffer for RETURNS data type must have system charset;
64 -- size of "returns" column of mysql.proc.
*/
String retstr(64);
retstr.set_charset(sp->get_creation_ctx()->get_client_cs());
DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp));
if (sp->m_first_free_instance)
{
DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x",
(ulong)sp->m_first_free_instance,
sp->m_first_free_instance->m_recursion_level,
sp->m_first_free_instance->m_flags));
DBUG_ASSERT(!(sp->m_first_free_instance->m_flags & sp_head::IS_INVOKED));
if (sp->m_first_free_instance->m_recursion_level > recursion_depth(thd))
{
recursion_level_error(thd, sp);
DBUG_RETURN(0);
}
DBUG_RETURN(sp->m_first_free_instance);
}
/*
Actually depth could be +1 than the actual value in case a SP calls
SHOW CREATE PROCEDURE. Hence, the linked list could hold up to one more
instance.
*/
level= sp->m_last_cached_sp->m_recursion_level + 1;
if (level > recursion_depth(thd))
{
recursion_level_error(thd, sp);
DBUG_RETURN(0);
}
if (type() == TYPE_ENUM_FUNCTION)
{
sp_returns_type(thd, retstr, sp);
returns= retstr.lex_cstring();
}
if (db_load_routine(thd, name, &new_sp,
sp->m_sql_mode, sp->m_params, returns,
sp->m_body, sp->chistics(),
sp->m_definer,
sp->m_created, sp->m_modified,
sp->get_creation_ctx()) == SP_OK)
{
sp->m_last_cached_sp->m_next_cached_sp= new_sp;
new_sp->m_recursion_level= level;
new_sp->m_first_instance= sp;
sp->m_last_cached_sp= sp->m_first_free_instance= new_sp;
DBUG_PRINT("info", ("added level: 0x%lx, level: %lu, flags %x",
(ulong)new_sp, new_sp->m_recursion_level,
new_sp->m_flags));
DBUG_RETURN(new_sp);
}
DBUG_RETURN(0);
}
/**
Obtain object representing stored procedure/function by its name from
stored procedures cache and looking into mysql.proc if needed.
@ -1731,88 +1847,18 @@ sp_head *
Sp_handler::sp_find_routine(THD *thd, const Database_qualified_name *name,
bool cache_only) const
{
sp_cache **cp= get_cache(thd);
sp_head *sp;
DBUG_ENTER("sp_find_routine");
DBUG_PRINT("enter", ("name: %.*s.%.*s type: %s cache only %d",
(int) name->m_db.length, name->m_db.str,
(int) name->m_name.length, name->m_name.str,
type_str(), cache_only));
sp_cache **cp= get_cache(thd);
sp_head *sp;
if ((sp= sp_cache_lookup(cp, name)))
{
ulong level;
sp_head *new_sp;
LEX_CSTRING returns= empty_clex_str;
/*
String buffer for RETURNS data type must have system charset;
64 -- size of "returns" column of mysql.proc.
*/
String retstr(64);
retstr.set_charset(sp->get_creation_ctx()->get_client_cs());
DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp));
if (sp->m_first_free_instance)
{
DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x",
(ulong)sp->m_first_free_instance,
sp->m_first_free_instance->m_recursion_level,
sp->m_first_free_instance->m_flags));
DBUG_ASSERT(!(sp->m_first_free_instance->m_flags & sp_head::IS_INVOKED));
if (sp->m_first_free_instance->m_recursion_level > recursion_depth(thd))
{
recursion_level_error(thd, sp);
DBUG_RETURN(0);
}
DBUG_RETURN(sp->m_first_free_instance);
}
/*
Actually depth could be +1 than the actual value in case a SP calls
SHOW CREATE PROCEDURE. Hence, the linked list could hold up to one more
instance.
*/
level= sp->m_last_cached_sp->m_recursion_level + 1;
if (level > recursion_depth(thd))
{
recursion_level_error(thd, sp);
DBUG_RETURN(0);
}
if (type() == TYPE_ENUM_FUNCTION)
{
sp_returns_type(thd, retstr, sp);
returns= retstr.lex_cstring();
}
if (db_load_routine(thd, name, &new_sp,
sp->m_sql_mode, sp->m_params, returns,
sp->m_body, sp->chistics(),
sp->m_definer,
sp->m_created, sp->m_modified,
sp->get_creation_ctx()) == SP_OK)
{
sp->m_last_cached_sp->m_next_cached_sp= new_sp;
new_sp->m_recursion_level= level;
new_sp->m_first_instance= sp;
sp->m_last_cached_sp= sp->m_first_free_instance= new_sp;
DBUG_PRINT("info", ("added level: 0x%lx, level: %lu, flags %x",
(ulong)new_sp, new_sp->m_recursion_level,
new_sp->m_flags));
DBUG_RETURN(new_sp);
}
DBUG_RETURN(0);
}
DBUG_RETURN(sp_clone_and_link_routine(thd, name, sp));
if (!cache_only)
{
if (db_find_routine(thd, name, &sp) == SP_OK)
{
sp_cache_insert(cp, sp);
DBUG_PRINT("info", ("added new: 0x%lx, level: %lu, flags %x",
(ulong)sp, sp->m_recursion_level,
sp->m_flags));
}
}
db_find_and_cache_routine(thd, name, &sp);
DBUG_RETURN(sp);
}
@ -2137,10 +2183,9 @@ int Sp_handler::sp_cache_routine(THD *thd,
DBUG_RETURN(SP_OK);
}
switch ((ret= db_find_routine(thd, name, sp)))
switch ((ret= db_find_and_cache_routine(thd, name, sp)))
{
case SP_OK:
sp_cache_insert(spc, *sp);
break;
case SP_KEY_NOT_FOUND:
ret= SP_OK;
@ -2243,14 +2288,8 @@ Sp_handler::show_create_sp(THD *thd, String *buf,
}
if (chistics.detistic)
buf->append(STRING_WITH_LEN(" DETERMINISTIC\n"));
if (chistics.suid == SP_IS_NOT_SUID)
buf->append(STRING_WITH_LEN(" SQL SECURITY INVOKER\n"));
if (chistics.comment.length)
{
buf->append(STRING_WITH_LEN(" COMMENT "));
append_unescaped(buf, chistics.comment.str, chistics.comment.length);
buf->append('\n');
}
append_suid(buf, chistics.suid);
append_comment(buf, chistics.comment);
buf->append(body);
thd->variables.sql_mode= old_sql_mode;
return false;
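The cloning logic factored out above only comes into play for recursive calls; a minimal SQL sketch of the situation it serves (procedure name and depth are illustrative):
-- Each concurrently active recursion level needs its own sp_head instance,
-- bounded by max_sp_recursion_depth (recursion is disabled when it is 0).
SET max_sp_recursion_depth = 3;
DELIMITER //
CREATE PROCEDURE p_countdown(IN n INT)
BEGIN
  IF n > 0 THEN
    CALL p_countdown(n - 1);
  END IF;
END//
DELIMITER ;
CALL p_countdown(3);  -- going deeper than the limit raises ER_SP_RECURSION_LIMIT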

View file

@ -60,6 +60,9 @@ class Sp_handler
TABLE *table) const;
int db_find_routine(THD *thd, const Database_qualified_name *name,
sp_head **sphp) const;
int db_find_and_cache_routine(THD *thd,
const Database_qualified_name *name,
sp_head **sp) const;
int db_load_routine(THD *thd, const Database_qualified_name *name,
sp_head **sphp,
sql_mode_t sql_mode,
@ -73,7 +76,12 @@ class Sp_handler
int sp_drop_routine_internal(THD *thd,
const Database_qualified_name *name,
TABLE *table) const;
sp_head *sp_clone_and_link_routine(THD *thd,
const Database_qualified_name *name,
sp_head *sp) const;
public:
virtual ~Sp_handler() {}
static const Sp_handler *handler(enum enum_sql_command cmd);
static const Sp_handler *handler(stored_procedure_type type);
static const Sp_handler *handler(MDL_key::enum_mdl_namespace ns);

View file

@ -480,7 +480,7 @@ sp_name::sp_name(const MDL_key *key, char *qname_buff)
*/
bool
check_routine_name(LEX_CSTRING *ident)
check_routine_name(const LEX_CSTRING *ident)
{
DBUG_ASSERT(ident);
DBUG_ASSERT(ident->str);

View file

@ -125,7 +125,7 @@ public:
bool
check_routine_name(LEX_CSTRING *ident);
check_routine_name(const LEX_CSTRING *ident);
class sp_head :private Query_arena,
public Database_qualified_name

View file

@ -21,6 +21,10 @@
#include "gstream.h" // Gis_read_stream
#include "sql_string.h" // String
/* This is from item_func.h. Didn't want to #include the whole file. */
double my_double_round(double value, longlong dec, bool dec_unsigned,
bool truncate);
#ifdef HAVE_SPATIAL
/*
@ -250,6 +254,8 @@ static const uchar feature_type[]= "feature";
static const int feature_type_len= 7;
static const uchar feature_coll_type[]= "featurecollection";
static const int feature_coll_type_len= 17;
static const uchar bbox_keyname[]= "bbox";
static const int bbox_keyname_len= 4;
int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end)
@ -258,7 +264,7 @@ int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end)
if (wkt->reserve(4 + type_keyname_len + 2 + len + 2 + 2 +
coord_keyname_len + 4, 512))
return 1;
wkt->qs_append("{\"", 2);
wkt->qs_append("\"", 1);
wkt->qs_append((const char *) type_keyname, type_keyname_len);
wkt->qs_append("\": \"", 4);
wkt->qs_append(get_class_info()->m_geojson_name.str, len);
@ -269,10 +275,35 @@ int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end)
wkt->qs_append((const char *) coord_keyname, coord_keyname_len);
wkt->qs_append("\": ", 3);
if (get_data_as_json(wkt, max_dec_digits, end) ||
wkt->reserve(1))
if (get_data_as_json(wkt, max_dec_digits, end))
return 1;
wkt->qs_append('}');
return 0;
}
int Geometry::bbox_as_json(String *wkt)
{
MBR mbr;
const char *end;
if (wkt->reserve(5 + bbox_keyname_len + (FLOATING_POINT_DECIMALS+2)*4, 512))
return 1;
wkt->qs_append("\"", 1);
wkt->qs_append((const char *) bbox_keyname, bbox_keyname_len);
wkt->qs_append("\": [", 4);
if (get_mbr(&mbr, &end))
return 1;
wkt->qs_append(mbr.xmin);
wkt->qs_append(", ", 2);
wkt->qs_append(mbr.ymin);
wkt->qs_append(", ", 2);
wkt->qs_append(mbr.xmax);
wkt->qs_append(", ", 2);
wkt->qs_append(mbr.ymax);
wkt->qs_append("]", 1);
return 0;
}
@ -339,7 +370,7 @@ Geometry *Geometry::create_from_wkb(Geometry_buffer *buffer,
Geometry *Geometry::create_from_json(Geometry_buffer *buffer,
json_engine_t *je, String *res)
json_engine_t *je, bool er_on_3D, String *res)
{
Class_info *ci= NULL;
const uchar *coord_start= NULL, *geom_start= NULL,
@ -514,14 +545,14 @@ create_geom:
result= (*ci->m_create_func)(buffer->data);
res->q_append((char) wkb_ndr);
res->q_append((uint32) result->get_class_info()->m_type_id);
if (result->init_from_json(je, res))
if (result->init_from_json(je, er_on_3D, res))
goto err_return;
return result;
handle_geometry_key:
json_scan_start(je, je->s.cs, geometry_start, je->s.str_end);
return create_from_json(buffer, je, res);
return create_from_json(buffer, je, er_on_3D, res);
err_return:
return NULL;
@ -670,6 +701,11 @@ static void append_json_point(String *txt, uint max_dec, const char *data)
{
double x,y;
get_point(&x, &y, data);
if (max_dec < FLOATING_POINT_DECIMALS)
{
x= my_double_round(x, max_dec, FALSE, FALSE);
y= my_double_round(y, max_dec, FALSE, FALSE);
}
txt->qs_append('[');
txt->qs_append(x);
txt->qs_append(", ", 2);
@ -780,7 +816,8 @@ uint Gis_point::init_from_wkb(const char *wkb, uint len,
}
static int read_point_from_json(json_engine_t *je, double *x, double *y)
static int read_point_from_json(json_engine_t *je, bool er_on_3D,
double *x, double *y)
{
int n_coord= 0, err;
double tmp, *d;
@ -803,14 +840,17 @@ static int read_point_from_json(json_engine_t *je, double *x, double *y)
n_coord++;
}
return 0;
if (n_coord <= 2 || !er_on_3D)
return 0;
je->s.error= Geometry::GEOJ_DIMENSION_NOT_SUPPORTED;
return 1;
bad_coordinates:
je->s.error= Geometry::GEOJ_INCORRECT_GEOJSON;
return 1;
}
bool Gis_point::init_from_json(json_engine_t *je, String *wkb)
bool Gis_point::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
{
double x, y;
if (json_read_value(je))
@ -822,7 +862,7 @@ bool Gis_point::init_from_json(json_engine_t *je, String *wkb)
return TRUE;
}
if (read_point_from_json(je, &x, &y) ||
if (read_point_from_json(je, er_on_3D, &x, &y) ||
wkb->reserve(POINT_DATA_SIZE))
return TRUE;
@ -971,7 +1011,8 @@ uint Gis_line_string::init_from_wkb(const char *wkb, uint len,
}
bool Gis_line_string::init_from_json(json_engine_t *je, String *wkb)
bool Gis_line_string::init_from_json(json_engine_t *je, bool er_on_3D,
String *wkb)
{
uint32 n_points= 0;
uint32 np_pos= wkb->length();
@ -994,7 +1035,7 @@ bool Gis_line_string::init_from_json(json_engine_t *je, String *wkb)
{
DBUG_ASSERT(je->state == JST_VALUE);
if (p.init_from_json(je, wkb))
if (p.init_from_json(je, er_on_3D, wkb))
return TRUE;
n_points++;
}
@ -1364,7 +1405,7 @@ uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
}
bool Gis_polygon::init_from_json(json_engine_t *je, String *wkb)
bool Gis_polygon::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
{
uint32 n_linear_rings= 0;
uint32 lr_pos= wkb->length();
@ -1389,7 +1430,7 @@ bool Gis_polygon::init_from_json(json_engine_t *je, String *wkb)
DBUG_ASSERT(je->state == JST_VALUE);
uint32 ls_pos=wkb->length();
if (ls.init_from_json(je, wkb))
if (ls.init_from_json(je, er_on_3D, wkb))
return TRUE;
ls.set_data_ptr(wkb->ptr() + ls_pos, wkb->length() - ls_pos);
if (ls.is_closed(&closed) || !closed)
@ -1855,7 +1896,8 @@ uint Gis_multi_point::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo,
}
bool Gis_multi_point::init_from_json(json_engine_t *je, String *wkb)
bool Gis_multi_point::init_from_json(json_engine_t *je, bool er_on_3D,
String *wkb)
{
uint32 n_points= 0;
uint32 np_pos= wkb->length();
@ -1883,7 +1925,7 @@ bool Gis_multi_point::init_from_json(json_engine_t *je, String *wkb)
wkb->q_append((char) wkb_ndr);
wkb->q_append((uint32) wkb_point);
if (p.init_from_json(je, wkb))
if (p.init_from_json(je, er_on_3D, wkb))
return TRUE;
n_points++;
}
@ -2123,7 +2165,8 @@ uint Gis_multi_line_string::init_from_wkb(const char *wkb, uint len,
}
bool Gis_multi_line_string::init_from_json(json_engine_t *je, String *wkb)
bool Gis_multi_line_string::init_from_json(json_engine_t *je, bool er_on_3D,
String *wkb)
{
uint32 n_line_strings= 0;
uint32 ls_pos= wkb->length();
@ -2151,7 +2194,7 @@ bool Gis_multi_line_string::init_from_json(json_engine_t *je, String *wkb)
wkb->q_append((char) wkb_ndr);
wkb->q_append((uint32) wkb_linestring);
if (ls.init_from_json(je, wkb))
if (ls.init_from_json(je, er_on_3D, wkb))
return TRUE;
n_line_strings++;
@ -2511,7 +2554,8 @@ uint Gis_multi_polygon::init_from_opresult(String *bin,
}
bool Gis_multi_polygon::init_from_json(json_engine_t *je, String *wkb)
bool Gis_multi_polygon::init_from_json(json_engine_t *je, bool er_on_3D,
String *wkb)
{
uint32 n_polygons= 0;
int np_pos= wkb->length();
@ -2539,7 +2583,7 @@ bool Gis_multi_polygon::init_from_json(json_engine_t *je, String *wkb)
wkb->q_append((char) wkb_ndr);
wkb->q_append((uint32) wkb_polygon);
if (p.init_from_json(je, wkb))
if (p.init_from_json(je, er_on_3D, wkb))
return TRUE;
n_polygons++;
@ -2986,7 +3030,8 @@ uint Gis_geometry_collection::init_from_wkb(const char *wkb, uint len,
}
bool Gis_geometry_collection::init_from_json(json_engine_t *je, String *wkb)
bool Gis_geometry_collection::init_from_json(json_engine_t *je, bool er_on_3D,
String *wkb)
{
uint32 n_objects= 0;
uint32 no_pos= wkb->length();
@ -3012,7 +3057,7 @@ bool Gis_geometry_collection::init_from_json(json_engine_t *je, String *wkb)
DBUG_ASSERT(je->state == JST_VALUE);
if (!(g= create_from_json(&buffer, je, wkb)))
if (!(g= create_from_json(&buffer, je, er_on_3D, wkb)))
return TRUE;
*je= sav_je;
@ -3097,12 +3142,14 @@ bool Gis_geometry_collection::get_data_as_json(String *txt, uint max_dec_digits,
if (!(geom= create_by_typeid(&buffer, wkb_type)))
return 1;
geom->set_data_ptr(data, (uint) (m_data_end - data));
if (geom->as_json(txt, max_dec_digits, &data) ||
txt->append(STRING_WITH_LEN(", "), 512))
if (txt->append("{", 1) ||
geom->as_json(txt, max_dec_digits, &data) ||
txt->append(STRING_WITH_LEN("}, "), 512))
return 1;
}
txt->length(txt->length() - 2);
txt->qs_append(']');
if (txt->append("]", 1))
return 1;
*end= data;
return 0;

View file

@ -255,6 +255,7 @@ public:
GEOJ_INCORRECT_GEOJSON= 1,
GEOJ_TOO_FEW_POINTS= 2,
GEOJ_POLYGON_NOT_CLOSED= 3,
GEOJ_DIMENSION_NOT_SUPPORTED= 4,
};
@ -281,7 +282,8 @@ public:
virtual uint init_from_opresult(String *bin,
const char *opres, uint res_len)
{ return init_from_wkb(opres + 4, UINT_MAX32, wkb_ndr, bin) + 4; }
virtual bool init_from_json(json_engine_t *je, String *wkb) {return true;}
virtual bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb)
{ return true; }
virtual bool get_data_as_wkt(String *txt, const char **end) const=0;
virtual bool get_data_as_json(String *txt, uint max_dec_digits,
@ -315,12 +317,13 @@ public:
bool init_stream=1);
static Geometry *create_from_wkb(Geometry_buffer *buffer,
const char *wkb, uint32 len, String *res);
static Geometry *create_from_json(Geometry_buffer *buffer,
json_engine_t *je, String *res);
static Geometry *create_from_json(Geometry_buffer *buffer, json_engine_t *je,
bool er_on_3D, String *res);
static Geometry *create_from_opresult(Geometry_buffer *g_buf,
String *res, Gcalc_result_receiver &rr);
int as_wkt(String *wkt, const char **end);
int as_json(String *wkt, uint max_dec_digits, const char **end);
int bbox_as_json(String *wkt);
inline void set_data_ptr(const char *data, uint32 data_len)
{
@ -395,7 +398,7 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -450,7 +453,7 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -484,7 +487,7 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -521,7 +524,7 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -550,7 +553,7 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -580,7 +583,7 @@ public:
uint32 get_data_size() const;
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;
@ -612,7 +615,7 @@ public:
bool init_from_wkt(Gis_read_stream *trs, String *wkb);
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
uint init_from_opresult(String *bin, const char *opres, uint res_len);
bool init_from_json(json_engine_t *je, String *wkb);
bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_data_as_json(String *txt, uint max_dec_digits,
const char **end) const;

View file

@ -362,8 +362,8 @@ static bool show_database_privileges(THD *, const char *, const char *,
char *, size_t);
static bool show_table_and_column_privileges(THD *, const char *, const char *,
char *, size_t);
static int show_routine_grants(THD *, const char *, const char *, HASH *,
const char *, int, char *, int);
static int show_routine_grants(THD *, const char *, const char *,
const Sp_handler *sph, char *, int);
class Grant_tables;
class User_table;
@ -8484,12 +8484,12 @@ static bool print_grants_for_role(THD *thd, ACL_ROLE * role)
if (show_table_and_column_privileges(thd, role->user.str, "", buff, sizeof(buff)))
return TRUE;
if (show_routine_grants(thd, role->user.str, "", &proc_priv_hash,
STRING_WITH_LEN("PROCEDURE"), buff, sizeof(buff)))
if (show_routine_grants(thd, role->user.str, "", &sp_handler_procedure,
buff, sizeof(buff)))
return TRUE;
if (show_routine_grants(thd, role->user.str, "", &func_priv_hash,
STRING_WITH_LEN("FUNCTION"), buff, sizeof(buff)))
if (show_routine_grants(thd, role->user.str, "", &sp_handler_function,
buff, sizeof(buff)))
return TRUE;
return FALSE;
@ -8709,12 +8709,12 @@ bool mysql_show_grants(THD *thd, LEX_USER *lex_user)
if (show_table_and_column_privileges(thd, username, hostname, buff, sizeof(buff)))
goto end;
if (show_routine_grants(thd, username, hostname, &proc_priv_hash,
STRING_WITH_LEN("PROCEDURE"), buff, sizeof(buff)))
if (show_routine_grants(thd, username, hostname, &sp_handler_procedure,
buff, sizeof(buff)))
goto end;
if (show_routine_grants(thd, username, hostname, &func_priv_hash,
STRING_WITH_LEN("FUNCTION"), buff, sizeof(buff)))
if (show_routine_grants(thd, username, hostname, &sp_handler_function,
buff, sizeof(buff)))
goto end;
if (show_proxy_grants(thd, username, hostname, buff, sizeof(buff)))
@ -9094,12 +9094,13 @@ static bool show_table_and_column_privileges(THD *thd, const char *username,
static int show_routine_grants(THD* thd,
const char *username, const char *hostname,
HASH *hash, const char *type, int typelen,
const Sp_handler *sph,
char *buff, int buffsize)
{
uint counter, index;
int error= 0;
Protocol *protocol= thd->protocol;
HASH *hash= sph->get_priv_hash();
/* Add routine access */
for (index=0 ; index < hash->records ; index++)
{
@ -9153,7 +9154,7 @@ static int show_routine_grants(THD* thd,
}
}
global.append(STRING_WITH_LEN(" ON "));
global.append(type,typelen);
global.append(sph->type_lex_cstring());
global.append(' ');
append_identifier(thd, &global, grant_proc->db,
strlen(grant_proc->db));
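The Sp_handler-based rewrite is not meant to change the visible SHOW GRANTS output; a sketch of the kind of grant that goes through this code (account and routine names are illustrative, and the account is assumed to exist):
CREATE PROCEDURE test.p1() SELECT 1;
GRANT EXECUTE ON PROCEDURE test.p1 TO 'someuser'@'localhost';
-- Expected to list a line such as:
--   GRANT EXECUTE ON PROCEDURE `test`.`p1` TO 'someuser'@'localhost'
SHOW GRANTS FOR 'someuser'@'localhost';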

View file

@ -5798,7 +5798,7 @@ bool LEX::sp_block_finalize(THD *thd, const Lex_spblock_st spblock,
}
sp_name *LEX::make_sp_name(THD *thd, LEX_CSTRING *name)
sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name)
{
sp_name *res;
LEX_CSTRING db;
@ -5810,16 +5810,20 @@ sp_name *LEX::make_sp_name(THD *thd, LEX_CSTRING *name)
}
sp_name *LEX::make_sp_name(THD *thd, LEX_CSTRING *name1, LEX_CSTRING *name2)
sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name1,
const LEX_CSTRING *name2)
{
sp_name *res;
if (!name1->str || check_db_name((LEX_STRING*) name1))
LEX_CSTRING norm_name1;
if (!name1->str ||
!thd->make_lex_string(&norm_name1, name1->str, name1->length) ||
check_db_name((LEX_STRING *) &norm_name1))
{
my_error(ER_WRONG_DB_NAME, MYF(0), name1->str);
return NULL;
}
if (check_routine_name(name2) ||
(!(res= new (thd->mem_root) sp_name(name1, name2, true))))
(!(res= new (thd->mem_root) sp_name(&norm_name1, name2, true))))
return NULL;
return res;
}
@ -6570,6 +6574,18 @@ Item *LEX::create_item_limit(THD *thd,
}
bool LEX::set_user_variable(THD *thd, const LEX_CSTRING *name, Item *val)
{
Item_func_set_user_var *item;
set_var_user *var;
if (!(item= new (thd->mem_root) Item_func_set_user_var(thd, name, val)) ||
!(var= new (thd->mem_root) set_var_user(item)))
return true;
var_list.push_back(var, thd->mem_root);
return false;
}
/*
Perform assignment for a trigger, a system variable, or an SP variable.
"variable" be previously set by init_internal_variable(variable, name).
@ -7131,3 +7147,42 @@ bool LEX::add_create_view(THD *thd, DDL_options_st ddl,
return true;
return create_or_alter_view_finalize(thd, table_ident);
}
bool LEX::call_statement_start(THD *thd, sp_name *name)
{
sql_command= SQLCOM_CALL;
spname= name;
value_list.empty();
sp_handler_procedure.add_used_routine(this, thd, name);
return false;
}
bool LEX::call_statement_start(THD *thd, const LEX_CSTRING *name)
{
sp_name *spname= make_sp_name(thd, name);
return !spname || call_statement_start(thd, spname);
}
bool LEX::call_statement_start(THD *thd, const LEX_CSTRING *name1,
const LEX_CSTRING *name2)
{
sp_name *spname= make_sp_name(thd, name1, name2);
return !spname || call_statement_start(thd, spname);
}
bool LEX::add_grant_command(THD *thd, enum_sql_command sql_command_arg,
stored_procedure_type type_arg)
{
if (columns.elements)
{
thd->parse_error();
return true;
}
sql_command= sql_command_arg,
type= type_arg;
return false;
}

View file

@ -3153,9 +3153,11 @@ public:
bool set_trigger_new_row(LEX_CSTRING *name, Item *val);
bool set_system_variable(struct sys_var_with_base *tmp,
enum enum_var_type var_type, Item *val);
bool set_user_variable(THD *thd, const LEX_CSTRING *name, Item *val);
void set_stmt_init();
sp_name *make_sp_name(THD *thd, LEX_CSTRING *name);
sp_name *make_sp_name(THD *thd, LEX_CSTRING *name1, LEX_CSTRING *name2);
sp_name *make_sp_name(THD *thd, const LEX_CSTRING *name);
sp_name *make_sp_name(THD *thd, const LEX_CSTRING *name1,
const LEX_CSTRING *name2);
sp_head *make_sp_head(THD *thd, const sp_name *name, const Sp_handler *sph);
sp_head *make_sp_head_no_recursive(THD *thd, const sp_name *name,
const Sp_handler *sph)
@ -3173,6 +3175,10 @@ public:
return NULL;
return make_sp_head_no_recursive(thd, name, sph);
}
bool call_statement_start(THD *thd, sp_name *name);
bool call_statement_start(THD *thd, const LEX_CSTRING *name);
bool call_statement_start(THD *thd, const LEX_CSTRING *name1,
const LEX_CSTRING *name2);
bool init_internal_variable(struct sys_var_with_base *variable,
const LEX_CSTRING *name);
bool init_internal_variable(struct sys_var_with_base *variable,
@ -3651,6 +3657,9 @@ public:
bool add_create_view(THD *thd, DDL_options_st ddl,
uint16 algorithm, enum_view_suid suid,
Table_ident *table_ident);
bool add_grant_command(THD *thd, enum_sql_command sql_command_arg,
stored_procedure_type type_arg);
};

View file

@ -32,7 +32,7 @@ enum err_msgs_index
{
en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT,
ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK,
es_ES, sv_SE, uk_UA
es_ES, sv_SE, uk_UA, hi_IN
} ERR_MSGS_INDEX;
@ -61,6 +61,7 @@ MY_LOCALE_ERRMSGS global_errmsgs[]=
{"spanish", NULL},
{"swedish", NULL},
{"ukrainian", NULL},
{"hindi", NULL},
{NULL, NULL}
};
@ -889,7 +890,7 @@ MY_LOCALE my_locale_hi_IN
'.', /* decimal point hi_IN */
',', /* thousands_sep hi_IN */
"\x03", /* grouping hi_IN */
&global_errmsgs[en_US]
&global_errmsgs[hi_IN]
);
/***** LOCALE END hi_IN *****/
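The practical effect is that the hi_IN locale now resolves to the new Hindi error-message set instead of English; a sketch (assumes the hindi errmsg.sys file built above is installed):
SET lc_messages = 'hi_IN';
-- Any subsequent error text, e.g. ER_NO_SUCH_TABLE, is returned in Hindi.
SELECT * FROM no_such_table;
SET lc_messages = DEFAULT;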

View file

@ -124,14 +124,6 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
@{
*/
/* Used in error handling only */
#define SP_COM_STRING(LP) \
((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \
(LP)->sql_command == SQLCOM_ALTER_FUNCTION || \
(LP)->sql_command == SQLCOM_SHOW_CREATE_FUNC || \
(LP)->sql_command == SQLCOM_DROP_FUNCTION ? \
"FUNCTION" : "PROCEDURE")
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
static void sql_kill(THD *thd, longlong id, killed_state state, killed_type type);
static void sql_kill_user(THD *thd, LEX_USER *user, killed_state state);
@ -5007,7 +4999,8 @@ end_with_restore_list:
goto error;
if (!(res= sql_set_variables(thd, lex_var_list, true)))
{
my_ok(thd);
if (!thd->is_error())
my_ok(thd);
}
else
{
@ -5841,11 +5834,11 @@ end_with_restore_list:
break;
case SP_KEY_NOT_FOUND:
my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
SP_COM_STRING(lex), ErrConvDQName(lex->spname).ptr());
sph->type_str(), ErrConvDQName(lex->spname).ptr());
goto error;
default:
my_error(ER_SP_CANT_ALTER, MYF(0),
SP_COM_STRING(lex), ErrConvDQName(lex->spname).ptr());
sph->type_str(), ErrConvDQName(lex->spname).ptr());
goto error;
}
break;
@ -5950,18 +5943,18 @@ end_with_restore_list:
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_DOES_NOT_EXIST, ER_THD(thd, ER_SP_DOES_NOT_EXIST),
SP_COM_STRING(lex),
sph->type_str(),
ErrConvDQName(lex->spname).ptr());
if (!res)
my_ok(thd);
break;
}
my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
SP_COM_STRING(lex), ErrConvDQName(lex->spname).ptr());
sph->type_str(), ErrConvDQName(lex->spname).ptr());
goto error;
default:
my_error(ER_SP_DROP_FAILED, MYF(0),
SP_COM_STRING(lex), ErrConvDQName(lex->spname).ptr());
sph->type_str(), ErrConvDQName(lex->spname).ptr());
goto error;
}
break;
@ -5992,7 +5985,7 @@ end_with_restore_list:
{
/* We don't distinguish between errors for now */
my_error(ER_SP_DOES_NOT_EXIST, MYF(0),
SP_COM_STRING(lex), lex->spname->m_name.str);
sph->type_str(), lex->spname->m_name.str);
goto error;
}
break;
@ -9677,7 +9670,7 @@ LEX_USER *create_definer(THD *thd, LEX_CSTRING *user_name,
The function is not used in existing code but can be useful later?
*/
bool check_string_byte_length(LEX_CSTRING *str, uint err_msg,
bool check_string_byte_length(const LEX_CSTRING *str, uint err_msg,
uint max_byte_length)
{
if (str->length <= max_byte_length)
@ -9707,7 +9700,7 @@ bool check_string_byte_length(LEX_CSTRING *str, uint err_msg,
*/
bool check_string_char_length(LEX_CSTRING *str, uint err_msg,
bool check_string_char_length(const LEX_CSTRING *str, uint err_msg,
uint max_char_length, CHARSET_INFO *cs,
bool no_error)
{
@ -9726,7 +9719,7 @@ bool check_string_char_length(LEX_CSTRING *str, uint err_msg,
}
bool check_ident_length(LEX_CSTRING *ident)
bool check_ident_length(const LEX_CSTRING *ident)
{
if (check_string_char_length(ident, 0, NAME_CHAR_LEN, system_charset_info, 1))
{
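
For context on the hunks above: the removed SP_COM_STRING macro derived the word "FUNCTION" or "PROCEDURE" from lex->sql_command, whereas the error paths in this file now ask the routine handler itself via sph->type_str(). A rough sketch of the assumed interface (illustrative only; the real class is declared in sp.h and is not part of this diff):

    // Assumed shape of the handler queried by sph->type_str() above.
    class Sp_handler
    {
    public:
      virtual ~Sp_handler() {}
      // Human-readable routine kind, e.g. "FUNCTION" or "PROCEDURE",
      // i.e. the string SP_COM_STRING used to compute from sql_command.
      virtual const char *type_str() const= 0;
    };

    class Sp_handler_procedure: public Sp_handler
    {
    public:
      const char *type_str() const { return "PROCEDURE"; }
    };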

View file

@ -73,12 +73,12 @@ LEX_USER *create_default_definer(THD *thd, bool role);
LEX_USER *create_definer(THD *thd, LEX_CSTRING *user_name, LEX_CSTRING *host_name);
LEX_USER *get_current_user(THD *thd, LEX_USER *user, bool lock=true);
bool sp_process_definer(THD *thd);
bool check_string_byte_length(LEX_CSTRING *str, uint err_msg,
bool check_string_byte_length(const LEX_CSTRING *str, uint err_msg,
uint max_byte_length);
bool check_string_char_length(LEX_CSTRING *str, uint err_msg,
bool check_string_char_length(const LEX_CSTRING *str, uint err_msg,
uint max_char_length, CHARSET_INFO *cs,
bool no_error);
bool check_ident_length(LEX_CSTRING *ident);
bool check_ident_length(const LEX_CSTRING *ident);
CHARSET_INFO* merge_charset_and_collation(CHARSET_INFO *cs, CHARSET_INFO *cl);
CHARSET_INFO *find_bin_collation(CHARSET_INFO *cs);
bool check_host_name(LEX_CSTRING *str);

View file

@ -8634,7 +8634,7 @@ JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls
JOIN_TAB *tab= join->join_tab;
if (const_tbls == WITHOUT_CONST_TABLES)
{
if (join->const_tables == join->table_count)
if (join->const_tables == join->table_count || !tab)
return NULL;
tab += join->const_tables;
}
@ -8657,6 +8657,10 @@ JOIN_TAB *first_linear_tab(JOIN *join,
enum enum_with_const_tables const_tbls)
{
JOIN_TAB *first= join->join_tab;
if (!first)
return NULL;
if (const_tbls == WITHOUT_CONST_TABLES)
first+= join->const_tables;
@ -8743,7 +8747,7 @@ JOIN_TAB *first_depth_first_tab(JOIN* join)
{
JOIN_TAB* tab;
/* This means we're starting the enumeration */
if (join->const_tables == join->top_join_tab_count)
if (join->const_tables == join->top_join_tab_count || !join->join_tab)
return NULL;
tab= join->join_tab + join->const_tables;

View file

@ -341,7 +341,7 @@ bool sequence_insert(THD *thd, LEX *lex, TABLE_LIST *table_list)
/* Create a SEQUENCE object */
SEQUENCE::SEQUENCE() :all_values_used(0), initialized(SEQ_UNINTIALIZED), table(0)
SEQUENCE::SEQUENCE() :all_values_used(0), initialized(SEQ_UNINTIALIZED)
{
mysql_rwlock_init(key_LOCK_SEQUENCE, &mutex);
}
@ -389,7 +389,7 @@ void SEQUENCE::read_unlock(TABLE *table)
This is called from ha_open() when the table is not yet locked
*/
int SEQUENCE::read_initial_values(TABLE *table_arg)
int SEQUENCE::read_initial_values(TABLE *table)
{
int error= 0;
enum thr_lock_type save_lock_type;
@ -398,7 +398,6 @@ int SEQUENCE::read_initial_values(TABLE *table_arg)
if (likely(initialized != SEQ_UNINTIALIZED))
DBUG_RETURN(0);
table= table_arg;
write_lock(table);
if (likely(initialized == SEQ_UNINTIALIZED))
{
@ -438,7 +437,8 @@ int SEQUENCE::read_initial_values(TABLE *table_arg)
thd->mdl_context.release_lock(mdl_request.ticket);
DBUG_RETURN(HA_ERR_LOCK_WAIT_TIMEOUT);
}
if (!(error= read_stored_values()))
DBUG_ASSERT(table->reginfo.lock_type == TL_READ);
if (!(error= read_stored_values(table)))
initialized= SEQ_READY_TO_USE;
mysql_unlock_tables(thd, lock, 0);
if (mdl_lock_used)
@ -467,7 +467,7 @@ int SEQUENCE::read_initial_values(TABLE *table_arg)
Called once from when table is opened
*/
int SEQUENCE::read_stored_values()
int SEQUENCE::read_stored_values(TABLE *table)
{
int error;
my_bitmap_map *save_read_set;
@ -595,6 +595,12 @@ int sequence_definition::write(TABLE *table, bool all_fields)
else
table->rpl_write_set= &table->s->all_set;
/*
The following is needed to fix comparison of rows in
ha_update_first_row() for InnoDB
*/
memcpy(table->record[1],table->s->default_values, table->s->reclength);
/* Update table */
save_write_set= table->write_set;
save_read_set= table->read_set;
@ -621,7 +627,7 @@ int sequence_definition::write(TABLE *table, bool all_fields)
push_warning_printf(WARN_LEVEL_WARN) has been called
 @retval 0 Next number or error. Check error variable
@retval 0 Next number or error. Check error variable
# Next sequence number
NOTES:
@ -755,7 +761,7 @@ void SEQUENCE_LAST_VALUE::set_version(TABLE *table)
@param in next_round Round for 'next_value' (in case of cycles)
@param in is_used 1 if next_val is already used
 @retval 0 ok, value adjusted
@retval 0 ok, value adjusted
1 value was less than current value or
error when storing value

View file

@ -93,7 +93,7 @@ public:
SEQUENCE();
~SEQUENCE();
int read_initial_values(TABLE *table);
int read_stored_values();
int read_stored_values(TABLE *table);
void write_lock(TABLE *table);
void write_unlock(TABLE *table);
void read_lock(TABLE *table);
@ -132,7 +132,6 @@ public:
seq_init initialized;
private:
TABLE *table;
mysql_rwlock_t mutex;
};

View file

@ -3041,12 +3041,8 @@ sp_suid:
call:
CALL_SYM sp_name
{
LEX *lex = Lex;
lex->sql_command= SQLCOM_CALL;
lex->spname= $2;
lex->value_list.empty();
sp_handler_procedure.add_used_routine(lex, thd, $2);
if (Lex->call_statement_start(thd, $2))
MYSQL_YYABORT;
}
opt_sp_cparam_list {}
;
@ -15174,14 +15170,8 @@ option_value_no_option_type:
}
| '@' ident_or_text equal expr
{
Item_func_set_user_var *item;
item= new (thd->mem_root) Item_func_set_user_var(thd, &$2, $4);
if (item == NULL)
if (Lex->set_user_variable(thd, &$2, $4))
MYSQL_YYABORT;
set_var_user *var= new (thd->mem_root) set_var_user(item);
if (var == NULL)
MYSQL_YYABORT;
Lex->var_list.push_back(var, thd->mem_root);
}
| '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default
{
@ -15603,25 +15593,13 @@ revoke_command:
}
| grant_privileges ON FUNCTION_SYM grant_ident FROM user_and_role_list
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_FUNCTION))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_REVOKE;
lex->type= TYPE_ENUM_FUNCTION;
}
| grant_privileges ON PROCEDURE_SYM grant_ident FROM user_and_role_list
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_PROCEDURE))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_REVOKE;
lex->type= TYPE_ENUM_PROCEDURE;
}
| ALL opt_privileges ',' GRANT OPTION FROM user_and_role_list
{
@ -15665,26 +15643,14 @@ grant_command:
| grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list
opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_FUNCTION))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_GRANT;
lex->type= TYPE_ENUM_FUNCTION;
}
| grant_privileges ON PROCEDURE_SYM grant_ident TO_SYM grant_list
opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_PROCEDURE))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_GRANT;
lex->type= TYPE_ENUM_PROCEDURE;
}
| PROXY_SYM ON user TO_SYM grant_list opt_grant_option
{
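
The grammar actions above (and their duplicates in sql_yacc_ora.yy below) are folded into new LEX methods declared earlier in sql_lex.h. Their definitions are not part of the hunks shown here; reassembling the removed inline code suggests roughly the following shape, as a sketch only (set_user_variable() is analogous, wrapping the removed Item_func_set_user_var/set_var_user construction):

    // Sketch inferred from the removed grammar actions; the real definitions
    // live in sql_lex.cc and may differ in detail.
    bool LEX::call_statement_start(THD *thd, sp_name *name)
    {
      sql_command= SQLCOM_CALL;
      spname= name;
      value_list.empty();
      sp_handler_procedure.add_used_routine(this, thd, name);
      return false;                      // the old action had no failure path
    }

    bool LEX::add_grant_command(THD *thd, enum_sql_command sql_command_arg,
                                stored_procedure_type type_arg)
    {
      if (columns.elements)              // column lists are not allowed for
      {                                  // GRANT/REVOKE ... ON FUNCTION/PROCEDURE
        thd->parse_error();
        return true;
      }
      sql_command= sql_command_arg;
      type= type_arg;
      return false;
    }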

View file

@ -2487,12 +2487,8 @@ sp_suid:
call:
CALL_SYM sp_name
{
LEX *lex = Lex;
lex->sql_command= SQLCOM_CALL;
lex->spname= $2;
lex->value_list.empty();
sp_handler_procedure.add_used_routine(lex, thd, $2);
if (Lex->call_statement_start(thd, $2))
MYSQL_YYABORT;
}
opt_sp_cparam_list {}
;
@ -3375,22 +3371,14 @@ sp_statement:
| ident_directly_assignable
{
// Direct procedure call (without the CALL keyword)
LEX *lex = Lex;
if (!(lex->spname= lex->make_sp_name(thd, &$1)))
if (Lex->call_statement_start(thd, &$1))
MYSQL_YYABORT;
lex->sql_command= SQLCOM_CALL;
lex->value_list.empty();
sp_handler_procedure.add_used_routine(lex, thd, lex->spname);
}
opt_sp_cparam_list
| ident_directly_assignable '.' ident
{
LEX *lex = Lex;
if (!(lex->spname= lex->make_sp_name(thd, &$1, &$3)))
if (Lex->call_statement_start(thd, &$1, &$3))
MYSQL_YYABORT;
lex->sql_command= SQLCOM_CALL;
lex->value_list.empty();
sp_handler_procedure.add_used_routine(lex, thd, lex->spname);
}
opt_sp_cparam_list
;
@ -15387,14 +15375,8 @@ option_value_no_option_type:
}
| '@' ident_or_text equal expr
{
Item_func_set_user_var *item;
item= new (thd->mem_root) Item_func_set_user_var(thd, &$2, $4);
if (item == NULL)
if (Lex->set_user_variable(thd, &$2, $4))
MYSQL_YYABORT;
set_var_user *var= new (thd->mem_root) set_var_user(item);
if (var == NULL)
MYSQL_YYABORT;
Lex->var_list.push_back(var, thd->mem_root);
}
| '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default
{
@ -15840,25 +15822,13 @@ revoke_command:
}
| grant_privileges ON FUNCTION_SYM grant_ident FROM user_and_role_list
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_FUNCTION))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_REVOKE;
lex->type= TYPE_ENUM_FUNCTION;
}
| grant_privileges ON PROCEDURE_SYM grant_ident FROM user_and_role_list
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_REVOKE, TYPE_ENUM_PROCEDURE))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_REVOKE;
lex->type= TYPE_ENUM_PROCEDURE;
}
| ALL opt_privileges ',' GRANT OPTION FROM user_and_role_list
{
@ -15902,26 +15872,14 @@ grant_command:
| grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list
opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_FUNCTION))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_GRANT;
lex->type= TYPE_ENUM_FUNCTION;
}
| grant_privileges ON PROCEDURE_SYM grant_ident TO_SYM grant_list
opt_require_clause opt_grant_options
{
LEX *lex= Lex;
if (lex->columns.elements)
{
thd->parse_error();
if (Lex->add_grant_command(thd, SQLCOM_GRANT, TYPE_ENUM_PROCEDURE))
MYSQL_YYABORT;
}
lex->sql_command= SQLCOM_GRANT;
lex->type= TYPE_ENUM_PROCEDURE;
}
| PROXY_SYM ON user TO_SYM grant_list opt_grant_option
{

View file

@ -31,16 +31,17 @@ The database buffer buf_pool
Created 11/5/1995 Heikki Tuuri
*******************************************************/
#include "ha_prototypes.h"
#include "univ.i"
#include "mtr0types.h"
#include "mach0data.h"
#include "page0size.h"
#include "buf0buf.h"
#include "os0api.h"
#include <string.h>
#ifdef UNIV_NONINL
#include "buf0buf.ic"
#endif
#ifdef UNIV_INNOCHECKSUM
#include "string.h"
#include "mach0data.h"
#endif /* UNIV_INNOCHECKSUM */
#ifndef UNIV_INNOCHECKSUM
#include "mem0mem.h"
#include "btr0btr.h"
@ -64,7 +65,6 @@ Created 11/5/1995 Heikki Tuuri
#include "fsp0sysspace.h"
#endif /* !UNIV_INNOCHECKSUM */
#include "page0zip.h"
#include "buf0checksum.h"
#include "sync0sync.h"
#include "buf0dump.h"
#include "ut0new.h"
@ -645,10 +645,10 @@ buf_page_is_checksum_valid_crc32(
#ifdef UNIV_INNOCHECKSUM
if (log_file
&& srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" crc32 calculated = %u;"
" recorded checksum field1 = %lu recorded"
" checksum field2 =%lu\n", cur_page_num,
" recorded checksum field1 = " ULINTPF " recorded"
" checksum field2 =" ULINTPF "\n", cur_page_num,
crc32, checksum_field1, checksum_field2);
}
#endif /* UNIV_INNOCHECKSUM */
@ -702,33 +702,34 @@ buf_page_is_checksum_valid_innodb(
#ifdef UNIV_INNOCHECKSUM
if (log_file
&& srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_INNODB) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" old style: calculated ="
" %lu; recorded = %lu\n",
" " ULINTPF "; recorded = " ULINTPF "\n",
cur_page_num, old_checksum,
checksum_field2);
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" new style: calculated ="
" %lu; crc32 = %u; recorded = %lu\n",
" " ULINTPF "; crc32 = %u; recorded = " ULINTPF "\n",
cur_page_num, new_checksum,
buf_calc_page_crc32(read_buf), checksum_field1);
}
if (log_file
&& srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" old style: calculated ="
" %lu; recorded checksum = %lu\n",
" " ULINTPF "; recorded checksum = " ULINTPF "\n",
cur_page_num, old_checksum,
checksum_field2);
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" new style: calculated ="
" %lu; recorded checksum = %lu\n",
" " ULINTPF "; recorded checksum = " ULINTPF "\n",
cur_page_num, new_checksum,
checksum_field1);
}
#endif /* UNIV_INNOCHECKSUM */
if (checksum_field2 != mach_read_from_4(read_buf + FIL_PAGE_LSN)
&& checksum_field2 != old_checksum) {
DBUG_LOG("checksum",
@ -788,9 +789,9 @@ buf_page_is_checksum_valid_none(
if (log_file
&& srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_NONE) {
fprintf(log_file,
"page::%lu; none checksum: calculated"
" = %lu; recorded checksum_field1 = %lu"
" recorded checksum_field2 = %lu\n",
"page::%llu; none checksum: calculated"
" = " ULINTPF "; recorded checksum_field1 = " ULINTPF
" recorded checksum_field2 = " ULINTPF "\n",
cur_page_num, BUF_NO_CHECKSUM_MAGIC,
checksum_field1, checksum_field2);
}
@ -811,16 +812,18 @@ buf_page_is_corrupted(
bool check_lsn,
const byte* read_buf,
const page_size_t& page_size,
#ifndef UNIV_INNOCHECKSUM
const fil_space_t* space)
#else
const void* space)
#endif
{
ulint checksum_field1;
ulint checksum_field2;
size_t checksum_field1 = 0;
size_t checksum_field2 = 0;
#ifndef UNIV_INNOCHECKSUM
DBUG_EXECUTE_IF("buf_page_import_corrupt_failure", return(true); );
ulint page_type = mach_read_from_2(
read_buf + FIL_PAGE_TYPE);
#endif
ulint page_type = mach_read_from_2(read_buf + FIL_PAGE_TYPE);
/* We can trust page type if page compression is set on tablespace
flags because page compression flag means file must have been
@ -833,15 +836,12 @@ buf_page_is_corrupted(
decompressed at this stage). */
if ((page_type == FIL_PAGE_PAGE_COMPRESSED ||
page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)
&& space && FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags)) {
return(false);
}
#else
if (mach_read_from_4(read_buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION) != 0
|| mach_read_from_2(read_buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) {
return(false);
}
#ifndef UNIV_INNOCHECKSUM
&& space && FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags)
#endif
) {
return(false);
}
if (!page_size.is_compressed()
&& memcmp(read_buf + FIL_PAGE_LSN + 4,
@ -850,14 +850,13 @@ buf_page_is_corrupted(
/* Stored log sequence numbers at the start and the end
of page do not match */
#ifndef UNIV_INNOCHECKSUM
ib::info() << "Log sequence number at the start "
<< mach_read_from_4(read_buf + FIL_PAGE_LSN + 4)
<< " and the end "
<< mach_read_from_4(read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)
<< " do not match";
#endif
#endif /* UNIV_INNOCHECKSUM */
return(true);
}
@ -920,10 +919,9 @@ buf_page_is_corrupted(
&& *reinterpret_cast<const ib_uint64_t*>(
read_buf + FIL_PAGE_LSN) == 0) {
ulint i;
/* make sure that the page is really empty */
ulint i;
for (i = 0; i < page_size.logical(); ++i) {
/* The FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID has been
@ -944,7 +942,7 @@ buf_page_is_corrupted(
#ifdef UNIV_INNOCHECKSUM
if (i >= page_size.logical()) {
if (log_file) {
fprintf(log_file, "Page::%lu"
fprintf(log_file, "Page::%llu"
" is empty and uncorrupted\n",
cur_page_num);
}
@ -978,31 +976,34 @@ buf_page_is_corrupted(
if (buf_page_is_checksum_valid_none(read_buf,
checksum_field1, checksum_field2)) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
== SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(
curr_algo,
SRV_CHECKSUM_ALGORITHM_NONE,
page_id);
#endif /* !UNIV_INNOCHECKSUM */
}
#else /* !UNIV_INNOCHECKSUM */
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" old style: calculated = %u;"
" recorded = " ULINTPF "\n",
" recorded = " ULINTPF ";\n",
cur_page_num,
buf_calc_page_old_checksum(read_buf),
checksum_field2);
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" new style: calculated = %u;"
" crc32 = %u; recorded = " ULINTPF "\n",
" crc32 = %u; recorded = " ULINTPF ";\n",
cur_page_num,
buf_calc_page_new_checksum(read_buf),
buf_calc_page_crc32(read_buf),
checksum_field1);
}
#endif /* !UNIV_INNOCHECKSUM */
#endif /* UNIV_INNOCHECKSUM */
return(false);
}
@ -1022,15 +1023,16 @@ buf_page_is_corrupted(
if (buf_page_is_checksum_valid_innodb(read_buf,
checksum_field1, checksum_field2)) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
== SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(
curr_algo,
SRV_CHECKSUM_ALGORITHM_INNODB,
page_id);
#endif
}
#endif /* !UNIV_INNOCHECKSUM */
return(false);
}
@ -1038,13 +1040,13 @@ buf_page_is_corrupted(
if (!legacy_checksum_checked && buf_page_is_checksum_valid_crc32(
read_buf, checksum_field1, checksum_field2, true)) {
legacy_big_endian_checksum = true;
return(false);
legacy_big_endian_checksum = true;
return(false);
}
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "Fail; page %lu"
fprintf(log_file, "Fail; page::%llu;"
" invalid (fails crc32 checksum)\n",
cur_page_num);
}
@ -1061,30 +1063,32 @@ buf_page_is_corrupted(
if (buf_page_is_checksum_valid_none(read_buf,
checksum_field1, checksum_field2)) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
== SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(
curr_algo,
SRV_CHECKSUM_ALGORITHM_NONE,
page_id);
#endif
}
#else /* !UNIV_INNOCHECKSUM */
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" old style: calculated = %u;"
" recorded = %lu\n", cur_page_num,
" recorded = %zu;\n", cur_page_num,
buf_calc_page_old_checksum(read_buf),
checksum_field2);
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" new style: calculated = %u;"
" crc32 = %u; recorded = %lu\n",
" crc32 = %u; recorded = %zu;\n",
cur_page_num,
buf_calc_page_new_checksum(read_buf),
buf_calc_page_crc32(read_buf),
checksum_field1);
}
#endif /* !UNIV_INNOCHECKSUM */
#endif /* UNIV_INNOCHECKSUM */
return(false);
}
@ -1092,26 +1096,28 @@ buf_page_is_corrupted(
checksum_field1, checksum_field2, false)
|| buf_page_is_checksum_valid_crc32(read_buf,
checksum_field1, checksum_field2, true)) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
== SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(
curr_algo,
SRV_CHECKSUM_ALGORITHM_CRC32,
page_id);
#endif
}
#endif /* !UNIV_INNOCHECKSUM */
return(false);
}
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "Fail; page %lu"
fprintf(log_file, "Fail; page::%llu;"
" invalid (fails innodb checksum)\n",
cur_page_num);
}
#endif /* UNIV_INNOCHECKSUM */
return(true);
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
@ -1147,11 +1153,12 @@ buf_page_is_corrupted(
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "Fail; page %lu"
fprintf(log_file, "Fail; page::%llu;"
" invalid (fails none checksum)\n",
cur_page_num);
}
#endif /* UNIV_INNOCHECKSUM */
return(true);
case SRV_CHECKSUM_ALGORITHM_NONE:
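
The format-string churn in this file follows from two type changes visible elsewhere in this diff: innochecksum's cur_page_num becomes unsigned long long (see the univ.i hunk further down), and ulint-sized checksum fields are printed through the ULINTPF macro instead of a hard-coded %lu, because the width of ulint is platform-dependent. A standalone illustration of the idea; the typedef and macro values below are assumptions for one LP64 platform, not the actual univ.i definitions:

    #include <cstdio>

    typedef unsigned long int ulint;   // stand-in; 64-bit here, not everywhere
    #define ULINTPF "%lu"              // univ.i picks the matching specifier

    int main()
    {
      unsigned long long cur_page_num= 7;   // innochecksum's page counter
      ulint checksum_field1= 42;
      // Hard-coding "%lu" for both values (as the old code did) is only
      // correct when both types happen to be unsigned long; pairing %llu with
      // the counter and ULINTPF with the ulint keeps format and argument in
      // sync on every platform.
      std::printf("page::%llu; recorded checksum field1 = " ULINTPF "\n",
                  cur_page_num, checksum_field1);
      return 0;
    }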

View file

@ -24,6 +24,7 @@ Modified Jan Lindström jan.lindstrom@mariadb.com
*******************************************************/
#include "fil0fil.h"
#include "mtr0types.h"
#include "mach0data.h"
#include "page0size.h"
#include "page0zip.h"

View file

@ -1612,18 +1612,6 @@ thd_is_replication_slave_thread(
return thd && ((ibool) thd_slave_thread(thd));
}
/******************************************************************//**
Gets information on the durability property requested by thread.
Used when writing either a prepare or commit record to the log
buffer. @return the durability property. */
enum durability_properties
thd_requested_durability(
/*=====================*/
const THD* thd) /*!< in: thread handle */
{
return(thd_get_durability_property(thd));
}
/******************************************************************//**
Returns true if transaction should be flagged as read-only.
@return true if the thd is marked as read-only */
@ -4640,10 +4628,6 @@ innobase_commit(
this one, to allow then to group commit with us. */
thd_wakeup_subsequent_commits(thd, 0);
if (!read_only) {
trx->flush_log_later = false;
}
/* Now do a write + flush of logs. */
trx_commit_complete_for_mysql(trx);
@ -9224,7 +9208,7 @@ ha_innobase::update_row(
innobase_srv_conc_enter_innodb(m_prebuilt);
error = row_update_for_mysql((byte*) old_row, m_prebuilt);
error = row_update_for_mysql(m_prebuilt);
if (error == DB_SUCCESS && autoinc) {
/* A value for an AUTO_INCREMENT column
@ -9339,7 +9323,7 @@ ha_innobase::delete_row(
innobase_srv_conc_enter_innodb(m_prebuilt);
error = row_update_for_mysql((byte*) record, m_prebuilt);
error = row_update_for_mysql(m_prebuilt);
innobase_srv_conc_exit_innodb(m_prebuilt);

View file

@ -570,11 +570,6 @@ bool thd_binlog_filter_ok(const MYSQL_THD thd);
*/
bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd);
/** Gets information on the durability property requested by a thread.
@param thd Thread handle
@return a durability property. */
durability_properties thd_get_durability_property(const MYSQL_THD thd);
/** Is strict sql_mode set.
@param thd Thread object
@return True if sql_mode has strict mode (all or trans), false otherwise. */

View file

@ -534,7 +534,6 @@ ibuf_init_at_db_start(void)
fseg_n_reserved_pages(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
&n_used, &mtr);
ibuf_enter(&mtr);
ut_ad(n_used >= 2);
@ -556,7 +555,7 @@ ibuf_init_at_db_start(void)
mutex_exit(&ibuf_mutex);
ibuf->empty = page_is_empty(root);
ibuf_mtr_commit(&mtr);
mtr.commit();
ibuf->index = dict_mem_index_create(
"innodb_change_buffer", "CLUST_IND",

View file

@ -211,6 +211,7 @@ struct buf_pools_list_size_t {
ulint unzip_LRU_bytes; /*!< unzip_LRU size in bytes */
ulint flush_list_bytes; /*!< flush_list size in bytes */
};
#endif /* !UNIV_INNOCHECKSUM */
/** Page identifier. */
class page_id_t {
@ -333,6 +334,7 @@ operator<<(
std::ostream& out,
const page_id_t& page_id);
#ifndef UNIV_INNOCHECKSUM
/********************************************************************//**
Acquire mutex on all buffer pool instances */
UNIV_INLINE
@ -823,9 +825,16 @@ buf_page_is_corrupted(
bool check_lsn,
const byte* read_buf,
const page_size_t& page_size,
const fil_space_t* space = NULL)
MY_ATTRIBUTE((warn_unused_result));
#ifndef UNIV_INNOCHECKSUM
const fil_space_t* space = NULL)
#else
const void* space = NULL)
#endif
MY_ATTRIBUTE((warn_unused_result));
#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */

View file

@ -30,6 +30,7 @@ Created 04/01/2015 Jan Lindström
#include "os0event.h"
#include "my_crypt.h"
#endif /*! UNIV_INNOCHECKSUM */
/**
* Magic pattern in start of crypt data on page 0
@ -101,6 +102,8 @@ struct fil_space_rotate_state_t
} scrubbing;
};
#ifndef UNIV_INNOCHECKSUM
struct fil_space_crypt_t : st_encryption_scheme
{
public:

View file

@ -28,8 +28,6 @@ Created 10/25/1995 Heikki Tuuri
#define fil0fil_h
#include "univ.i"
struct fil_space_t;
#ifndef UNIV_INNOCHECKSUM
#include "log0recv.h"
@ -281,22 +279,23 @@ but in the MySQL Embedded Server Library and mysqlbackup it is not the default
directory, and we must set the base file path explicitly */
extern const char* fil_path_to_mysql_datadir;
/** Initial size of a single-table tablespace in pages */
#define FIL_IBD_FILE_INITIAL_SIZE 4
/** 'null' (undefined) page offset in the context of file spaces */
#define FIL_NULL ULINT32_UNDEFINED
/* Space address data type; this is intended to be used when
addresses accurate to a byte are stored in file pages. If the page part
of the address is FIL_NULL, the address is considered undefined. */
typedef byte fil_faddr_t; /*!< 'type' definition in C: an address
stored in a file page is a string of bytes */
#endif /* !UNIV_INNOCHECKSUM */
/** Initial size of a single-table tablespace in pages */
#define FIL_IBD_FILE_INITIAL_SIZE 4
/** 'null' (undefined) page offset in the context of file spaces */
#define FIL_NULL ULINT32_UNDEFINED
#define FIL_ADDR_PAGE 0 /* first in address is the page offset */
#define FIL_ADDR_BYTE 4 /* then comes 2-byte byte offset within page*/
#endif /* !UNIV_INNOCHECKSUM */
#define FIL_ADDR_SIZE 6 /* address size is 6 bytes */
#ifndef UNIV_INNOCHECKSUM
@ -431,8 +430,6 @@ index */
#define fil_page_index_page_check(page) \
fil_page_type_is_index(fil_page_get_type(page))
#ifndef UNIV_INNOCHECKSUM
/** Enum values for encryption table option */
enum fil_encryption_t {
/** Encrypted if innodb_encrypt_tables=ON (srv_encrypt_tables) */
@ -454,6 +451,8 @@ extern ulint fil_n_pending_tablespace_flushes;
/** Number of files currently open */
extern ulint fil_n_file_opened;
#ifndef UNIV_INNOCHECKSUM
/** Look up a tablespace.
The caller should hold an InnoDB table lock or a MDL that prevents
the tablespace from being dropped during the operation,
@ -1315,6 +1314,7 @@ fil_page_reset_type(
byte* page,
ulint type,
mtr_t* mtr);
/** Get the file page type.
@param[in] page file page
@return page type */
@ -1325,6 +1325,7 @@ fil_page_get_type(
{
return(mach_read_from_2(page + FIL_PAGE_TYPE));
}
/** Check (and if needed, reset) the page type.
Data files created before MySQL 5.1 may contain
garbage in the FIL_PAGE_TYPE field.

View file

@ -158,8 +158,6 @@ descriptor page, but used only in the first. */
FSP_FREE_LIMIT at a time */
/* @} */
#ifndef UNIV_INNOCHECKSUM
/* @defgroup File Segment Inode Constants (moved from fsp0fsp.c) @{ */
/* FILE SEGMENT INODE
@ -293,6 +291,7 @@ the extent are free and which contain old tuple version to clean. */
/** Offset of the descriptor array on a descriptor page */
#define XDES_ARR_OFFSET (FSP_HEADER_OFFSET + FSP_HEADER_SIZE)
#ifndef UNIV_INNOCHECKSUM
/* @} */
/**********************************************************************//**

View file

@ -53,6 +53,7 @@ xdes_calc_descriptor_index(
return(ut_2pow_remainder(offset, page_size.physical())
/ FSP_EXTENT_SIZE);
}
#endif /*!UNIV_INNOCHECKSUM */
/**********************************************************************//**
Gets a descriptor bit of a page.
@ -80,6 +81,7 @@ xdes_get_bit(
bit_index));
}
#ifndef UNIV_INNOCHECKSUM
/** Calculates the page where the descriptor of a page resides.
@param[in] page_size page size
@param[in] offset page offset

View file

@ -63,6 +63,7 @@ mach_write_to_2(
/*============*/
byte* b, /*!< in: pointer to two bytes where to store */
ulint n); /*!< in: ulint integer to be stored, >= 0, < 64k */
#endif /* !UNIV_INNOCHECKSUM */
/** The following function is used to fetch data from 2 consecutive
bytes. The most significant byte is at the lowest address.
@param[in] b pointer to 2 bytes where to store
@ -72,6 +73,8 @@ uint16_t
mach_read_from_2(
const byte* b)
MY_ATTRIBUTE((warn_unused_result));
#ifndef UNIV_INNOCHECKSUM
/********************************************************//**
The following function is used to convert a 16-bit data item
to the canonical format, for fast bytewise equality test
@ -362,6 +365,8 @@ mach_write_ulonglong(
ulint len, /*!< in: length of dest */
bool usign); /*!< in: signed or unsigned flag */
#endif /* !UNIV_INNOCHECKSUM */
/** Read 1 to 4 bytes from a file page buffered in the buffer pool.
@param[in] ptr pointer where to read
@param[in] type MLOG_1BYTE, MLOG_2BYTES, or MLOG_4BYTES
@ -373,8 +378,6 @@ mach_read_ulint(
mlog_id_t type)
MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_INNOCHECKSUM */
#include "mach0data.ic"
#endif

View file

@ -865,6 +865,8 @@ mach_write_ulonglong(
}
}
#endif /* !UNIV_INNOCHECKSUM */
/** Read 1 to 4 bytes from a file page buffered in the buffer pool.
@param[in] ptr pointer where to read
@param[in] type MLOG_1BYTE, MLOG_2BYTES, or MLOG_4BYTES
@ -889,5 +891,3 @@ mach_read_ulint(
ut_error;
return(0);
}
#endif /* !UNIV_INNOCHECKSUM */

View file

@ -85,8 +85,6 @@ Otherwise written as 0. @see PAGE_ROOT_AUTO_INC */
This field should not be written to after
page creation. */
#ifndef UNIV_INNOCHECKSUM
#define PAGE_BTR_SEG_LEAF 36 /* file segment header for the leaf pages in
a B-tree: defined only on the root page of a
B-tree, but not in the root of an ibuf tree */
@ -141,6 +139,8 @@ Otherwise written as 0. @see PAGE_ROOT_AUTO_INC */
#define PAGE_SAME_PAGE 4
#define PAGE_NO_DIRECTION 5
#ifndef UNIV_INNOCHECKSUM
/* PAGE DIRECTORY
==============
*/

View file

@ -27,8 +27,8 @@ Created 2/2/1994 Heikki Tuuri
#ifndef page0page_ic
#define page0page_ic
#include "mach0data.h"
#ifndef UNIV_INNOCHECKSUM
#include "mach0data.h"
#ifdef UNIV_DEBUG
# include "log0recv.h"
#endif /* !UNIV_DEBUG */
@ -40,6 +40,7 @@ Created 2/2/1994 Heikki Tuuri
#undef UNIV_INLINE
#define UNIV_INLINE
#endif
#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Gets the start of a page.
@ -53,6 +54,7 @@ page_align(
return((page_t*) ut_align_down(ptr, UNIV_PAGE_SIZE));
}
#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Gets the offset within a page.
@return offset from the start of the page */
@ -181,7 +183,6 @@ page_header_get_field(
}
#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Sets the given header field. */
UNIV_INLINE
@ -285,6 +286,8 @@ page_header_reset_last_insert(
}
}
#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Determine whether the page is in new-style compact format.
@return nonzero if the page is in compact format, zero if it is in
@ -298,6 +301,7 @@ page_is_comp(
return(page[PAGE_HEADER + PAGE_N_HEAP] & 0x80);
}
#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
TRUE if the record is on a page in compact format.
@return nonzero if in compact format */
@ -326,6 +330,8 @@ page_rec_get_heap_no(
}
}
#endif /* !UNIV_INNOCHECKSUM */
/************************************************************//**
Determine whether the page is a B-tree leaf.
@return true if the page is a B-tree leaf (PAGE_LEVEL = 0) */
@ -338,6 +344,7 @@ page_is_leaf(
return(!*(const uint16*) (page + (PAGE_HEADER + PAGE_LEVEL)));
}
#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Determine whether the page is empty.
@return true if the page is empty (PAGE_N_RECS = 0) */
@ -627,6 +634,8 @@ page_get_middle_rec(
return(page_rec_get_nth(page, middle));
}
#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the page number.
@return page number */
@ -640,6 +649,7 @@ page_get_page_no(
return(mach_read_from_4(page + FIL_PAGE_OFFSET));
}
#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Gets the tablespace identifier.
@return space id */
@ -653,6 +663,8 @@ page_get_space_id(
return(mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
}
#endif /* !UNIV_INNOCHECKSUM */
/*************************************************************//**
Gets the number of user records on page (infimum and supremum records
are not user records).
@ -666,6 +678,7 @@ page_get_n_recs(
return(page_header_get_field(page, PAGE_N_RECS));
}
#ifndef UNIV_INNOCHECKSUM
/*************************************************************//**
Gets the number of dir slots in directory.
@return number of slots */
@ -1054,6 +1067,8 @@ page_rec_get_base_extra_size(
return(REC_N_NEW_EXTRA_BYTES + (ulint) !page_rec_is_comp(rec));
}
#endif /* UNIV_INNOCHECKSUM */
/************************************************************//**
Returns the sum of the sizes of the records in the record list, excluding
the infimum and supremum records.
@ -1077,6 +1092,7 @@ page_get_data_size(
return(ret);
}
#ifndef UNIV_INNOCHECKSUM
/************************************************************//**
Allocates a block of memory from the free list of an index page. */
UNIV_INLINE
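
Most of the header hunks above and below follow a single pattern: the plain page readers that the standalone innochecksum tool needs (page_align(), page_is_comp(), page_is_leaf(), page_get_page_no(), mach_read_from_2() and friends) are hoisted out of the #ifndef UNIV_INNOCHECKSUM region, while anything that drags in server infrastructure stays inside it. A schematic header showing that layout; the example_ names are made up for illustration and only the guard macro is real:

    #ifndef example_page0example_h
    #define example_page0example_h

    #include <cstdint>

    /* Shared with innochecksum: a pure field reader with no server
       dependencies.  InnoDB stores on-page integers big-endian, and
       FIL_PAGE_TYPE lives at byte offset 24 of the page header. */
    inline uint16_t example_page_get_type(const unsigned char* page)
    {
      return static_cast<uint16_t>((page[24] << 8) | page[25]);
    }

    #ifndef UNIV_INNOCHECKSUM
    /* Server-only part: declarations touching mini-transactions, the
       buffer pool or latching stay behind the guard. */
    struct mtr_t;
    void example_page_set_type(unsigned char* page, uint16_t type, mtr_t* mtr);
    #endif /* !UNIV_INNOCHECKSUM */

    #endif /* example_page0example_h */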

View file

@ -27,12 +27,14 @@ Created 5/30/1994 Heikki Tuuri
#ifndef rem0rec_h
#define rem0rec_h
#ifndef UNIV_INNOCHECKSUM
#include "univ.i"
#include "data0data.h"
#include "rem0types.h"
#include "mtr0types.h"
#include "page0types.h"
#include "trx0types.h"
#endif /*! UNIV_INNOCHECKSUM */
#include <ostream>
#include <sstream>
@ -92,6 +94,7 @@ offsets[] array, first passed to rec_get_offsets() */
#define REC_OFFS_NORMAL_SIZE OFFS_IN_REC_NORMAL_SIZE
#define REC_OFFS_SMALL_SIZE 10
#ifndef UNIV_INNOCHECKSUM
/******************************************************//**
The following function is used to get the pointer of the next chained record
on the same page.
@ -1112,4 +1115,5 @@ int wsrep_rec_get_foreign_key(
#include "rem0rec.ic"
#endif /* !UNIV_INNOCHECKSUM */
#endif /* rem0rec_h */

View file

@ -272,13 +272,10 @@ row_table_got_default_clust_index(
const dict_table_t* table); /*!< in: table */
/** Does an update or delete of a row for MySQL.
@param[in] mysql_rec row in the MySQL format
@param[in,out] prebuilt prebuilt struct in MySQL handle
@return error code or DB_SUCCESS */
dberr_t
row_update_for_mysql(
const byte* mysql_rec,
row_prebuilt_t* prebuilt)
row_update_for_mysql(row_prebuilt_t* prebuilt)
MY_ATTRIBUTE((warn_unused_result));
/** This can only be used when srv_locks_unsafe_for_binlog is TRUE or this

View file

@ -108,13 +108,8 @@ support cross-platform development and expose commonly used SQL names. */
#include <unistd.h>
#endif
#ifdef UNIV_INNOCHECKSUM
extern bool strict_verify;
extern FILE* log_file;
extern uintmax_t cur_page_num;
#endif /* UNIV_INNOCHECKSUM */
#include "my_pthread.h"
/* Following defines are to enable performance schema
instrumentation in each of five InnoDB modules if
HAVE_PSI_INTERFACE is defined. */
@ -463,6 +458,12 @@ in both 32-bit and 64-bit environments. */
# define UINT64PFx "%016" PRIx64
#endif
#ifdef UNIV_INNOCHECKSUM
extern bool strict_verify;
extern FILE* log_file;
extern unsigned long long cur_page_num;
#endif /* UNIV_INNOCHECKSUM */
typedef int64_t ib_int64_t;
typedef uint64_t ib_uint64_t;
typedef uint32_t ib_uint32_t;

View file

@ -359,7 +359,6 @@ log_reserve_and_open(
loop:
ut_ad(log_mutex_own());
ut_ad(!recv_no_log_write);
if (log_sys->is_extending) {
log_mutex_exit();
@ -416,7 +415,6 @@ log_write_low(
ut_ad(log_mutex_own());
part_loop:
ut_ad(!recv_no_log_write);
/* Calculate a part length */
data_len = (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len;
@ -2291,6 +2289,7 @@ log_pad_current_log_block(void)
ulint i;
lsn_t lsn;
ut_ad(!recv_no_log_write);
/* We retrieve lsn only because otherwise gcc crashed on HP-UX */
lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE);

View file

@ -821,13 +821,6 @@ recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field)
continue;
}
group->state = LOG_GROUP_OK;
group->lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = static_cast<ib_uint64_t>(
mach_read_from_4(buf + OFFSET_HIGH32)) << 32
| mach_read_from_4(buf + OFFSET_LOW32);
checkpoint_no = mach_read_from_8(
buf + LOG_CHECKPOINT_NO);
@ -838,12 +831,21 @@ recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field)
DBUG_PRINT("ib_log",
("checkpoint " UINT64PF " at " LSN_PF " found",
checkpoint_no, group->lsn));
checkpoint_no,
mach_read_from_8(buf + LOG_CHECKPOINT_LSN)));
if (checkpoint_no >= max_no) {
*max_group = group;
*max_field = field;
max_no = checkpoint_no;
group->state = LOG_GROUP_OK;
group->lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = static_cast<ib_uint64_t>(
mach_read_from_4(buf + OFFSET_HIGH32)) << 32
| mach_read_from_4(buf + OFFSET_LOW32);
}
}
@ -1043,22 +1045,22 @@ recv_find_max_checkpoint(ulint* max_field)
continue;
}
group->state = LOG_GROUP_OK;
group->lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = mach_read_from_8(
buf + LOG_CHECKPOINT_OFFSET);
checkpoint_no = mach_read_from_8(
buf + LOG_CHECKPOINT_NO);
DBUG_PRINT("ib_log",
("checkpoint " UINT64PF " at " LSN_PF " found ",
checkpoint_no, group->lsn));
("checkpoint " UINT64PF " at " LSN_PF " found",
checkpoint_no, mach_read_from_8(
buf + LOG_CHECKPOINT_LSN)));
if (checkpoint_no >= max_no) {
*max_field = field;
max_no = checkpoint_no;
group->state = LOG_GROUP_OK;
group->lsn = mach_read_from_8(
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = mach_read_from_8(
buf + LOG_CHECKPOINT_OFFSET);
}
}
@ -3212,7 +3214,11 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
err = recv_find_max_checkpoint(&max_cp_field);
if (err != DB_SUCCESS) {
if (err != DB_SUCCESS
|| (log_sys->log.format != 0
&& (log_sys->log.format & ~LOG_HEADER_FORMAT_ENCRYPTED)
!= LOG_HEADER_FORMAT_CURRENT)) {
log_mutex_exit();
return(err);
}
@ -3242,8 +3248,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
case 0:
log_mutex_exit();
return(recv_log_format_0_recover(checkpoint_lsn));
case LOG_HEADER_FORMAT_CURRENT:
case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED:
default:
if (end_lsn == 0) {
break;
}
@ -3251,8 +3256,6 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
contiguous_lsn = end_lsn;
break;
}
/* fall through */
default:
recv_sys->found_corrupt_log = true;
log_mutex_exit();
return(DB_ERROR);

View file

@ -478,6 +478,7 @@ mtr_write_log(
const ulint len = log->size();
mtr_write_log_t write_log;
ut_ad(!recv_no_log_write);
DBUG_PRINT("ib_log",
(ULINTPF " extra bytes written at " LSN_PF,
len, log_sys->lsn));
@ -799,6 +800,8 @@ mtr_t::release_page(const void* ptr, mtr_memo_type_t type)
ulint
mtr_t::Command::prepare_write()
{
ut_ad(!recv_no_log_write);
switch (m_impl->m_log_mode) {
case MTR_LOG_SHORT_INSERTS:
ut_ad(0);

View file

@ -5063,24 +5063,19 @@ page_zip_verify_checksum(
const void* data, /*!< in: compressed page */
ulint size) /*!< in: size of compressed page */
{
const unsigned char* p = static_cast<const unsigned char*>(data)
+ FIL_PAGE_SPACE_OR_CHKSUM;
ib_uint32_t stored;
ib_uint32_t calc;
const uint32_t stored = static_cast<uint32_t>(
mach_read_from_4(p));
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
#ifdef UNIV_INNOCHECKSUM
p = static_cast<const unsigned char*>(data) + FIL_PAGE_TYPE;
bool no_checksum = (mach_read_from_2(p) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
p = static_cast<const unsigned char*>(data) + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION;
bool encrypted = (mach_read_from_4(p) != 0);
p = static_cast<const unsigned char*>(data) + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4;
const uint32_t checksum = static_cast<uint32_t>(mach_read_from_4(p));
if (no_checksum) {
return (TRUE);
}
#endif
ulint page_no MY_ATTRIBUTE((unused)) =
mach_read_from_4(static_cast<const unsigned char*>
(data) + FIL_PAGE_OFFSET);
ulint space_id MY_ATTRIBUTE((unused)) =
mach_read_from_4(static_cast<const unsigned char*>
(data) + FIL_PAGE_SPACE_ID);
const page_id_t page_id(space_id, page_no);
#if FIL_PAGE_LSN % 8
#error "FIL_PAGE_LSN must be 64 bit aligned"
@ -5100,7 +5095,7 @@ page_zip_verify_checksum(
}
if (i >= size) {
if (log_file) {
fprintf(log_file, "Page::%lu is empty and"
fprintf(log_file, "Page::%llu is empty and"
" uncorrupted\n", cur_page_num);
}
@ -5124,21 +5119,12 @@ page_zip_verify_checksum(
return(TRUE);
}
#ifndef UNIV_INNOCHECKSUM
ulint page_no = mach_read_from_4(static_cast<
const unsigned char*>
(data) + FIL_PAGE_OFFSET);
ulint space_id = mach_read_from_4(static_cast<
const unsigned char*>
(data) + FIL_PAGE_SPACE_ID);
const page_id_t page_id(space_id, page_no);
#endif /* UNIV_INNOCHECKSUM */
const uint32_t calc = page_zip_calc_checksum(data, size, curr_algo);
calc = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, curr_algo));
#ifdef UNIV_INNOCHECKSUM
if (log_file) {
fprintf(log_file, "page::%lu;"
fprintf(log_file, "page::%llu;"
" %s checksum: calculated = %u;"
" recorded = %u\n", cur_page_num,
buf_checksum_algorithm_name(
@ -5153,21 +5139,17 @@ page_zip_verify_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_CRC32);
if (log_file) {
fprintf(log_file, "page::%lu: crc32 checksum:"
fprintf(log_file, "page::%llu: crc32 checksum:"
" calculated = %u; recorded = %u\n",
cur_page_num, crc32, stored);
fprintf(log_file, "page::%lu: none checksum:"
fprintf(log_file, "page::%llu: none checksum:"
" calculated = %lu; recorded = %u\n",
cur_page_num, BUF_NO_CHECKSUM_MAGIC, stored);
}
}
#endif /* UNIV_INNOCHECKSUM */
if (stored == calc
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && stored == checksum)
#endif
) {
if (stored == calc) {
return(TRUE);
}
@ -5199,11 +5181,7 @@ page_zip_verify_checksum(
if (legacy_big_endian_checksum) {
const uint32_t calculated =
page_zip_calc_checksum(data, size, curr_algo, true);
if (stored == calculated
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && calculated == checksum)
#endif
) {
if (stored == calculated) {
return(TRUE);
}
@ -5213,11 +5191,7 @@ page_zip_verify_checksum(
uint32_t calculated =
page_zip_calc_checksum(data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
if (stored == calculated
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && stored == checksum)
#endif
) {
if (stored == calculated) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
@ -5237,11 +5211,7 @@ page_zip_verify_checksum(
/* If legacy checksum is not checked, do it now. */
if ((legacy_checksum_checked
&& stored == calculated)
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && calculated == checksum)
#endif
) {
&& stored == calculated)) {
legacy_big_endian_checksum = true;
return(TRUE);
}
@ -5251,11 +5221,7 @@ page_zip_verify_checksum(
case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
case SRV_CHECKSUM_ALGORITHM_INNODB: {
if (stored == BUF_NO_CHECKSUM_MAGIC
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && checksum == BUF_NO_CHECKSUM_MAGIC)
#endif
) {
if (stored == BUF_NO_CHECKSUM_MAGIC) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
== SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
@ -5274,14 +5240,8 @@ page_zip_verify_checksum(
uint32_t calculated1;
if (stored == calculated
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && checksum == calculated)
#endif
|| stored == (calculated1 =
|| stored == (calculated1 =
page_zip_calc_checksum(data, size, SRV_CHECKSUM_ALGORITHM_CRC32, true))
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && checksum == calculated1)
#endif
) {
#ifndef UNIV_INNOCHECKSUM
if (curr_algo
@ -5305,12 +5265,7 @@ page_zip_verify_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_CRC32, true);
if (stored == calculated
|| stored == calculated1
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && checksum == calculated)
|| ( encrypted == true && checksum == calculated1)
#endif
) {
|| stored == calculated1) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(
curr_algo,
@ -5323,11 +5278,7 @@ page_zip_verify_checksum(
calculated = page_zip_calc_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
if (stored == calculated
#ifdef UNIV_INNOCHECKSUM
|| ( encrypted == true && checksum == calculated)
#endif
) {
if (stored == calculated) {
#ifndef UNIV_INNOCHECKSUM
page_warn_strict_checksum(

View file

@ -740,6 +740,8 @@ row_mysql_handle_errors(
{
dberr_t err;
DBUG_ENTER("row_mysql_handle_errors");
handle_new_error:
err = trx->error_state;
@ -747,6 +749,9 @@ handle_new_error:
trx->error_state = DB_SUCCESS;
DBUG_LOG("trx", "handle error: " << ut_strerr(err)
<< ";id=" << ib::hex(trx->id) << ", " << trx);
switch (err) {
case DB_LOCK_WAIT_TIMEOUT:
if (row_rollback_on_timeout) {
@ -795,7 +800,7 @@ handle_new_error:
*new_err = err;
return(true);
DBUG_RETURN(true);
case DB_DEADLOCK:
case DB_LOCK_TABLE_FULL:
@ -840,7 +845,7 @@ handle_new_error:
trx->error_state = DB_SUCCESS;
return(false);
DBUG_RETURN(false);
}
/********************************************************************//**
@ -1806,14 +1811,10 @@ public:
/** Does an update or delete of a row for MySQL.
@param[in] mysql_rec row in the MySQL format
@param[in,out] prebuilt prebuilt struct in MySQL handle
@return error code or DB_SUCCESS */
static
dberr_t
row_update_for_mysql_using_upd_graph(
const byte* mysql_rec,
row_prebuilt_t* prebuilt)
row_update_for_mysql(row_prebuilt_t* prebuilt)
{
trx_savept_t savept;
dberr_t err;
@ -1829,13 +1830,13 @@ row_update_for_mysql_using_upd_graph(
upd_cascade_t* processed_cascades;
bool got_s_lock = false;
DBUG_ENTER("row_update_for_mysql_using_upd_graph");
DBUG_ENTER("row_update_for_mysql");
ut_ad(trx);
ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED);
ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED);
ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
ut_ad(table->stat_initialized);
UT_NOT_USED(mysql_rec);
if (!table->is_readable()) {
return(row_mysql_get_table_status(table, trx, true));
@ -2154,19 +2155,6 @@ error:
DBUG_RETURN(err);
}
/** Does an update or delete of a row for MySQL.
@param[in] mysql_rec row in the MySQL format
@param[in,out] prebuilt prebuilt struct in MySQL handle
@return error code or DB_SUCCESS */
dberr_t
row_update_for_mysql(
const byte* mysql_rec,
row_prebuilt_t* prebuilt)
{
ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
return(row_update_for_mysql_using_upd_graph(mysql_rec, prebuilt));
}
/** This can only be used when srv_locks_unsafe_for_binlog is TRUE or this
session is using a READ COMMITTED or READ UNCOMMITTED isolation level.
Before calling this function row_search_for_mysql() must have
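
The row_mysql_handle_errors() hunk above adds DBUG_ENTER and accordingly turns the two bare return statements into DBUG_RETURN. As a reminder of that convention, a small sketch using the standard "my_dbug.h" macros (not the actual function body):

    #include "my_dbug.h"

    static bool handle_error_sketch(int err)
    {
      DBUG_ENTER("handle_error_sketch");
      if (err == 0)
        DBUG_RETURN(false);  /* every exit must use DBUG_RETURN, not return, */
      DBUG_RETURN(true);     /* or the DBUG call-depth tracking gets corrupted */
    }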

View file

@ -293,14 +293,16 @@ trx_purge_add_update_undo_to_history(
After the purge thread has been given permission to exit,
in fast shutdown, we may roll back transactions (trx->undo_no==0)
in THD::cleanup() invoked from unlink_thd(). */
in THD::cleanup() invoked from unlink_thd(), and we may also
continue to execute user transactions. */
ut_ad(srv_undo_sources
|| ((srv_startup_is_before_trx_rollback_phase
|| trx_rollback_or_clean_is_active)
&& purge_sys->state == PURGE_STATE_INIT)
|| (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND
&& purge_sys->state == PURGE_STATE_DISABLED)
|| (trx->undo_no == 0 && srv_fast_shutdown));
|| ((trx->undo_no == 0 || trx->in_mysql_trx_list)
&& srv_fast_shutdown));
/* Add the log as the first in the history list */
flst_add_first(rseg_header + TRX_RSEG_HISTORY,

Some files were not shown because too many files have changed in this diff.