merge 5.5 => 5.5-mtr

Bjorn Munch 2011-10-12 17:29:19 +02:00
commit 9a32e8e5a9
138 changed files with 1602 additions and 1647 deletions

View file

@ -1,4 +1,4 @@
MYSQL_VERSION_MAJOR=5
MYSQL_VERSION_MINOR=5
MYSQL_VERSION_PATCH=17
MYSQL_VERSION_PATCH=18
MYSQL_VERSION_EXTRA=

View file

@ -4308,7 +4308,7 @@ sql_real_connect(char *host,char *database,char *user,char *password,
{
char init_command[100];
sprintf(init_command,
"SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=%lu,SQL_MAX_JOIN_SIZE=%lu",
"SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=%lu,MAX_JOIN_SIZE=%lu",
select_limit,max_join_size);
mysql_options(&mysql, MYSQL_INIT_COMMAND, init_command);
}
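
For reference, the corrected init command that the mysql client sends under --safe-updates now names the real MAX_JOIN_SIZE variable; a minimal sketch of the resulting statement (the limit values shown are illustrative, not necessarily the client defaults):

SET SQL_SAFE_UPDATES=1, SQL_SELECT_LIMIT=1000, MAX_JOIN_SIZE=1000000;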

View file

@ -5364,6 +5364,7 @@ do_handle_error:
var_set_errno(0);
handle_no_error(command);
revert_properties();
return 1; /* Connected */
}
@ -7319,6 +7320,7 @@ void run_query_normal(struct st_connection *cn, struct st_command *command,
/* If we come here the query is both executed and read successfully */
handle_no_error(command);
revert_properties();
end:
@ -7514,8 +7516,6 @@ void handle_no_error(struct st_command *command)
die("query '%s' succeeded - should have failed with sqlstate %s...",
command->query, command->expected_errors.err[0].code.sqlstate);
}
revert_properties();
DBUG_VOID_RETURN;
}
@ -7546,9 +7546,6 @@ void run_query_stmt(MYSQL *mysql, struct st_command *command,
DBUG_ENTER("run_query_stmt");
DBUG_PRINT("query", ("'%-.60s'", query));
/* Remember disable_result_log since handle_no_error() may reset it */
my_bool dis_res= disable_result_log;
/*
Init a new stmt if it's not already one created for this connection
*/
@ -7644,7 +7641,7 @@ void run_query_stmt(MYSQL *mysql, struct st_command *command,
/* If we got here the statement was both executed and read successfully */
handle_no_error(command);
if (!dis_res)
if (!disable_result_log)
{
/*
Not all statements creates a result set. If there is one we can
@ -7720,7 +7717,7 @@ end:
dynstr_free(&ds_prepare_warnings);
dynstr_free(&ds_execute_warnings);
}
revert_properties();
/* Close the statement if - no reconnect, need new prepare */
if (mysql->reconnect)

View file

@ -51,6 +51,7 @@ typedef struct st_heapinfo /* Struct from heap_info */
uint reclength; /* Length of one record */
int errkey;
ulonglong auto_increment;
time_t create_time;
} HEAPINFO;
@ -147,6 +148,7 @@ typedef struct st_heap_share
uint open_count;
uchar *del_link; /* Link to next block with del. rec */
char * name; /* Name of "memory-file" */
time_t create_time;
THR_LOCK lock;
mysql_mutex_t intern_lock; /* Locking for use with _locking */
my_bool delete_on_close;
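
With create_time added to HEAPINFO and the heap share, MEMORY tables begin reporting a creation timestamp; a minimal sketch of the observable effect, assuming a server containing this change (the table name is hypothetical):

CREATE TABLE m1 (a INT) ENGINE=MEMORY;
SELECT CREATE_TIME FROM INFORMATION_SCHEMA.TABLES
  WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'm1';  # no longer NULL
DROP TABLE m1;

This is also why the SHOW TABLE STATUS result files later in this commit start masking the Create_time column with '#'.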

View file

@ -161,6 +161,21 @@ int Handshake_client::write_packet(Blob &data)
keep all the data.
*/
unsigned block_count= data.len()/512 + ((data.len() % 512) ? 1 : 0);
#if !defined(DBUG_OFF) && defined(WINAUTH_USE_DBUG_LIB)
/*
For testing purposes, use wrong block count to see how server
handles this.
*/
DBUG_EXECUTE_IF("winauth_first_packet_test",{
block_count= data.len() == 601 ? 0 :
data.len() == 602 ? 1 :
block_count;
});
#endif
DBUG_ASSERT(block_count < (unsigned)0x100);
saved_byte= data[254];
data[254] = block_count;

View file

@ -1,59 +0,0 @@
# BUG#59338 Inconsistency in binlog for statements that don't change any rows STATEMENT SBR
# In SBR, if a statement does not fail, it is always written to the binary log,
# regardless if rows are changed or not. If there is a failure, a statement is
# only written to the binary log if a non-transactional (.e.g. MyIsam) engine
# is updated. INSERT ON DUPLICATE KEY UPDATE was not following the rule above
# and was not written to the binary log, if then engine was Innodb.
#
# In this test case, we check if INSERT ON DUPLICATE KEY UPDATE that does not
# change anything is still written to the binary log.
# Prepare environment
--connection master
eval CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=$engine_type;
eval CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=$engine_type;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
# An insert duplicate that does not update anything must be written to the binary
# log in SBR and MIXED modes. We check this property by summing a before and after
# the update and comparing the binlog positions. The sum should be the same at both
# points and the statement should be in the binary log.
--let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
--let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
--let $statement_file=INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a
--eval $statement_file
--let $assert_cond= SUM(a) = 1 FROM t1
--let $assert_text= Sum of elements in t1 should be 1.
--source include/assert.inc
if (`SELECT @@BINLOG_FORMAT = 'ROW'`)
{
--let $binlog_position_cmp= =
--let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
if (`SELECT @@BINLOG_FORMAT != 'ROW'`)
{
--let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 1, 1\', Info, 1]\' LIKE \'%$statement_file\'
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
--source include/assert.inc
# Compare master and slave
--sync_slave_with_master
--let $diff_tables= master:test.t1 , slave:test.t1
--source include/diff_tables.inc
# Clean up
--connection master
drop table t1, t2;
--sync_slave_with_master

View file

@ -77,6 +77,7 @@ eval create table t2(b int auto_increment, c int, key(b)) engine=$engine_type;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
--disable_warnings ONCE
insert into t2 (c) select * from t1 ORDER BY a;
select * from t2 ORDER BY b;
sync_slave_with_master;
@ -113,8 +114,10 @@ set @@session.sql_auto_is_null=1;
eval create table t1(a int auto_increment, key(a)) engine=$engine_type;
eval create table t2(a int) engine=$engine_type;
insert into t1 (a) values (null);
--disable_warnings
insert into t2 (a) select a from t1 where a is null;
insert into t2 (a) select a from t1 where a is null;
--enable_warnings
select * from t2;
sync_slave_with_master;
connection slave;
@ -172,17 +175,15 @@ begin
end|
delimiter ;|
--disable_warnings
--disable_warnings ONCE
insert into t1 (last_id) values (0);
--enable_warnings
drop trigger t1_bi;
# Check that nested call doesn't affect outer context.
select last_insert_id();
--disable_warnings
--disable_warnings ONCE
select bug15728_insert();
--enable_warnings
select last_insert_id();
insert into t1 (last_id) values (bug15728());
# This should be exactly one greater than in the previous call.
@ -190,9 +191,8 @@ select last_insert_id();
# BUG#20339 - stored procedure using LAST_INSERT_ID() does not
# replicate statement-based
--disable_warnings
--disable_warnings ONCE
drop procedure if exists foo;
--enable_warnings
delimiter |;
create procedure foo()
begin
@ -252,6 +252,7 @@ select * from t1 order by n;
# table's counter, the counter for next row is bigger than the
# after-value of the updated row.
connection master;
--disable_warnings ONCE
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
sync_slave_with_master;
@ -270,6 +271,7 @@ delete from t1 where b <> 100;
select * from t1 order by n;
connection master;
--disable_warnings ONCE
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
sync_slave_with_master;
@ -287,6 +289,7 @@ connection master;
# testcase with INSERT VALUES
eval CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b INT,
UNIQUE(b)) ENGINE=$engine_type;
--disable_warnings ONCE
INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10;
SELECT * FROM t1 ORDER BY a;
sync_slave_with_master;
@ -314,19 +317,23 @@ INSERT INTO t2 (field_a, field_b, field_c) VALUES (3, 'c', '3c');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (4, 'd', '4d');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (5, 'e', '5e');
# Updating table t1 based on values from table t2
--disable_warnings
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
--enable_warnings
# Inserting new record into t2
INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
# Updating t1 again
--disable_warnings
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
--enable_warnings
SELECT * FROM t1 ORDER BY id;
sync_slave_with_master;
SELECT * FROM t1 ORDER BY id;
@ -433,9 +440,8 @@ delimiter ;|
INSERT INTO t1 VALUES (NULL, -1);
CALL p1();
--disable_warnings
--disable_warnings ONCE
SELECT f1();
--enable_warnings
INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()),
(NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2());
INSERT INTO t1 VALUES (NULL, f2());
@ -504,16 +510,14 @@ insert into t2 (id) values(1),(2),(3);
delete from t2;
set sql_log_bin=1;
#inside SELECT, then inside INSERT
--disable_warnings
--disable_warnings ONCE
select insid();
--enable_warnings
set sql_log_bin=0;
insert into t2 (id) values(5),(6),(7);
delete from t2 where id>=5;
set sql_log_bin=1;
--disable_warnings
--disable_warnings ONCE
insert into t1 select insid();
--enable_warnings
select * from t1 order by id;
select * from t2 order by id;
@ -537,6 +541,7 @@ begin
insert into t2 values(null,3);
end|
delimiter ;|
--disable_warnings ONCE
call foo();
select * from t1 order by n;
select * from t2 order by id;
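
The --disable_warnings ONCE directives introduced above use the mtr one-shot form; a minimal sketch, assuming ONCE suppresses warnings for the next statement only (table names hypothetical):

--disable_warnings ONCE
insert into t2 (c) select * from t1;  # warnings suppressed for this statement only
select * from t2;                     # warnings reported again from here on

The one-shot form removes the need for the matching --enable_warnings that the old paired form required.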

View file

@ -31,7 +31,7 @@ INSERT INTO t2 VALUES (3, 5);
INSERT INTO t2 VALUES (4, 3);
INSERT INTO t2 VALUES (5, 4);
INSERT INTO t2 VALUES (6, 6);
--disable_warnings ONCE
INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
--let $assert_cond= COUNT(*) = 6 FROM t1
--let $assert_text= Count of elements in t1 should be 6.
@ -51,25 +51,70 @@ INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
--let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
--let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
--let $statement_file=INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a
--disable_warnings ONCE
--eval $statement_file
--let $assert_cond= COUNT(*) = 6 FROM t1
--let $assert_text= Count of elements in t1 should be 6.
--source include/assert.inc
if (`SELECT @@BINLOG_FORMAT = 'ROW'`)
if (`SELECT @@BINLOG_FORMAT != 'STATEMENT'`)
{
--let $binlog_position_cmp= =
--let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
if (`SELECT @@BINLOG_FORMAT != 'ROW'`)
if (`SELECT @@BINLOG_FORMAT = 'STATEMENT'`)
{
--let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 2, 1\', Info, 1]\' LIKE \'%$statement_file\'
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
--source include/assert.inc
# An insert duplicate that does not update anything must be written to the binary
# log in SBR and MIXED modes. We check this property by summing a before and after
# the update and comparing the binlog positions. The sum should be the same at both
# points and the statement should be in the binary log.
--disable_warnings
DROP TABLE t1;
DROP TABLE t2;
--enable_warnings
eval CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=$engine_type;
eval CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=$engine_type;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
--let $binlog_file= query_get_value("SHOW MASTER STATUS", File, 1)
--let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1)
--let $statement_file=INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a
--disable_warnings ONCE
--eval $statement_file
--let $assert_cond= SUM(a) = 1 FROM t1
--let $assert_text= Sum of elements in t1 should be 1.
--source include/assert.inc
if (`SELECT @@BINLOG_FORMAT != 'STATEMENT'`)
{
--let $binlog_position_cmp= =
--let $assert_cond= [SHOW MASTER STATUS, Position, 1] $binlog_position_cmp $binlog_start
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
if (`SELECT @@BINLOG_FORMAT = 'STATEMENT'`)
{
--let $assert_cond= \'[\'SHOW BINLOG EVENTS IN "$binlog_file" FROM $binlog_start LIMIT 1, 1\', Info, 1]\' LIKE \'%$statement_file\'
--let $assert_text= In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.
}
--source include/assert.inc
# Clean up
--connection master
drop table t1, t2;
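
The guards above were flipped because INSERT IGNORE ... SELECT and INSERT ... SELECT ... ON DUPLICATE KEY UPDATE are now marked unsafe, so under MIXED format they are logged row-based, just as under ROW; a sketch of the resulting test pattern:

if (`SELECT @@BINLOG_FORMAT != 'STATEMENT'`)
{
  # ROW and MIXED: logged as row events (or not at all when nothing
  # changes), so the binlog-position assertion applies
}
if (`SELECT @@BINLOG_FORMAT = 'STATEMENT'`)
{
  # STATEMENT only: the original statement text must appear in the binlog
}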

View file

@ -521,10 +521,34 @@ commit;
call p_verify_status_increment(2, 2, 2, 2);
--echo # 15. Read-write statement: UPDATE IGNORE, change 0 rows.
--echo #
--disable_warnings
update ignore t1 set a=2 where a=1;
call p_verify_status_increment(2, 2, 1, 0);
--enable_warnings
if (`select @@binlog_format = 'STATEMENT'`)
{
--disable_query_log
call p_verify_status_increment(2, 2, 1, 0);
--enable_query_log
}
if (`select @@binlog_format != 'STATEMENT'`)
{
--disable_query_log
call p_verify_status_increment(1, 0, 1, 0);
--enable_query_log
}
commit;
call p_verify_status_increment(2, 2, 1, 0);
if (`select @@binlog_format = 'STATEMENT'`)
{
--disable_query_log
call p_verify_status_increment(2, 2, 1, 0);
--enable_query_log
}
if (`select @@binlog_format != 'STATEMENT'`)
{
--disable_query_log
call p_verify_status_increment(1, 0, 1, 0);
--enable_query_log
}
--echo #
--echo # Create a stored function that modifies a
--echo # non-transactional table. Demonstrate that changes in
@ -603,7 +627,9 @@ call p_verify_status_increment(2, 0, 1, 0);
--echo # 21. Read-write statement: UPDATE, change 0 (transactional) rows.
--echo #
--disable_warnings
update t1 set a=2 where a=f1()+10;
--enable_warnings
call p_verify_status_increment(2, 0, 1, 0);
commit;
call p_verify_status_increment(2, 0, 1, 0);
@ -703,7 +729,9 @@ call p_verify_status_increment(4, 4, 4, 4);
--echo #
insert into t2 select a from t1;
commit;
--disable_warnings
replace into t2 select a from t1;
--enable_warnings
commit;
call p_verify_status_increment(8, 8, 8, 8);
#
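
The format-dependent expectations above arise because a no-op UPDATE IGNORE is still written to the binary log under statement format, which makes the binlog participate in the commit; a sketch of the pattern, with the counter semantics assumed from p_verify_status_increment as defined earlier in this test file:

if (`select @@binlog_format = 'STATEMENT'`)
{
  call p_verify_status_increment(2, 2, 1, 0);  # statement logged: binlog joins the commit
}
if (`select @@binlog_format != 'STATEMENT'`)
{
  call p_verify_status_increment(1, 0, 1, 0);  # nothing logged: engine-only commit
}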

View file

@ -549,11 +549,9 @@ SUCCESS
# 15. Read-write statement: UPDATE IGNORE, change 0 rows.
#
update ignore t1 set a=2 where a=1;
call p_verify_status_increment(2, 2, 1, 0);
SUCCESS
commit;
call p_verify_status_increment(2, 2, 1, 0);
SUCCESS
#

View file

@ -1126,5 +1126,19 @@ NULL
Warnings:
Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated
#
# Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
#
SET NAMES utf8, @@character_set_connection=utf16;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
id l
a 512
Warnings:
Warning 1260 Row 1 was cut by GROUP_CONCAT()
#
# End of 5.5 tests
#

View file

@ -1167,5 +1167,19 @@ CASE s1 WHEN 'a' THEN 'b' ELSE 'c' END
b
DROP TABLE t1;
#
# Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
#
SET NAMES utf8, @@character_set_connection=utf32;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
id l
a 256
Warnings:
Warning 1260 Row 1 was cut by GROUP_CONCAT()
#
# End of 5.5 tests
#

View file

@ -2788,7 +2788,7 @@ create table t1 as select group_concat(1,2,3) as c1;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` varchar(342) CHARACTER SET utf8 DEFAULT NULL
`c1` text CHARACTER SET utf8
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 as select 1 as c1 union select 'a';
@ -5010,5 +5010,19 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select 'abcdÁÂÃÄÅ' AS `abcdÁÂÃÄÅ`,_latin1'abcd\xC3\x81\xC3\x82\xC3\x83\xC3\x84\xC3\x85' AS `abcdÁÃÃÄÅ`,_utf8'abcd\xC3\x81\xC3\x82\xC3\x83\xC3\x84\xC3\x85' AS `abcdÁÂÃÄÅ`
#
# Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
#
SET NAMES utf8;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
id l
a 1024
Warnings:
Warning 1260 Row 2 was cut by GROUP_CONCAT()
#
# End of 5.5 tests
#
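
The three CHAR_LENGTH results in these group_concat result files follow from the default group_concat_max_len of 1024 bytes divided by the byte width of one character in each connection character set (the REPEAT payload is ASCII); a worked sketch, assuming the 1024-byte default:

SELECT @@group_concat_max_len;          # 1024 (bytes)
SELECT 1024 DIV 1 AS utf8_ascii_chars,  # 1024, as in the utf8 result
       1024 DIV 2 AS utf16_chars,       # 512, as in the utf16 result
       1024 DIV 4 AS utf32_chars;       # 256, as in the utf32 result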

View file

@ -83,5 +83,18 @@ a a b filler
SET SESSION debug = DEFAULT;
DROP TABLE t1, t2;
#
# Bug#11747970 34660: CRASH WHEN FEDERATED TABLE LOSES CONNECTION DURING INSERT ... SELECT
#
CREATE TABLE t1(f1 INT, KEY(f1));
CREATE TABLE t2(f1 INT);
INSERT INTO t1 VALUES (1),(2);
INSERT INTO t2 VALUES (1),(2);
SET SESSION debug='d,bug11747970_simulate_error';
INSERT IGNORE INTO t2 SELECT f1 FROM t1 a WHERE NOT EXISTS (SELECT 1 FROM t2 b WHERE a.f1 = b.f1);
Warnings:
Error 1105 Unknown error
SET SESSION debug = DEFAULT;
DROP TABLE t1,t2;
#
# End of 5.1 tests
#
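
The failure above is injected through the debug system variable, available only in debug builds; a minimal usage sketch (the injection-point name is taken from this result file):

SET SESSION debug = 'd,bug11747970_simulate_error';  # arm the DBUG injection point
INSERT IGNORE INTO t2 SELECT f1 FROM t1;             # the server simulates an error here
SET SESSION debug = DEFAULT;                         # disarm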

View file

@ -2803,8 +2803,8 @@ SELECT ((rpad(1.0,2048,1)) = ('4(') ^ (0.1));
Warnings:
Warning 1292 Truncated incorrect INTEGER value: '4('
SELECT
pow((rpad(1.0,2048,1)),(b'1111111111111111111111111111111111111111111'));
ERROR 22003: DOUBLE value is out of range in 'pow(rpad(1.0,2048,1),0x07ffffffffff)'
pow((rpad(10.0,2048,1)),(b'1111111111111111111111111111111111111111111'));
ERROR 22003: DOUBLE value is out of range in 'pow(rpad(10.0,2048,1),0x07ffffffffff)'
SELECT ((rpad(1.0,2048,1)) + (0) ^ ('../'));
((rpad(1.0,2048,1)) + (0) ^ ('../'))
1.011111111111111

View file

@ -1676,7 +1676,7 @@ c (SELECT a FROM t1 WHERE b = c)
SELECT b c, (SELECT a FROM t1 WHERE b = c)
FROM t1
HAVING b = 10;
ERROR 42000: non-grouping field 'b' is used in HAVING clause
ERROR 42000: Non-grouping field 'b' is used in HAVING clause
SELECT MAX(b) c, (SELECT a FROM t1 WHERE b = c)
FROM t1
HAVING b = 10;

View file

@ -419,7 +419,7 @@ select f1 from t1 group by f1 having max(f1)=f1;
f1
set session sql_mode='ONLY_FULL_GROUP_BY';
select f1 from t1 having max(f1)=f1;
ERROR 42000: non-grouping field 'f1' is used in HAVING clause
ERROR 42000: Non-grouping field 'f1' is used in HAVING clause
select f1 from t1 group by f1 having max(f1)=f1;
f1
set session sql_mode='';

View file

@ -715,8 +715,8 @@ create table t1 (c char(10)) engine=memory;
create table t2 (c varchar(10)) engine=memory;
show table status like 't_';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 0 11 0 # 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 12 0 # 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 0 11 0 # 0 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 12 0 # 0 0 NULL # NULL NULL latin1_swedish_ci NULL
drop table t1, t2;
CREATE TABLE t1(a VARCHAR(1), b VARCHAR(2), c VARCHAR(256),
KEY(a), KEY(b), KEY(c)) ENGINE=MEMORY;

View file

@ -30,7 +30,7 @@ ERROR HY000: You are using safe update mode and you tried to update a table with
delete from t1 where a+0=1;
ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
select 1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
update t1 set b="a" limit 1;
update t1 set b="a" where b="b" limit 2;
delete from t1 where b="test" limit 1;
@ -42,7 +42,7 @@ SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS;
2 0
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
SELECT * from t1 order by a;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
SET SQL_BIG_SELECTS=1;
SELECT * from t1 order by a;
a b
@ -52,7 +52,7 @@ a b
5 a
SET MAX_JOIN_SIZE=2;
SELECT * from t1;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
SET MAX_JOIN_SIZE=DEFAULT;
SELECT * from t1;
a b
@ -82,12 +82,12 @@ insert into t1 select * from t1;
insert into t1 select * from t1;
set local max_join_size=8;
select * from (select * from t1) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
set local max_join_size=1;
select * from (select a.a as aa, b.a as ba from t1 a, t1 b) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
set local max_join_size=1;
select * from (select 1 union select 2 union select 3) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
drop table t1;
SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, MAX_JOIN_SIZE=DEFAULT;
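
The reworded ER_TOO_BIG_SELECT message seen throughout this file can be reproduced directly; a sketch, assuming a table t1 with a few rows (setting MAX_JOIN_SIZE also turns SQL_BIG_SELECTS off):

SET SESSION MAX_JOIN_SIZE = 2;
SELECT * FROM t1, t1 AS t2;          # fails: "... use SET SQL_BIG_SELECTS=1
                                     #  or SET MAX_JOIN_SIZE=# if the SELECT is okay"
SET SESSION MAX_JOIN_SIZE = DEFAULT;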

View file

@ -460,57 +460,57 @@ insert into t2 values (1),(2);
insert into t3 values (1,1),(2,2);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 2 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 2 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 2 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
insert into t1 values (3),(4);
insert into t2 values (3),(4);
insert into t3 values (3,3),(4,4);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 4 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 4 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 4 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
insert into t1 values (5);
insert into t2 values (5);
insert into t3 values (5,5);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 5 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 5 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 5 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
delete from t1 where a=3;
delete from t2 where b=3;
delete from t3 where a=3;
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 4 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 4 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 4 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 4 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 4 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 4 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
truncate table t1;
truncate table t2;
truncate table t3;
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 0 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 0 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
insert into t1 values (5);
insert into t2 values (5);
insert into t3 values (5,5);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 1 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 1 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 1 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
delete from t1 where a=5;
delete from t2 where b=5;
delete from t3 where a=5;
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MEMORY 10 Fixed 0 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 0 # # # # # NULL NULL NULL NULL latin1_swedish_ci NULL
t1 MEMORY 10 Fixed 0 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
t2 MEMORY 10 Fixed 0 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
t3 MEMORY 10 Fixed 0 # # # # # NULL # NULL NULL latin1_swedish_ci NULL
drop table t1, t2, t3;
create database mysqltest;
show create database mysqltest;
@ -662,7 +662,7 @@ DROP TABLE t1;
flush tables;
SHOW TABLE STATUS like 't1';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 NULL NULL NULL NULL # # # # NULL NULL NULL NULL NULL NULL NULL NULL Incorrect information in file: './test/t1.frm'
t1 NULL NULL NULL NULL # # # # NULL NULL # NULL NULL NULL NULL NULL Incorrect information in file: './test/t1.frm'
Warnings:
Warning 1033 Incorrect information in file: './test/t1.frm'
show create table t1;
@ -1334,7 +1334,7 @@ CREATE DATABASE `
CREATE TABLE `ä`.`ä` (a int) ENGINE=Memory;
SHOW TABLE STATUS FROM `ä` LIKE 'ä';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
ä MEMORY 10 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
ä MEMORY 10 Fixed 0 # # # # 0 NULL # NULL NULL latin1_swedish_ci NULL
DROP DATABASE `ä`;
show columns from `#mysql50#????????`;
Got one of the listed errors

Binary file not shown.

View file

@ -99,6 +99,8 @@ alter table t1 drop b;
create table t3 like t1;
insert into t1 select * from t3;
replace into t1 select * from t3;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
select * from t1;
a
select * from t2;

View file

@ -2352,6 +2352,7 @@ Note 1592 Unsafe statement written to the binary log using statement format sinc
REPLACE INTO t1 SELECT * FROM t1 LIMIT 1;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
UPDATE t1 SET a=1 LIMIT 1;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
@ -2368,6 +2369,7 @@ END|
CALL p1();
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
DROP PROCEDURE p1;
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
@ -2651,4 +2653,40 @@ a
13:46:40
1970-01-12 13:46:40
DROP TABLE t1;
CREATE TABLE filler_table (a INT, b INT);
INSERT INTO filler_table values (1,1),(1,2);
CREATE TABLE insert_table (a INT, b INT, PRIMARY KEY(a));
CREATE TABLE replace_table (a INT, b INT, PRIMARY KEY(a));
INSERT INTO replace_table values (1,1),(2,2);
CREATE TABLE update_table (a INT, b INT, PRIMARY KEY(a));
INSERT INTO update_table values (1,1),(2,2);
INSERT IGNORE INTO insert_table SELECT * FROM filler_table;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
TRUNCATE TABLE insert_table;
INSERT INTO insert_table SELECT * FROM filler_table ON DUPLICATE KEY UPDATE a = 1;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
TRUNCATE TABLE insert_table;
REPLACE INTO replace_table SELECT * FROM filler_table;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
UPDATE IGNORE update_table SET a=2;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
CREATE TABLE create_ignore_test (a INT, b INT, PRIMARY KEY(b)) IGNORE SELECT * FROM filler_table;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.
CREATE TABLE create_replace_test (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
CREATE TEMPORARY TABLE temp1 (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.
DROP TABLE filler_table;
DROP TABLE insert_table;
DROP TABLE update_table;
DROP TABLE replace_table;
DROP TABLE create_ignore_test;
DROP TABLE create_replace_test;
"End of tests"

View file

@ -12,6 +12,11 @@
# - insert into two autoinc columns;
# - statements using UDF's.
# - statements reading from log tables in the mysql database.
# - INSERT ... SELECT ... ON DUPLICATE KEY UPDATE
# - REPLACE ... SELECT
# - CREATE TABLE [IGNORE/REPLACE] SELECT
# - INSERT IGNORE...SELECT
# - UPDATE IGNORE
#
# Note that statements that use stored functions, stored procedures,
# triggers, views, or prepared statements that invoke unsafe
@ -79,6 +84,7 @@
# BUG#45785: LIMIT in SP does not cause RBL if binlog_format=MIXED
# BUG#47995: Mark user functions as unsafe
# BUG#49222: Mark RAND() unsafe
# BUG#11758262: MARK INSERT...SEL...ON DUP KEY UPD,REPLACE...SEL,CREATE...[IGN|REPL] SEL
#
# ==== Related test cases ====
#
@ -699,5 +705,47 @@ INSERT INTO t1 VALUES
SELECT * FROM t1;
DROP TABLE t1;
#
#BUG#11758262-50439: MARK INSERT...SEL...ON DUP KEY UPD,REPLACE..
#The following statements may be unsafe when logged in statement format.
#INSERT IGNORE...SELECT
#INSERT ... SELECT ... ON DUPLICATE KEY UPDATE
#REPLACE ... SELECT
#UPDATE IGNORE
#CREATE TABLE... IGNORE SELECT
#CREATE TABLE... REPLACE SELECT
#setup tables
CREATE TABLE filler_table (a INT, b INT);
INSERT INTO filler_table values (1,1),(1,2);
CREATE TABLE insert_table (a INT, b INT, PRIMARY KEY(a));
CREATE TABLE replace_table (a INT, b INT, PRIMARY KEY(a));
INSERT INTO replace_table values (1,1),(2,2);
CREATE TABLE update_table (a INT, b INT, PRIMARY KEY(a));
INSERT INTO update_table values (1,1),(2,2);
#INSERT IGNORE... SELECT
INSERT IGNORE INTO insert_table SELECT * FROM filler_table;
TRUNCATE TABLE insert_table;
#INSERT ... SELECT ... ON DUPLICATE KEY UPDATE
INSERT INTO insert_table SELECT * FROM filler_table ON DUPLICATE KEY UPDATE a = 1;
TRUNCATE TABLE insert_table;
#REPLACE...SELECT
REPLACE INTO replace_table SELECT * FROM filler_table;
#UPDATE IGNORE
UPDATE IGNORE update_table SET a=2;
#CREATE TABLE [IGNORE/REPLACE] SELECT
CREATE TABLE create_ignore_test (a INT, b INT, PRIMARY KEY(b)) IGNORE SELECT * FROM filler_table;
CREATE TABLE create_replace_test (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
#temporary tables should not throw the warning.
CREATE TEMPORARY TABLE temp1 (a INT, b INT, PRIMARY KEY(b)) REPLACE SELECT * FROM filler_table;
###clean up
DROP TABLE filler_table;
DROP TABLE insert_table;
DROP TABLE update_table;
DROP TABLE replace_table;
DROP TABLE create_ignore_test;
DROP TABLE create_replace_test;
--echo "End of tests"

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1,t2,t3;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -89,3 +90,4 @@ c1
2007-02-16 12:10:34
2007-02-17 13:10:34
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NULL, c3 DATE NULL, PRIMARY KEY(c1), UNIQUE(c2));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),ADDTIME(NOW(),'4 04:01:01'),NOW());
@ -128,3 +129,4 @@ c1 c2 c3
2003 2001 2000
2004 2000 2000
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -82,3 +83,4 @@ c1
2000
2011
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -87,3 +88,4 @@ c1
1999
2000
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -87,3 +88,4 @@ c1
1999
2000
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -82,3 +83,4 @@ c1
2000
2011
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -87,3 +88,4 @@ c1
1999
2000
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -81,3 +82,4 @@ c1
1999
2000
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -144,3 +145,4 @@ c1 c2 c3
2011 2011 2000
2011 2011 2011
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -147,3 +148,4 @@ c1 c2 c3
1999 2000 1999
2000 1999 1999
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -147,3 +148,4 @@ c1 c2 c3
1999 2000 1999
2000 1999 1999
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -144,3 +145,4 @@ c1 c2 c3
2011 2011 2000
2011 2011 2011
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -147,3 +148,4 @@ c1 c2 c3
1999 2000 1999
2000 1999 1999
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -139,3 +140,4 @@ c1 c2 c3
1999 2000 1999
2000 1999 1999
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -144,3 +145,4 @@ c1 c2 c3
2011 2011 2000
2011 2011 2011
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,4 +1,5 @@
DROP TABLE IF EXISTS t1,t2,t3;
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL);
SET TIMESTAMP=1171346973;
INSERT INTO t1 (c1) VALUES(NOW());
@ -104,3 +105,4 @@ c1
2007-02-13 09:09:33
2007-02-14 10:10:34
DROP TABLE t1;
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -45,4 +47,5 @@ SELECT * FROM t1 ORDER BY c1;
DELETE FROM t1 WHERE c1 <= ADDTIME(NOW(),'2 02:01:01');
SELECT * FROM t1 ORDER BY c1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;
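
Each of these tests now pins the session time zone before SET TIMESTAMP, so the fixed epoch value always maps to the same local datetime in the recorded results; a sketch of the mechanism:

SET TIME_ZONE = "+03:00";
SET TIMESTAMP = 1171346973;
SELECT NOW();   # 2007-02-13 15:09:33, regardless of the server's default zone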

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NULL, c3 DATE NULL, PRIMARY KEY(c1), UNIQUE(c2));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),ADDTIME(NOW(),'4 04:01:01'),NOW());
@ -70,4 +72,6 @@ INSERT INTO t1 (c1,c2,c3) VALUES(2000,2000,2000) ON DUPLICATE KEY UPDATE c3=2011
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -61,4 +63,5 @@ INSERT INTO t1 (c1) VALUES(1999) ON DUPLICATE KEY UPDATE c1=2011;
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -61,4 +63,5 @@ INSERT INTO t1 (c1) VALUES(2000);
INSERT INTO t1 (c1) VALUES(1999);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL PRIMARY KEY);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -61,4 +63,6 @@ INSERT INTO t1 (c1) VALUES(2000);
INSERT INTO t1 (c1) VALUES(1999);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -61,4 +63,5 @@ INSERT INTO t1 (c1) VALUES(1999) ON DUPLICATE KEY UPDATE c1=2011;
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -61,4 +63,5 @@ INSERT INTO t1 (c1) VALUES(2000);
INSERT INTO t1 (c1) VALUES(1999);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL UNIQUE);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -53,4 +55,5 @@ INSERT INTO t1 (c1) VALUES(2000);
INSERT IGNORE INTO t1 (c1) VALUES(1999);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -85,4 +87,6 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000) ON DUPLICATE KEY UPDATE c1=2011
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -93,4 +95,5 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,1999);
INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -93,4 +95,5 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,1999);
INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -85,4 +87,5 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000) ON DUPLICATE KEY UPDATE c1=2011
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -93,4 +95,5 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,1999);
INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000);
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NULL, c2 DATE NULL, c3 DATE NULL, UNIQUE(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -85,4 +87,6 @@ INSERT IGNORE INTO t1 (c1,c2,c3) VALUES(1999,1999,2000);
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL, c2 DATE NOT NULL, c3 DATE NOT NULL, PRIMARY KEY(c1,c2,c3));
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1,c2,c3) VALUES(NOW(),NOW(),NOW());
@ -93,4 +95,6 @@ INSERT INTO t1 (c1,c2,c3) VALUES(1999,1999,2000) ON DUPLICATE KEY UPDATE c1=2011
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -1,6 +1,8 @@
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3;
--enable_warnings
# Set Correct timezone to match result
SET TIME_ZONE="+03:00";
CREATE TABLE t1(c1 DATE NOT NULL);
SET TIMESTAMP=1171346973; # 2007-02-13 15:09:33
INSERT INTO t1 (c1) VALUES(NOW());
@ -53,4 +55,5 @@ UPDATE t1 SET c1 = NOW() WHERE c1 >= ADDTIME(NOW(),'2 02:01:01');
--sorted_result
SELECT * FROM t1 ORDER BY c1;
DROP TABLE t1;
# Restore timezone to default
SET TIME_ZONE= @@global.time_zone;

View file

@ -0,0 +1,19 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression("Error in Log_event::read_log_event()");
include/rpl_stop_server.inc [server_number=1]
include/rpl_start_server.inc [server_number=1]
show binlog events;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Wrong offset or I/O error
call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log");
stop slave;
reset slave;
start slave;
include/wait_for_slave_param.inc [Last_IO_Errno]
Last_IO_Errno = '1236'
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'binlog truncated in the middle of event; consider out of disk space on master; the last event was read from 'master-bin.000001' at 316, the last byte read was read from 'master-bin.000001' at 335.''
reset master;
stop slave;
reset slave;
drop table t;
End of the tests

View file

@ -1,29 +0,0 @@
include/master-slave.inc
[connection master]
CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=innodb;
CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=innodb;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
include/assert.inc [Sum of elements in t1 should be 1.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
include/diff_tables.inc [master:test.t1 , slave:test.t1]
drop table t1, t2;
CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=myisam;
CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=myisam;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
include/assert.inc [Sum of elements in t1 should be 1.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
include/diff_tables.inc [master:test.t1 , slave:test.t1]
drop table t1, t2;
include/rpl_end.inc

View file

@ -26,6 +26,19 @@ include/diff_tables.inc [master:test.t1 , slave:test.t1]
INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
include/assert.inc [Count of elements in t1 should be 6.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=innodb;
CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=innodb;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
include/assert.inc [Sum of elements in t1 should be 1.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
drop table t1, t2;
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
@ -52,5 +65,18 @@ include/diff_tables.inc [master:test.t1 , slave:test.t1]
INSERT IGNORE INTO t1 SELECT NULL, t2.b FROM t2 ORDER BY t2.a;
include/assert.inc [Count of elements in t1 should be 6.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 (
a INT UNSIGNED NOT NULL PRIMARY KEY
) ENGINE=myisam;
CREATE TABLE t2 (
a INT UNSIGNED
) ENGINE=myisam;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
INSERT INTO t1 SELECT t2.a FROM t2 ORDER BY t2.a ON DUPLICATE KEY UPDATE t1.a= t1.a;
include/assert.inc [Sum of elements in t1 should be 1.]
include/assert.inc [In SBR or MIXED modes, the event in the binlog should be the same that was executed. In RBR mode, binlog position should stay unchanged.]
drop table t1, t2;
include/rpl_end.inc

View file

@ -1,14 +0,0 @@
include/master-slave.inc
[connection master]
create table t1 (n int not null primary key);
insert into t1 values (1);
create table t2 (n int);
insert into t2 values (1);
insert ignore into t1 select * from t2;
insert into t1 values (2);
select * from t1;
n
1
2
drop table t1,t2;
include/rpl_end.inc

View file

@ -42,12 +42,16 @@ SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.
SELECT * FROM t1;
id field_1 field_2 field_3
1 1 a 1a

View file

@ -9,7 +9,7 @@ change master to master_log_pos=MASTER_LOG_POS;
Read_Master_Log_Pos = '75'
start slave;
include/wait_for_slave_io_error.inc [errno=1236]
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master''
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master; the last event was read from 'master-bin.000001' at 75, the last byte read was read from 'master-bin.000001' at 94.''
include/stop_slave_sql.inc
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB

View file

@ -5,7 +5,7 @@ CREATE TABLE t1(c1 INT);
FLUSH LOGS;
call mtr.add_suppression('Got fatal error 1236 from master when reading data from binary log: .*could not find next log');
include/wait_for_slave_io_error.inc [errno=1236]
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'could not find next log''
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'could not find next log; the last event was read from 'master-bin.000002' at 237, the last byte read was read from 'master-bin.000002' at 237.''
CREATE TABLE t2(c1 INT);
FLUSH LOGS;
CREATE TABLE t3(c1 INT);

View file

@@ -37,7 +37,7 @@ DROP TABLE t1;
CREATE TABLE t1 (f1 int PRIMARY KEY, f2 LONGTEXT, f3 LONGTEXT) ENGINE=MyISAM;
INSERT INTO t1(f1, f2, f3) VALUES(1, REPEAT('a', @@global.max_allowed_packet), REPEAT('b', @@global.max_allowed_packet));
include/wait_for_slave_io_error.inc [errno=1236]
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master''
Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master; the last event was read from 'master-bin.000001' at 463, the last byte read was read from 'master-bin.000001' at 482.''
STOP SLAVE;
RESET SLAVE;
RESET MASTER;

View file

@@ -0,0 +1,64 @@
#
# Bug#11747416 : 32228 A disk full makes binary log corrupt.
#
#
# The test demonstrates how an error from reading the binlog is
# propagated to the slave and reported there.
# Conditions for the bug include a crash at the time the last event
# was being written to the binlog, leaving it partly written. With the
# fixes the event is no longer sent out; instead the dump thread sends
# a sound error message.
#
# The crash is not simulated. A binlog with a partly written event at
# its end is installed and replication is started from it.
#
--source include/master-slave.inc
--source include/have_binlog_format_mixed.inc
call mtr.add_suppression("Error in Log_event::read_log_event()");
--connection master
--let $datadir= `SELECT @@datadir`
--let $rpl_server_number= 1
--source include/rpl_stop_server.inc
--remove_file $datadir/master-bin.000001
--copy_file $MYSQL_TEST_DIR/std_data/bug11747416_32228_binlog.000001 $datadir/master-bin.000001
--let $rpl_server_number= 1
--source include/rpl_start_server.inc
--source include/wait_until_connected_again.inc
# evidence of the partial binlog
--error ER_ERROR_WHEN_EXECUTING_COMMAND
show binlog events;
--connection slave
call mtr.add_suppression("Slave I/O: Got fatal error 1236 from master when reading data from binary log");
stop slave;
reset slave;
start slave;
# ER_MASTER_FATAL_ERROR_READING_BINLOG 1236
--let $slave_param=Last_IO_Errno
--let $slave_param_value=1236
--source include/wait_for_slave_param.inc
--let $status_items= Last_IO_Errno, Last_IO_Error
--source include/show_slave_status.inc
#
# Cleanup
#
--connection master
reset master;
--connection slave
stop slave;
reset slave;
drop table t; # the table was created from the binlog; it does not exist on the master.
--echo End of the tests
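For context, a minimal standalone sketch of the behaviour this test exercises (standard C++ only; the 4-byte length framing, function names, and exit codes are illustrative assumptions, not the server's real event format or API): a partly written trailing event is reported as fatal error 1236 instead of being masked as a clean end-of-file.

    #include <cstdio>
    #include <vector>

    enum ReadStatus { READ_OK, READ_EOF, READ_TRUNCATED };

    /* Hypothetical framing: a 4-byte little-endian length, then the event
       body. A short read of either part models a partial write. */
    ReadStatus read_event(std::FILE *log, std::vector<unsigned char> &event)
    {
        unsigned char len_buf[4];
        size_t n = std::fread(len_buf, 1, sizeof(len_buf), log);
        if (n == 0)
            return READ_EOF;                /* clean end of the log */
        if (n < sizeof(len_buf))
            return READ_TRUNCATED;          /* length header cut off */
        size_t event_len = (size_t) len_buf[0] | ((size_t) len_buf[1] << 8) |
                           ((size_t) len_buf[2] << 16) |
                           ((size_t) len_buf[3] << 24);
        event.resize(event_len);
        if (event_len &&
            std::fread(event.data(), 1, event_len, log) < event_len)
            return READ_TRUNCATED;          /* body cut off: partial write */
        return READ_OK;
    }

    /* The removed server code masked such corruption as EOF; the fixed
       dump loop reports it to the slave instead. */
    int dump_loop(std::FILE *log)
    {
        std::vector<unsigned char> event;
        for (;;) {
            switch (read_event(log, event)) {
            case READ_OK:        break;     /* would send the event on */
            case READ_EOF:       return 0;
            case READ_TRUNCATED:
                std::fprintf(stderr, "fatal error 1236: binlog truncated in "
                                     "the middle of event; consider out of "
                                     "disk space on master\n");
                return 1;
            }
        }
    }

    int main(int argc, char **argv)
    {
        std::FILE *log = argc > 1 ? std::fopen(argv[1], "rb") : nullptr;
        if (!log)
            return 2;
        int rc = dump_loop(log);
        std::fclose(log);
        return rc;
    }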

View file

@@ -1,14 +0,0 @@
#########################################
# Wrapper for rpl_insert_duplicate.test #
#########################################
-- source include/master-slave.inc
-- source include/have_innodb.inc
#-- source include/have_binlog_format_mixed_or_statement.inc
let $engine_type=innodb;
-- source extra/rpl_tests/rpl_insert_duplicate.test
let $engine_type=myisam;
-- source extra/rpl_tests/rpl_insert_duplicate.test
--source include/rpl_end.inc

View file

@@ -1,20 +0,0 @@
# Testcase for BUG#10456 - INSERT INTO ... SELECT violating a primary key
# breaks replication
-- source include/master-slave.inc
connection master;
create table t1 (n int not null primary key);
insert into t1 values (1);
create table t2 (n int);
insert into t2 values (1);
insert ignore into t1 select * from t2;
insert into t1 values (2);
sync_slave_with_master;
connection slave;
select * from t1;
connection master;
drop table t1,t2;
sync_slave_with_master;
--source include/rpl_end.inc

View file

@@ -7,7 +7,7 @@ source include/have_debug.inc;
source include/master-slave.inc;
# Currently only statement-based-specific bugs are here
-- source include/have_binlog_format_mixed_or_statement.inc
-- source include/have_binlog_format_statement.inc
#
# This is to test that slave properly detects if

View file

@@ -41,7 +41,7 @@ id name id name
SET @@session.max_join_size=8;
## Since total joins are more than max_join_size value so error will occur ##
SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
'#--------------------FN_DYNVARS_079_03-------------------------#'
## Setting global value of variable ##
SET @@global.max_join_size=8;
@@ -52,7 +52,7 @@ SELECT @@global.max_join_size;
8
## Since total joins are more than max_join_size value so error will occur ##
SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
## Dropping both the tables ##
Drop table t1, t2;
## Restoring values ##

View file

@@ -21,7 +21,7 @@ INSERT INTO t2 VALUES('aa4','bb');
'#--------------------FN_DYNVARS_154_01-------------------------#'
Expected error "Too big select"
SELECT * FROM t1 INNER JOIN t2 ON t1.a = t2.a;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
Expected error The SELECT would examine more than MAX_JOIN_SIZE rows.
'#--------------------FN_DYNVARS_154_02-------------------------#'
SET SESSION SQL_BIG_SELECTS = 1;

View file

@@ -19,7 +19,7 @@ SET SESSION sql_max_join_size=9;
Warnings:
Warning 1287 The syntax '@@sql_max_join_size' is deprecated and will be removed in MySQL 7.0.
SELECT * FROM t1 INNER JOIN t2 ON t1.a = t2.a;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
Expected error The SELECT would examine more than MAX_JOIN_SIZE rows.
'#--------------------FN_DYNVARS_161_02-------------------------#'
SET SESSION SQL_BIG_SELECTS = 1;

View file

@@ -762,6 +762,20 @@ DROP TABLE t1;
SELECT space(date_add(101, INTERVAL CHAR('1' USING utf16) hour_second));
--echo #
--echo # Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
--echo #
SET NAMES utf8, @@character_set_connection=utf16;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
#
## TODO: add tests for all engines
#

View file

@@ -840,6 +840,19 @@ INSERT INTO t1 VALUES ('a');
SELECT CASE s1 WHEN 'a' THEN 'b' ELSE 'c' END FROM t1;
DROP TABLE t1;
--echo #
--echo # Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
--echo #
SET NAMES utf8, @@character_set_connection=utf32;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
--echo #
--echo # End of 5.5 tests
--echo #

View file

@@ -1561,6 +1561,18 @@ EXPLAIN EXTENDED SELECT 'abcdÁÂÃÄÅ', _latin1'abcdÁÂÃÄÅ', _utf8'abcdÁ
SET NAMES utf8;
EXPLAIN EXTENDED SELECT 'abcdÁÂÃÄÅ', _latin1'abcdÁÂÃÄÅ', _utf8'abcdÁÂÃÄÅ';
--echo #
--echo # Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT
--echo #
SET NAMES utf8;
SELECT id, CHAR_LENGTH(GROUP_CONCAT(body)) AS l
FROM (SELECT 'a' AS id, REPEAT('foo bar', 100) AS body
UNION ALL
SELECT 'a' AS id, REPEAT('bla bla', 100) AS body) t1
GROUP BY id
ORDER BY l DESC;
--echo #
--echo # End of 5.5 tests
--echo #

View file

@@ -89,6 +89,20 @@ SET SESSION debug = DEFAULT;
DROP TABLE t1, t2;
--echo #
--echo # Bug#11747970 34660: CRASH WHEN FEDERATED TABLE LOSES CONNECTION DURING INSERT ... SELECT
--echo #
CREATE TABLE t1(f1 INT, KEY(f1));
CREATE TABLE t2(f1 INT);
INSERT INTO t1 VALUES (1),(2);
INSERT INTO t2 VALUES (1),(2);
SET SESSION debug='d,bug11747970_simulate_error';
INSERT IGNORE INTO t2 SELECT f1 FROM t1 a WHERE NOT EXISTS (SELECT 1 FROM t2 b WHERE a.f1 = b.f1);
SET SESSION debug = DEFAULT;
DROP TABLE t1,t2;
--echo #
--echo # End of 5.1 tests
--echo #

View file

@@ -15,9 +15,9 @@ if ($dir_bin =~ m|/usr/|) {
# RPM package
$dir_docs = $dir_bin;
$dir_docs =~ s|/lib|/share/doc|;
if(-d "$dir_docs/packages/MySQL-server") {
# SuSE
$dir_docs = "$dir_docs/packages/MySQL-server";
if(-d "$dir_docs/packages") {
# SuSE: "packages/" in the documentation path
$dir_docs = glob "$dir_docs/packages/MySQL-server*";
} else {
# RedHat: version number in directory name
$dir_docs = glob "$dir_docs/MySQL-server*";
@ -25,9 +25,9 @@ if ($dir_bin =~ m|/usr/|) {
} elsif ($dir_bin =~ m|/usr$|) {
# RPM build during development
$dir_docs = "$dir_bin/share/doc";
if(-d "$dir_docs/packages/MySQL-server") {
# SuSE
$dir_docs = "$dir_docs/packages/MySQL-server";
if(-d "$dir_docs/packages") {
# SuSE: "packages/" in the documentation path
$dir_docs = glob "$dir_docs/packages/MySQL-server*";
} else {
# RedHat: version number in directory name
$dir_docs = glob "$dir_docs/MySQL-server*";
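The same directory probe as a hedged C++17 sketch (std::filesystem; only the packages/ layout and the MySQL-server prefix come from the hunk above, the rest is illustrative): prefer a SuSE-style packages/ subtree when it exists, otherwise scan for a RedHat-style versioned directory name.

    #include <cstdio>
    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    std::string find_docs_dir(const fs::path &base)
    {
        /* SuSE puts a packages/ level into the documentation path. */
        fs::path root = fs::is_directory(base / "packages") ? base / "packages"
                                                            : base;
        if (!fs::is_directory(root))
            return "";
        /* RedHat appends a version number, so match the prefix only. */
        for (const auto &entry : fs::directory_iterator(root))
            if (entry.is_directory() &&
                entry.path().filename().string().rfind("MySQL-server", 0) == 0)
                return entry.path().string();
        return "";
    }

    int main(int argc, char **argv)
    {
        std::string dir = find_docs_dir(argc > 1 ? argv[1] : ".");
        std::printf("docs dir: %s\n", dir.empty() ? "(not found)" : dir.c_str());
    }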

View file

@@ -1448,7 +1448,7 @@ SELECT ((rpad(1.0,2048,1)) = ('4(') ^ (0.1));
--error 1690
SELECT
pow((rpad(1.0,2048,1)),(b'1111111111111111111111111111111111111111111'));
pow((rpad(10.0,2048,1)),(b'1111111111111111111111111111111111111111111'));
SELECT ((rpad(1.0,2048,1)) + (0) ^ ('../'));
SELECT stddev_samp(rpad(1.0,2048,1));
SELECT ((127.1) not in ((rpad(1.0,2048,1)),(''),(-1.1)));

View file

@@ -454,7 +454,7 @@ drop table t1;
#
create table t1 (c char(10)) engine=memory;
create table t2 (c varchar(10)) engine=memory;
--replace_column 8 #
--replace_column 8 # 12 #
show table status like 't_';
drop table t1, t2;

View file

@@ -295,37 +295,37 @@ CREATE TABLE t3 (
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1,1),(2,2);
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
show table status;
insert into t1 values (3),(4);
insert into t2 values (3),(4);
insert into t3 values (3,3),(4,4);
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
show table status;
insert into t1 values (5);
insert into t2 values (5);
insert into t3 values (5,5);
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
show table status;
delete from t1 where a=3;
delete from t2 where b=3;
delete from t3 where a=3;
--replace_column 6 # 7 # 8 # 9 # 10 #
--replace_column 6 # 7 # 8 # 9 # 10 # 12 #
show table status;
truncate table t1;
truncate table t2;
truncate table t3;
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
show table status;
insert into t1 values (5);
insert into t2 values (5);
insert into t3 values (5,5);
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
show table status;
delete from t1 where a=5;
delete from t2 where b=5;
delete from t3 where a=5;
--replace_column 6 # 7 # 8 # 9 # 10 #
--replace_column 6 # 7 # 8 # 9 # 10 # 12 #
show table status;
drop table t1, t2, t3;
@@ -443,7 +443,7 @@ flush tables;
# Create a junk frm file on disk
let $MYSQLD_DATADIR= `select @@datadir`;
system echo "this is a junk file for test" >> $MYSQLD_DATADIR/test/t1.frm ;
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
SHOW TABLE STATUS like 't1';
--error ER_NOT_FORM_FILE
show create table t1;
@@ -1073,7 +1073,7 @@ set names latin1;
SET NAMES latin1;
CREATE DATABASE `ä`;
CREATE TABLE `ä`.`ä` (a int) ENGINE=Memory;
--replace_column 6 # 7 # 8 # 9 #
--replace_column 6 # 7 # 8 # 9 # 12 #
SHOW TABLE STATUS FROM `ä` LIKE 'ä';
DROP DATABASE `ä`;

View file

@@ -592,7 +592,7 @@ public:
void init_make_field(Send_field *tmp_field,enum enum_field_types type);
virtual void cleanup();
virtual void make_field(Send_field *field);
Field *make_string_field(TABLE *table);
virtual Field *make_string_field(TABLE *table);
virtual bool fix_fields(THD *, Item **);
/*
should be used in case where we are sure that we do not need

View file

@@ -1690,7 +1690,7 @@ subselect_single_select_engine(st_select_lex *select,
select_subselect *result_arg,
Item_subselect *item_arg)
:subselect_engine(item_arg, result_arg),
prepared(0), optimized(0), executed(0),
prepared(0), optimized(0), executed(0), optimize_error(0),
select_lex(select), join(0)
{
select_lex->master_unit()->item= item_arg;
@@ -1700,7 +1700,7 @@ subselect_single_select_engine(st_select_lex *select,
void subselect_single_select_engine::cleanup()
{
DBUG_ENTER("subselect_single_select_engine::cleanup");
prepared= optimized= executed= 0;
prepared= optimized= executed= optimize_error= 0;
join= 0;
result->cleanup();
DBUG_VOID_RETURN;
@@ -1892,6 +1892,10 @@ int join_read_next_same_or_null(READ_RECORD *info);
int subselect_single_select_engine::exec()
{
DBUG_ENTER("subselect_single_select_engine::exec");
if (optimize_error)
DBUG_RETURN(1);
char const *save_where= thd->where;
SELECT_LEX *save_select= thd->lex->current_select;
thd->lex->current_select= select_lex;
@@ -1899,12 +1903,15 @@ int subselect_single_select_engine::exec()
{
SELECT_LEX_UNIT *unit= select_lex->master_unit();
DBUG_EXECUTE_IF("bug11747970_simulate_error",
DBUG_SET("+d,bug11747970_raise_error"););
optimized= 1;
unit->set_limit(unit->global_parameters);
if (join->optimize())
{
thd->where= save_where;
executed= 1;
optimize_error= 1;
thd->lex->current_select= save_select;
DBUG_RETURN(join->error ? join->error : 1);
}
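The shape of this fix, reduced to a hedged sketch (class and member names are stand-ins, not the server's real types): a failed optimization is latched in an optimize_error flag so that re-executions of the subquery fail fast instead of re-entering a broken plan.

    #include <cstdio>

    class SubqueryEngine {
        bool optimized = false;
        bool optimize_error = false;
    public:
        int optimize() { return 1; }        // pretend the plan always fails
        int exec()
        {
            if (optimize_error)             // latched: fail fast when re-run
                return 1;
            if (!optimized) {
                optimized = true;
                if (optimize()) {
                    optimize_error = true;  // remember the failure
                    return 1;
                }
            }
            return 0;                       // would run the subquery here
        }
        void cleanup() { optimized = optimize_error = false; }
    };

    int main()
    {
        SubqueryEngine e;
        std::printf("first exec: %d\n", e.exec());   // 1, optimization fails
        std::printf("second exec: %d\n", e.exec());  // 1, without re-optimizing
    }

cleanup() clears the latch, mirroring how the server resets the flags between statement executions.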

View file

@@ -429,6 +429,7 @@ class subselect_single_select_engine: public subselect_engine
my_bool prepared; /* simple subselect is prepared */
my_bool optimized; /* simple subselect is optimized */
my_bool executed; /* simple subselect is executed */
my_bool optimize_error; ///< simple subselect optimization failed
st_select_lex *select_lex; /* corresponding select_lex */
JOIN * join; /* corresponding JOIN structure */
public:

View file

@@ -3128,6 +3128,31 @@ void Item_func_group_concat::cleanup()
}
Field *Item_func_group_concat::make_string_field(TABLE *table)
{
Field *field;
DBUG_ASSERT(collation.collation);
/*
max_characters is the maximum number of characters
that can fit into max_length bytes. It's necessary
to use a field size that allows storing the group_concat
result without truncation. For this purpose we use
max_characters * CS->mbmaxlen.
*/
const uint32 max_characters= max_length / collation.collation->mbminlen;
if (max_characters > CONVERT_IF_BIGGER_TO_BLOB)
field= new Field_blob(max_characters * collation.collation->mbmaxlen,
maybe_null, name, collation.collation, TRUE);
else
field= new Field_varstring(max_characters * collation.collation->mbmaxlen,
maybe_null, name, table->s, collation.collation);
if (field)
field->init(table);
return field;
}
Item *Item_func_group_concat::copy_or_same(THD* thd)
{
return new (thd->mem_root) Item_func_group_concat(thd, this);
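The sizing rule above in isolation, as a hedged sketch (the threshold constant and the charset byte counts are assumptions): max_length is a byte limit, dividing by mbminlen gives the worst-case character capacity, and multiplying back by mbmaxlen yields a buffer that cannot truncate the GROUP_CONCAT result.

    #include <cstdint>
    #include <cstdio>

    struct Charset { uint32_t mbminlen, mbmaxlen; };
    enum FieldKind { VARSTRING, BLOB };

    const uint32_t CONVERT_IF_BIGGER_TO_BLOB = 512;   // assumed threshold

    FieldKind pick_field(uint32_t max_length_bytes, Charset cs,
                         uint32_t *field_bytes)
    {
        uint32_t max_characters = max_length_bytes / cs.mbminlen;
        *field_bytes = max_characters * cs.mbmaxlen;  // worst-case encoding
        return max_characters > CONVERT_IF_BIGGER_TO_BLOB ? BLOB : VARSTRING;
    }

    int main()
    {
        Charset utf16 = {2, 4};   // illustrative: 2-byte min, 4-byte max
        uint32_t bytes;
        FieldKind kind = pick_field(1024, utf16, &bytes);
        // 1024 / 2 = 512 characters; 512 * 4 = 2048 bytes, still a varstring
        std::printf("%s of %u bytes\n",
                    kind == BLOB ? "blob" : "varstring", (unsigned) bytes);
    }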

View file

@@ -1411,6 +1411,7 @@ public:
enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;}
const char *func_name() const { return "group_concat"; }
virtual Item_result result_type () const { return STRING_RESULT; }
virtual Field *make_string_field(TABLE *table);
enum_field_types field_type() const
{
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB )

View file

@@ -2494,10 +2494,10 @@ ER_TOO_BIG_SELECT 42000
cze "Zadan-Bý SELECT by procházel příliš mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1"
dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay"
eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay"
est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1"
fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien"
ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden"
ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET MAX_JOIN_SIZE=# verwenden"
greek "Το SELECT θα εξετάσει μεγάλο αριθμό εγγραφών και πιθανώς θα καθυστερήσει. Παρακαλώ εξετάστε τις παραμέτρους του WHERE και χρησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό"
hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
@@ -4701,14 +4701,14 @@ ER_NOT_SUPPORTED_YET 42000
spa "Esta versión de MySQL no soporta todavia '%s'"
swe "Denna version av MySQL kan ännu inte utföra '%s'"
ER_MASTER_FATAL_ERROR_READING_BINLOG
nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
eng "Got fatal error %d from master when reading data from binary log: '%-.128s'"
ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
rus "Получена неисправимая ошибка %d: '%-.128s' от головного сервера в процессе выборки данных из двоичного журнала"
spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
nla "Kreeg fatale fout %d: '%-.256s' van master tijdens lezen van data uit binaire log"
eng "Got fatal error %d from master when reading data from binary log: '%-.256s'"
ger "Schwerer Fehler %d: '%-.256s vom Master beim Lesen des binären Logs"
ita "Errore fatale %d: '%-.256s' dal master leggendo i dati dal log binario"
por "Obteve fatal erro %d: '%-.256s' do master quando lendo dados do binary log"
rus "Получена неисправимая ошибка %d: '%-.256s' от головного сервера в процессе выборки данных из двоичного журнала"
spa "Recibió fatal error %d: '%-.256s' del master cuando leyendo datos del binary log"
swe "Fick fatalt fel %d: '%-.256s' från master vid läsning av binärloggen"
ER_SLAVE_IGNORED_TABLE
eng "Slave SQL thread ignored the query because of replicate-*-table rules"
ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
@@ -5036,7 +5036,7 @@ ER_FEATURE_DISABLED
ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist"
por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definierad"
ER_OPTION_PREVENTS_STATEMENT
eng "The MySQL server is running with the %s option so it cannot execute this statement"
ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
@@ -5577,7 +5577,8 @@ ER_VIEW_OTHER_USER
eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer"
ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen"
ER_NO_SUCH_USER
eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
ger "Der als Definierer angegebene Benutzer ('%-.64s'@'%-.64s') existiert nicht"
ER_FORBID_SCHEMA_CHANGE
eng "Changing schema from '%-.192s' to '%-.192s' is not allowed."
ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt"
@@ -5618,7 +5619,7 @@ ER_VIEW_RECURSIVE
eng "`%-.192s`.`%-.192s` contains view recursion"
ger "`%-.192s`.`%-.192s` enthält View-Rekursion"
ER_NON_GROUPING_FIELD_USED 42000
eng "non-grouping field '%-.192s' is used in %-.64s clause"
eng "Non-grouping field '%-.192s' is used in %-.64s clause"
ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
ER_TABLE_CANT_HANDLE_SPKEYS
eng "The used table type doesn't support SPATIAL indexes"
@@ -5645,15 +5646,20 @@ ER_NON_INSERTABLE_TABLE
eng "The target table %-.100s of the %s is not insertable-into"
ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
ER_ADMIN_WRONG_MRG_TABLE
eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
ger "Tabelle '%-.64s' ist unterschiedlich definiert, nicht vom Typ MyISAM oder existiert nicht"
ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT
eng "Too high level of nesting for select"
eng "Too high level of nesting for select"
ger "Zu tief verschachtelte SELECT-Anweisungen"
ER_NAME_BECOMES_EMPTY
eng "Name '%-.64s' has become ''"
eng "Name '%-.64s' has become ''"
ger "Name '%-.64s' wurde zu ''"
ER_AMBIGUOUS_FIELD_TERM
eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
ger "Das erste Zeichen der Zeichenkette FIELDS TERMINATED ist mehrdeutig; bitte benutzen Sie nicht optionale und nicht leere FIELDS ENCLOSED BY"
ER_FOREIGN_SERVER_EXISTS
eng "The foreign server, %s, you are trying to create already exists."
eng "The foreign server, %s, you are trying to create already exists."
ger "Der entfernte Server %s, den Sie versuchen zu erzeugen, existiert schon."
ER_FOREIGN_SERVER_DOESNT_EXIST
eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
@@ -5678,8 +5684,8 @@ ER_PARTITION_SUBPARTITION_ERROR
swe "Subpartitioner kan bara vara hash och key partitioner"
ER_PARTITION_SUBPART_MIX_ERROR
eng "Must define subpartitions on all partitions if on one partition"
ger "Unterpartitionen können nur Hash- oder Key-Partitionen sein"
swe "Subpartitioner måste definieras på alla partitioner om på en"
ger "Wenn Sie Unterpartitionen auf einer Partition definieren, müssen Sie das für alle Partitionen tun"
swe "Subpartitioner måste definieras på alla partitioner om på en"
ER_PARTITION_WRONG_NO_PART_ERROR
eng "Wrong number of partitions defined, mismatch with previous setting"
ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
@@ -5854,7 +5860,7 @@ ER_CREATE_FILEGROUP_FAILED
ger "Anlegen von %s fehlgeschlagen"
ER_DROP_FILEGROUP_FAILED
eng "Failed to drop %s"
ger "Löschen (drop) von %s fehlgeschlagen"
ger "Löschen von %s fehlgeschlagen"
ER_TABLESPACE_AUTO_EXTEND_ERROR
eng "The handler doesn't support autoextend of tablespaces"
ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
@@ -5898,7 +5904,8 @@ ER_EVENT_ENDS_BEFORE_STARTS
eng "ENDS is either invalid or before STARTS"
ger "ENDS ist entweder ungültig oder liegt vor STARTS"
ER_EVENT_EXEC_TIME_IN_THE_PAST
eng "Event execution time is in the past. Event has been disabled"
eng "Event execution time is in the past. Event has been disabled"
ger "Ausführungszeit des Events liegt in der Vergangenheit. Event wurde deaktiviert"
ER_EVENT_OPEN_TABLE_FAILED
eng "Failed to open mysql.event"
ger "Öffnen von mysql.event fehlgeschlagen"
@@ -5925,7 +5932,7 @@ ER_EVENT_DATA_TOO_LONG
ger "Daten der Spalte '%s' zu lang"
ER_DROP_INDEX_FK
eng "Cannot drop index '%-.192s': needed in a foreign key constraint"
ger "Kann Index '%-.192s' nicht löschen: wird für einen Fremdschlüssel benötigt"
ger "Kann Index '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung benötigt"
# When using this error message, use the ER_WARN_DEPRECATED_SYNTAX error
# code.
ER_WARN_DEPRECATED_SYNTAX_WITH_VER
@@ -5936,10 +5943,10 @@ ER_CANT_WRITE_LOCK_LOG_TABLE
ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
ER_CANT_LOCK_LOG_TABLE
eng "You can't use locks with log tables."
ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL"
ger "Log-Tabellen können nicht gesperrt werden."
ER_FOREIGN_DUPLICATE_KEY 23000 S1009
eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry"
ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
ger "Aufrechterhalten der Fremdschlüssel-Beschränkungen für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error."
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
@@ -5995,6 +6002,7 @@ ER_CANT_ACTIVATE_LOG
ger "Kann Logdatei '%-.64s' nicht aktivieren"
ER_RBR_NOT_AVAILABLE
eng "The server was not built with row-based replication"
ger "Der Server wurde nicht mit zeilenbasierter Replikation gebaut"
ER_BASE64_DECODE_ERROR
eng "Decoding of base64 string failed"
swe "Avkodning av base64 sträng misslyckades"
@@ -6016,7 +6024,7 @@ ER_BAD_LOG_STATEMENT
ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
ER_CANT_RENAME_LOG_TABLE
eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'"
ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen beim Umbenennen zu/von einer Logtabelle zwei Tabellen angegeben werden: die Logtabelle zu einer Archivtabelle und eine weitere Tabelle zurück zu '%s'"
ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen zwei Tabellen umbenannt werden: die Logtabelle zu einer Archivtabelle, und eine weitere Tabelle zu '%s'"
ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000
eng "Incorrect parameter count in the call to native function '%-.192s'"
ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.192s'"
@@ -6057,206 +6065,240 @@ ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'"
ukr "Дублюючий запис '%-.64s' для ключа '%-.192s'"
ER_BINLOG_PURGE_EMFILE
eng "Too many files opened, please execute the command again"
ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
eng "Too many files opened, please execute the command again"
ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
ER_EVENT_CANNOT_CREATE_IN_THE_PAST
eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht."
ER_EVENT_CANNOT_ALTER_IN_THE_PAST
eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
ger "Ausführungszeit des Events liegt in der Vergangenheit, und es wurde ON COMPLETION NOT PRESERVE gesetzt. Das Event wurde unmittelbar nach Erzeugung gelöscht."
ER_SLAVE_INCIDENT
eng "The incident %s occured on the master. Message: %-.64s"
eng "The incident %s occured on the master. Message: %-.64s"
ger "Der Vorfall %s passierte auf dem Master. Meldung: %-.64s"
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
eng "Table has no partition for some existing values"
eng "Table has no partition for some existing values"
ger "Tabelle hat für einige bestehende Werte keine Partition"
ER_BINLOG_UNSAFE_STATEMENT
eng "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s"
eng "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s"
swe "Detta är inte säkert att logga i statement-format, för BINLOG_FORMAT = STATEMENT. %s"
ger "Unsichere Anweisung ins Binärlog geschrieben, weil Anweisungsformat BINLOG_FORMAT = STATEMENT. %s"
ER_SLAVE_FATAL_ERROR
eng "Fatal error: %s"
eng "Fatal error: %s"
ger "Fataler Fehler: %s"
ER_SLAVE_RELAY_LOG_READ_FAILURE
eng "Relay log read failure: %s"
eng "Relay log read failure: %s"
ger "Relaylog-Lesefehler: %s"
ER_SLAVE_RELAY_LOG_WRITE_FAILURE
eng "Relay log write failure: %s"
eng "Relay log write failure: %s"
ger "Relaylog-Schreibfehler: %s"
ER_SLAVE_CREATE_EVENT_FAILURE
eng "Failed to create %s"
eng "Failed to create %s"
ger "Erzeugen von %s fehlgeschlagen"
ER_SLAVE_MASTER_COM_FAILURE
eng "Master command %s failed: %s"
eng "Master command %s failed: %s"
ger "Master-Befehl %s fehlgeschlagen: %s"
ER_BINLOG_LOGGING_IMPOSSIBLE
eng "Binary logging not possible. Message: %s"
eng "Binary logging not possible. Message: %s"
ger "Binärlogging nicht möglich. Meldung: %s"
ER_VIEW_NO_CREATION_CTX
eng "View `%-.64s`.`%-.64s` has no creation context"
ger "View `%-.64s`.`%-.64s` hat keinen Erzeugungskontext"
ER_VIEW_INVALID_CREATION_CTX
eng "Creation context of view `%-.64s`.`%-.64s' is invalid"
ger "Erzeugungskontext des Views`%-.64s`.`%-.64s' ist ungültig"
ER_SR_INVALID_CREATION_CTX
eng "Creation context of stored routine `%-.64s`.`%-.64s` is invalid"
ger "Erzeugungskontext der gespeicherten Routine`%-.64s`.`%-.64s` ist ungültig"
ER_TRG_CORRUPTED_FILE
eng "Corrupted TRG file for table `%-.64s`.`%-.64s`"
ger "Beschädigte TRG-Datei für Tabelle `%-.64s`.`%-.64s`"
ER_TRG_NO_CREATION_CTX
eng "Triggers for table `%-.64s`.`%-.64s` have no creation context"
ger "Trigger für Tabelle `%-.64s`.`%-.64s` haben keinen Erzeugungskontext"
ER_TRG_INVALID_CREATION_CTX
eng "Trigger creation context of table `%-.64s`.`%-.64s` is invalid"
ger "Trigger-Erzeugungskontext der Tabelle `%-.64s`.`%-.64s` ist ungültig"
ER_EVENT_INVALID_CREATION_CTX
eng "Creation context of event `%-.64s`.`%-.64s` is invalid"
ger "Erzeugungskontext des Events `%-.64s`.`%-.64s` ist ungültig"
ER_TRG_CANT_OPEN_TABLE
eng "Cannot open table for trigger `%-.64s`.`%-.64s`"
ger "Kann Tabelle für den Trigger `%-.64s`.`%-.64s` nicht öffnen"
ER_CANT_CREATE_SROUTINE
eng "Cannot create stored routine `%-.64s`. Check warnings"
ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen"
ER_NEVER_USED
eng "Ambiguous slave modes combination. %s"
ger "Mehrdeutige Kombination von Slave-Modi. %s"
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
ER_SLAVE_CORRUPT_EVENT
eng "Corrupted replication event was detected"
ger "Beschädigtes Replikationsereignis entdeckt"
ER_LOAD_DATA_INVALID_COLUMN
eng "Invalid column reference (%-.64s) in LOAD DATA"
ER_LOG_PURGE_NO_FILE
ger "Ungültige Spaltenreferenz (%-.64s) bei LOAD DATA"
ER_LOG_PURGE_NO_FILE
eng "Being purged log %s was not found"
ger "Zu bereinigende Logdatei %s wurde nicht gefunden"
ER_XA_RBTIMEOUT XA106
eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long"
eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long"
ger "XA_RBTIMEOUT: Transaktionszweig wurde zurückgerollt: Zeitüberschreitung"
ER_XA_RBDEADLOCK XA102
eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected"
eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected"
ger "XA_RBDEADLOCK: Transaktionszweig wurde zurückgerollt: Deadlock entdeckt"
ER_NEED_REPREPARE
eng "Prepared statement needs to be re-prepared"
ER_DELAYED_NOT_SUPPORTED
ger "Vorbereitete Anweisungen müssen noch einmal vorbereitet werden"
ER_DELAYED_NOT_SUPPORTED
eng "DELAYED option not supported for table '%-.192s'"
ger "Die DELAYED-Option wird für Tabelle '%-.192s' nicht unterstützt"
WARN_NO_MASTER_INFO
eng "The master info structure does not exist"
ger "Die Master-Info-Struktur existiert nicht"
WARN_OPTION_IGNORED
eng "<%-.64s> option ignored"
ger "Option <%-.64s> ignoriert"
WARN_PLUGIN_DELETE_BUILTIN
eng "Built-in plugins cannot be deleted"
ger "Eingebaute Plugins können nicht gelöscht werden"
WARN_PLUGIN_BUSY
eng "Plugin is busy and will be uninstalled on shutdown"
ger "Plugin wird verwendet und wird erst beim Herunterfahren deinstalliert"
ER_VARIABLE_IS_READONLY
eng "%s variable '%s' is read-only. Use SET %s to assign the value"
ger "%s Variable '%s' ist nur lesbar. Benutzen Sie SET %s, um einen Wert zuzuweisen"
ER_WARN_ENGINE_TRANSACTION_ROLLBACK
eng "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted"
ger "Speicher-Engine %s unterstützt für diese Anweisung kein Rollback. Transaktion wurde zurückgerollt und muss neu gestartet werden"
ER_SLAVE_HEARTBEAT_FAILURE
eng "Unexpected master's heartbeat data: %s"
ger "Unerwartete Daten vom Heartbeat des Masters: %s"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."
ER_NDB_REPLICATION_SCHEMA_ERROR
eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
ger "Fehlerhaftes Schema für mysql.ndb_replication table. Meldung: %-.64s"
ER_CONFLICT_FN_PARSE_ERROR
eng "Error in parsing conflict function. Message: %-.64s"
eng "Error in parsing conflict function. Message: %-.64s"
ger "Fehler beim Parsen einer Konflikt-Funktion. Meldung: %-.64s"
ER_EXCEPTIONS_WRITE_ERROR
eng "Write to exceptions table failed. Message: %-.128s""
eng "Write to exceptions table failed. Message: %-.128s""
ger "Schreiben in Ausnahme-Tabelle fehlgeschlagen. Meldung: %-.128s""
ER_TOO_LONG_TABLE_COMMENT
eng "Comment for table '%-.64s' is too long (max = %lu)"
por "Comentário para a tabela '%-.64s' é longo demais (max = %lu)"
ger "Kommentar für Tabelle '%-.64s' ist zu lang (max = %lu)"
ER_TOO_LONG_FIELD_COMMENT
eng "Comment for field '%-.64s' is too long (max = %lu)"
por "Comentário para o campo '%-.64s' é longo demais (max = %lu)"
ger "Kommentar für Feld '%-.64s' ist zu lang (max = %lu)"
ER_FUNC_INEXISTENT_NAME_COLLISION 42000
eng "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual"
ger "FUNCTION %s existiert nicht. Erläuterungen im Abschnitt 'Function Name Parsing and Resolution' im Referenzhandbuch"
# When updating these, please update EXPLAIN_FILENAME_MAX_EXTRA_LENGTH in
# sql_table.h with the new maximal additional length for explain_filename.
ER_DATABASE_NAME
eng "Database"
swe "Databas"
ger "Datenbank"
ER_TABLE_NAME
eng "Table"
swe "Tabell"
ger "Tabelle"
ER_PARTITION_NAME
eng "Partition"
swe "Partition"
ger "Partition"
ER_SUBPARTITION_NAME
eng "Subpartition"
swe "Subpartition"
ger "Unterpartition"
ER_TEMPORARY_NAME
eng "Temporary"
swe "Temporär"
ger "Temporär"
ER_RENAMED_NAME
eng "Renamed"
swe "Namnändrad"
ger "Umbenannt"
ER_TOO_MANY_CONCURRENT_TRXS
eng "Too many active concurrent transactions"
ger "Zu viele aktive simultane Transaktionen"
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED
eng "Non-ASCII separator arguments are not fully supported"
ger "Nicht-ASCII-Trennargumente werden nicht vollständig unterstützt"
ER_DEBUG_SYNC_TIMEOUT
eng "debug sync point wait timed out"
ger "Debug Sync Point Wartezeit überschritten"
ER_DEBUG_SYNC_HIT_LIMIT
eng "debug sync point hit limit reached"
ger "Debug Sync Point Hit Limit erreicht"
ER_DUP_SIGNAL_SET 42000
eng "Duplicate condition information item '%s'"
eng "Duplicate condition information item '%s'"
ger "Informationselement '%s' für Duplikatbedingung"
# Note that the SQLSTATE is not 01000, it is provided by SIGNAL/RESIGNAL
ER_SIGNAL_WARN 01000
eng "Unhandled user-defined warning condition"
eng "Unhandled user-defined warning condition"
ger "Unbehandelte benutzerdefinierte Warnbedingung"
# Note that the SQLSTATE is not 02000, it is provided by SIGNAL/RESIGNAL
ER_SIGNAL_NOT_FOUND 02000
eng "Unhandled user-defined not found condition"
eng "Unhandled user-defined not found condition"
ger "Unbehandelte benutzerdefinierte Nicht-gefunden-Bedingung"
# Note that the SQLSTATE is not HY000, it is provided by SIGNAL/RESIGNAL
ER_SIGNAL_EXCEPTION HY000
eng "Unhandled user-defined exception condition"
eng "Unhandled user-defined exception condition"
ger "Unbehandelte benutzerdefinierte Ausnahmebedingung"
ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER 0K000
eng "RESIGNAL when handler not active"
eng "RESIGNAL when handler not active"
ger "RESIGNAL bei nicht aktivem Handler"
ER_SIGNAL_BAD_CONDITION_TYPE
eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
ger "SIGNAL/RESIGNAL kann nur mit einer Bedingung (CONDITION) benutzt werden, die bei SQLSTATE definiert wurde"
WARN_COND_ITEM_TRUNCATED
eng "Data truncated for condition item '%s'"
eng "Data truncated for condition item '%s'"
ger "Daten gekürzt für Bedingungselement '%s'"
ER_COND_ITEM_TOO_LONG
eng "Data too long for condition item '%s'"
eng "Data too long for condition item '%s'"
ger "Daten zu lang für Bedingungselement '%s'"
ER_UNKNOWN_LOCALE
eng "Unknown locale: '%-.64s'"
eng "Unknown locale: '%-.64s'"
ger "Unbekannte Locale: '%-.64s'"
ER_SLAVE_IGNORE_SERVER_IDS
eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id"
ger "Die angeforderte Server-ID %d steht im Konflikt mit der Startoption --replicate-same-server-id für den Slave"
ER_QUERY_CACHE_DISABLED
eng "Query cache is disabled; restart the server with query_cache_type=1 to enable it"
ger "Abfragen-Cache ist deaktiviert. Starten Sie den Server neu mit query_cache_type=1, um ihn zu aktivieren"
ER_SAME_NAME_PARTITION_FIELD
eng "Duplicate partition field name '%-.192s'"
ger "Partitionsfeld '%-.192s' ist ein Duplikat"
ER_PARTITION_COLUMN_LIST_ERROR
eng "Inconsistency in usage of column lists for partitioning"
ger "Inkonsistenz bei der Benutzung von Spaltenlisten für Partitionierung"
ER_WRONG_TYPE_COLUMN_VALUE_ERROR
eng "Partition column values of incorrect type"
ger "Partitionsspaltenwerte sind vom falschen Typ"
ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR
eng "Too many fields in '%-.192s'"
ger "Zu viele Felder in '%-.192s'"
ER_MAXVALUE_IN_VALUES_IN
eng "Cannot use MAXVALUE as value in VALUES IN"
ger "MAXVALUE kann nicht als Wert in VALUES IN verwendet werden"
ER_TOO_MANY_VALUES_ERROR
eng "Cannot have more than one value for this type of %-.64s partitioning"
ger "Für den Partionierungstyp %-.64s darf es nicht mehr als einen Wert geben"
ER_ROW_SINGLE_PARTITION_FIELD_ERROR
eng "Row expressions in VALUES IN only allowed for multi-field column partitioning"
ger "Zeilenausdrücke in VALUES IN sind nur für Mehrfeld-Spaltenpartionierung erlaubt"
ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD
eng "Field '%-.192s' is of a not allowed type for this type of partitioning"
ger "Feld '%-.192s' ist für diese Art von Partitionierung von einem nicht zulässigen Typ"
ER_PARTITION_FIELDS_TOO_LONG
eng "The total length of the partitioning fields is too large"
ger "Die Gesamtlänge der Partitionsfelder ist zu groß"
ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE
eng "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved."
ER_BINLOG_ROW_MODE_AND_STMT_ENGINE
@@ -6318,7 +6360,7 @@ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT
eng "Cannot change the binlog direct flag inside a stored function or trigger"
ER_SPATIAL_MUST_HAVE_GEOM_COL 42000
eng "A SPATIAL index may only contain a geometrical type column"
ger "Ein raumbezogener Index (SPATIAL) darf nur Spalten geometrischen Typs enthalten"
ER_TOO_LONG_INDEX_COMMENT
eng "Comment for index '%-.64s' is too long (max = %lu)"
@@ -6421,6 +6463,24 @@ ER_INDEX_CORRUPT
ER_UNDO_RECORD_TOO_BIG
eng "Undo log record is too big."
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT
eng "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE
eng "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave."
ER_BINLOG_UNSAFE_REPLACE_SELECT
eng "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT
eng "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT
eng "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave."
ER_BINLOG_UNSAFE_UPDATE_IGNORE
eng "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave."
ER_PLUGIN_NO_UNINSTALL
eng "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it."

View file

@@ -59,6 +59,12 @@ Query_tables_list::binlog_stmt_unsafe_errcode[BINLOG_STMT_UNSAFE_COUNT] =
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS,
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE,
ER_BINLOG_UNSAFE_MIXED_STATEMENT,
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT,
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE,
ER_BINLOG_UNSAFE_REPLACE_SELECT,
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT,
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT,
ER_BINLOG_UNSAFE_UPDATE_IGNORE
};

View file

@@ -1253,6 +1253,48 @@ public:
*/
BINLOG_STMT_UNSAFE_MIXED_STATEMENT,
/**
INSERT...IGNORE SELECT is unsafe because which rows are ignored depends
on the order that rows are retrieved by SELECT. This order cannot be
predicted and may differ on master and the slave.
*/
BINLOG_STMT_UNSAFE_INSERT_IGNORE_SELECT,
/**
INSERT...SELECT...UPDATE is unsafe because which rows are updated depends
on the order that rows are retrieved by SELECT. This order cannot be
predicted and may differ on master and the slave.
*/
BINLOG_STMT_UNSAFE_INSERT_SELECT_UPDATE,
/**
INSERT...REPLACE SELECT is unsafe because which rows are replaced depends
on the order that rows are retrieved by SELECT. This order cannot be
predicted and may differ on master and the slave.
*/
BINLOG_STMT_UNSAFE_REPLACE_SELECT,
/**
CREATE TABLE... IGNORE... SELECT is unsafe because which rows are ignored
depends on the order that rows are retrieved by SELECT. This order cannot
be predicted and may differ on master and the slave.
*/
BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT,
/**
CREATE TABLE...REPLACE... SELECT is unsafe because which rows are replaced
depends on the order that rows are retrieved from SELECT. This order
cannot be predicted and may differ on master and the slave
*/
BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT,
/**
UPDATE...IGNORE is unsafe because which rows are ignored depends on the
order that rows are updated. This order cannot be predicted and may differ
on master and the slave.
*/
BINLOG_STMT_UNSAFE_UPDATE_IGNORE,
/* The last element of this enumeration type. */
BINLOG_STMT_UNSAFE_COUNT
};

View file

@@ -2386,6 +2386,19 @@ case SQLCOM_PREPARE:
{
select_result *result;
/*
CREATE TABLE...IGNORE/REPLACE SELECT... can be unsafe, unless
ORDER BY PRIMARY KEY clause is used in SELECT statement. We therefore
use row based logging if mixed or row based logging is available.
TODO: Check if the order of the output of the select statement is
deterministic. Waiting for BUG#42415
*/
if(lex->ignore)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_IGNORE_SELECT);
if(lex->duplicates == DUP_REPLACE)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_CREATE_REPLACE_SELECT);
/*
If:
a) we inside an SP and there was NAME_CONST substitution,
@@ -2720,6 +2733,16 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (update_precheck(thd, all_tables))
break;
/*
UPDATE IGNORE can be unsafe. We therefore use row based
logging if mixed or row based logging is available.
TODO: Check if the order of the output of the select statement is
deterministic. Waiting for BUG#42415
*/
if (lex->ignore)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_UPDATE_IGNORE);
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
MYSQL_UPDATE_START(thd->query());
@@ -2886,6 +2909,23 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if ((res= insert_precheck(thd, all_tables)))
break;
/*
INSERT...SELECT...ON DUPLICATE KEY UPDATE/REPLACE SELECT/
INSERT...IGNORE...SELECT can be unsafe, unless ORDER BY PRIMARY KEY
clause is used in SELECT statement. We therefore use row based
logging if mixed or row based logging is available.
TODO: Check if the order of the output of the select statement is
deterministic. Waiting for BUG#42415
*/
if (lex->sql_command == SQLCOM_INSERT_SELECT &&
lex->duplicates == DUP_UPDATE)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_SELECT_UPDATE);
if (lex->sql_command == SQLCOM_INSERT_SELECT && lex->ignore)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_IGNORE_SELECT);
if (lex->sql_command == SQLCOM_REPLACE_SELECT)
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_REPLACE_SELECT);
/* Fix lock for first table */
if (first_table->lock_type == TL_WRITE_DELAYED)
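A hedged sketch of the flagging pattern these hunks rely on (the enum values mirror the new error symbols, everything else is a stand-in): each unsafe case is one bit in a per-statement mask that the binlog layer can inspect later to either warn or switch to row-based logging.

    #include <cstdio>

    enum UnsafeBit {
        UNSAFE_INSERT_IGNORE_SELECT,
        UNSAFE_INSERT_SELECT_UPDATE,
        UNSAFE_REPLACE_SELECT,
        UNSAFE_CREATE_IGNORE_SELECT,
        UNSAFE_CREATE_REPLACE_SELECT,
        UNSAFE_UPDATE_IGNORE,
        UNSAFE_COUNT
    };

    struct Lex {
        unsigned binlog_stmt_flags = 0;
        bool ignore = false;
        bool dup_replace = false;
        void set_stmt_unsafe(UnsafeBit b) { binlog_stmt_flags |= 1u << b; }
        bool is_stmt_unsafe() const { return binlog_stmt_flags != 0; }
    };

    /* Mirrors the CREATE TABLE ... SELECT checks added above. */
    void check_create_select(Lex &lex)
    {
        if (lex.ignore)
            lex.set_stmt_unsafe(UNSAFE_CREATE_IGNORE_SELECT);
        if (lex.dup_replace)
            lex.set_stmt_unsafe(UNSAFE_CREATE_REPLACE_SELECT);
    }

    int main()
    {
        Lex lex;
        lex.ignore = true;                  // models CREATE ... IGNORE SELECT
        check_create_select(lex);
        std::printf("unsafe: %d\n", lex.is_stmt_unsafe());   // prints 1
    }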

View file

@@ -352,7 +352,7 @@ Increase max_allowed_packet on master";
*errmsg = "memory allocation failed reading log event";
break;
case LOG_READ_TRUNC:
*errmsg = "binlog truncated in the middle of event";
*errmsg = "binlog truncated in the middle of event; consider out of disk space on master";
break;
default:
*errmsg = "unknown error reading log event on the master";
@@ -447,6 +447,9 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
String* packet = &thd->packet;
int error;
const char *errmsg = "Unknown error";
const char *fmt= "%s; the last event was read from '%s' at %s, the last byte read was read from '%s' at %s.";
char llbuff1[22], llbuff2[22];
char error_text[MAX_SLAVE_ERRMSG]; // to be sent to slave via my_message()
NET* net = &thd->net;
mysql_mutex_t *log_lock;
mysql_cond_t *log_cond;
@@ -677,10 +680,8 @@ impossible position";
if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
goto err;
my_off_t prev_pos= pos;
while (!(error = Log_event::read_log_event(&log, packet, log_lock)))
while (!(error= Log_event::read_log_event(&log, packet, log_lock)))
{
prev_pos= my_b_tell(&log);
#ifndef DBUG_OFF
if (max_binlog_dump_events && !left_events--)
{
@@ -766,17 +767,6 @@ impossible position";
goto err;
}
/*
here we were reading binlog that was not closed properly (as a result
of a crash ?). treat any corruption as EOF
*/
if (binlog_can_be_corrupted &&
error != LOG_READ_MEM && error != LOG_READ_EOF)
{
my_b_seek(&log, prev_pos);
error=LOG_READ_EOF;
}
/*
TODO: now that we are logging the offset, check to make sure
the recorded offset and the actual match.
@@ -1021,6 +1011,21 @@ end:
err:
thd_proc_info(thd, "Waiting to finalize termination");
if (my_errno == ER_MASTER_FATAL_ERROR_READING_BINLOG && my_b_inited(&log))
{
/*
detailing the fatal error message with coordinates
of the last position read.
*/
char b_start[FN_REFLEN], b_end[FN_REFLEN];
fn_format(b_start, coord->file_name, "", "", MY_REPLACE_DIR);
fn_format(b_end, log_file_name, "", "", MY_REPLACE_DIR);
my_snprintf(error_text, sizeof(error_text), fmt, errmsg,
b_start, (llstr(coord->pos, llbuff1), llbuff1),
b_end, (llstr(my_b_tell(&log), llbuff2), llbuff2));
}
else
strcpy(error_text, errmsg);
end_io_cache(&log);
RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
/*
@@ -1037,7 +1042,7 @@ err:
mysql_file_close(file, MYF(MY_WME));
thd->variables.max_allowed_packet= old_max_allowed_packet;
my_message(my_errno, errmsg, MYF(0));
my_message(my_errno, error_text, MYF(0));
DBUG_VOID_RETURN;
}
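How the enriched message is put together, as a hedged sketch (plain snprintf stands in for my_snprintf and llstr; the buffer size is an assumption): the base error text is extended with the file and position of the last event read and of the last byte read, which can sit in a later log file. The output matches the shape of the new Last_IO_Error lines in the result files above.

    #include <cinttypes>
    #include <cstdio>

    int format_fatal_error(char *out, size_t out_len, const char *base,
                           const char *ev_file, uint64_t ev_pos,
                           const char *byte_file, uint64_t byte_pos)
    {
        /* Same shape as the fmt string in the hunk above. */
        return std::snprintf(out, out_len,
                             "%s; the last event was read from '%s' at %" PRIu64
                             ", the last byte read was read from '%s' at %" PRIu64
                             ".",
                             base, ev_file, ev_pos, byte_file, byte_pos);
    }

    int main()
    {
        char text[512];
        format_fatal_error(text, sizeof(text),
                           "log event entry exceeded max_allowed_packet; "
                           "Increase max_allowed_packet on master",
                           "master-bin.000001", 75,
                           "master-bin.000001", 94);
        std::puts(text);
    }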

View file

@@ -2691,6 +2691,16 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
table_vector[i]=s->table=table=tables->table;
table->pos_in_table_list= tables;
error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
DBUG_EXECUTE_IF("bug11747970_raise_error",
{
if (!error)
{
my_error(ER_UNKNOWN_ERROR, MYF(0));
goto error;
}
});
if (error)
{
table->file->print_error(error, MYF(0));
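A rough model of the DBUG_EXECUTE_IF mechanism used here (an environment variable stands in for the real debug-keyword machinery; all names are assumptions): when the keyword is set, a successful storage-engine call is turned into a forced error so the handling path after it can be exercised.

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static bool debug_keyword_set(const char *kw)
    {
        const char *dbg = std::getenv("DEBUG_KEYWORDS");
        return dbg && std::strstr(dbg, kw) != nullptr;
    }

    int read_table_stats()      // stands in for table->file->info(...)
    {
        int error = 0;          // pretend the storage engine call succeeded
        if (!error && debug_keyword_set("bug11747970_raise_error"))
            error = 1;          // inject the failure the fix must survive
        return error;
    }

    int main()
    {
        if (int err = read_table_stats()) {
            std::fprintf(stderr, "simulated stats failure: %d\n", err);
            return 1;
        }
        std::puts("stats ok");
    }

Running it with DEBUG_KEYWORDS=bug11747970_raise_error forces the error branch, much as the test above does with SET SESSION debug.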

View file

@@ -416,6 +416,7 @@ int ha_heap::info(uint flag)
stats.index_file_length= hp_info.index_length;
stats.max_data_file_length= hp_info.max_records * hp_info.reclength;
stats.delete_length= hp_info.deleted * hp_info.reclength;
stats.create_time= (ulong) hp_info.create_time;
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= hp_info.auto_increment;
/*

View file

@@ -186,6 +186,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info,
share->auto_key= create_info->auto_key;
share->auto_key_type= create_info->auto_key_type;
share->auto_increment= create_info->auto_increment;
share->create_time= (long) time((time_t*) 0);
/* Must be allocated separately for rename to work */
if (!(share->name= my_strdup(name,MYF(0))))
{

View file

@@ -53,6 +53,7 @@ int heap_info(reg1 HP_INFO *info,reg2 HEAPINFO *x, int flag )
x->index_length = info->s->index_length;
x->max_records = info->s->max_records;
x->errkey = info->errkey;
x->create_time = info->s->create_time;
if (flag & HA_STATUS_AUTO)
x->auto_increment= info->s->auto_increment + 1;
DBUG_RETURN(0);
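The create_time plumbing in these three hunks, as a trimmed sketch (struct shapes are assumptions): the shared heap object is stamped once at creation, heap_info() copies the stamp out, and the handler surfaces it as the table's Create_time, the column the tests above now mask with --replace_column 12 #.

    #include <cstdio>
    #include <ctime>

    struct HeapShare { time_t create_time; };
    struct HeapInfo  { time_t create_time; };

    void heap_create(HeapShare *share)
    {
        share->create_time = std::time(nullptr);   // stamped once, at creation
    }

    void heap_info(const HeapShare *share, HeapInfo *out)
    {
        out->create_time = share->create_time;     // copied out on each call
    }

    int main()
    {
        HeapShare share;
        HeapInfo info;
        heap_create(&share);
        heap_info(&share, &info);
        std::printf("Create_time: %s", std::ctime(&info.create_time));
    }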

View file

@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -906,29 +906,28 @@
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@return allocated page number, FIL_NULL if out of space */
static __attribute__((nonnull(1,5), warn_unused_result))
ulint
btr_page_alloc_low(
/*===============*/
@return new allocated block, x-latched; NULL if out of space */
UNIV_INTERN
buf_block_t*
btr_page_alloc(
/*===========*/
dict_index_t* index, /*!< in: index */
ulint hint_page_no, /*!< in: hint of a good page */
byte file_direction, /*!< in: direction where a possible
page split is made */
ulint level, /*!< in: level where the page is placed
in the tree */
mtr_t* mtr, /*!< in/out: mini-transaction
for the allocation */
mtr_t* init_mtr) /*!< in/out: mini-transaction
in which the page should be
initialized (may be the same
as mtr), or NULL if it should
not be initialized (the page
at hint was previously freed
in mtr) */
mtr_t* mtr) /*!< in: mtr */
{
fseg_header_t* seg_header;
page_t* root;
buf_block_t* new_block;
ulint new_page_no;
if (dict_index_is_ibuf(index)) {
return(btr_page_alloc_for_ibuf(index, mtr));
}
root = btr_root_get(index, mtr);
@@ -942,42 +941,8 @@ btr_page_alloc_low(
reservation for free extents, and thus we know that a page can
be allocated: */
return(fseg_alloc_free_page_general(
seg_header, hint_page_no, file_direction,
TRUE, mtr, init_mtr));
}
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@return new allocated block, x-latched; NULL if out of space */
UNIV_INTERN
buf_block_t*
btr_page_alloc(
/*===========*/
dict_index_t* index, /*!< in: index */
ulint hint_page_no, /*!< in: hint of a good page */
byte file_direction, /*!< in: direction where a possible
page split is made */
ulint level, /*!< in: level where the page is placed
in the tree */
mtr_t* mtr, /*!< in/out: mini-transaction
for the allocation */
mtr_t* init_mtr) /*!< in/out: mini-transaction
for x-latching and initializing
the page */
{
buf_block_t* new_block;
ulint new_page_no;
if (dict_index_is_ibuf(index)) {
return(btr_page_alloc_for_ibuf(index, mtr));
}
new_page_no = btr_page_alloc_low(
index, hint_page_no, file_direction, level, mtr, init_mtr);
new_page_no = fseg_alloc_free_page_general(seg_header, hint_page_no,
file_direction, TRUE, mtr);
if (new_page_no == FIL_NULL) {
return(NULL);
@@ -985,16 +950,9 @@ btr_page_alloc(
new_block = buf_page_get(dict_index_get_space(index),
dict_table_zip_size(index->table),
new_page_no, RW_X_LATCH, init_mtr);
new_page_no, RW_X_LATCH, mtr);
buf_block_dbg_add_level(new_block, SYNC_TREE_NODE_NEW);
if (mtr->freed_clust_leaf) {
mtr_memo_release(mtr, new_block, MTR_MEMO_FREE_CLUST_LEAF);
ut_ad(!mtr_memo_contains(mtr, new_block,
MTR_MEMO_FREE_CLUST_LEAF));
}
ut_ad(btr_freed_leaves_validate(mtr));
return(new_block);
}
@@ -1107,15 +1065,6 @@ btr_page_free_low(
fseg_free_page(seg_header,
buf_block_get_space(block),
buf_block_get_page_no(block), mtr);
/* The page was marked free in the allocation bitmap, but it
should remain buffer-fixed until mtr_commit(mtr) or until it
is explicitly freed from the mini-transaction. */
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
/* TODO: Discard any operations on the page from the redo log
and remove the block from the flush list and the buffer pool.
This would free up buffer pool earlier and reduce writes to
both the tablespace and the redo log. */
}
/**************************************************************//**
@@ -1129,140 +1078,13 @@ btr_page_free(
buf_block_t* block, /*!< in: block to be freed, x-latched */
mtr_t* mtr) /*!< in: mtr */
{
const page_t* page = buf_block_get_frame(block);
ulint level = btr_page_get_level(page, mtr);
ulint level;
level = btr_page_get_level(buf_block_get_frame(block), mtr);
ut_ad(fil_page_get_type(block->frame) == FIL_PAGE_INDEX);
btr_page_free_low(index, block, level, mtr);
/* The handling of MTR_MEMO_FREE_CLUST_LEAF assumes this. */
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
if (level == 0 && dict_index_is_clust(index)) {
/* We may have to call btr_mark_freed_leaves() to
temporarily mark the block nonfree for invoking
btr_store_big_rec_extern_fields_func() after an
update. Remember that the block was freed. */
mtr->freed_clust_leaf = TRUE;
mtr_memo_push(mtr, block, MTR_MEMO_FREE_CLUST_LEAF);
}
ut_ad(btr_freed_leaves_validate(mtr));
}
/**************************************************************//**
Marks all MTR_MEMO_FREE_CLUST_LEAF pages nonfree or free.
For invoking btr_store_big_rec_extern_fields() after an update,
we must temporarily mark freed clustered index pages allocated, so
that off-page columns will not be allocated from them. Between the
btr_store_big_rec_extern_fields() and mtr_commit() we have to
mark the pages free again, so that no pages will be leaked. */
UNIV_INTERN
void
btr_mark_freed_leaves(
/*==================*/
dict_index_t* index, /*!< in/out: clustered index */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool nonfree)/*!< in: TRUE=mark nonfree, FALSE=mark freed */
{
/* This is loosely based on mtr_memo_release(). */
ulint offset;
ut_ad(dict_index_is_clust(index));
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
if (!mtr->freed_clust_leaf) {
return;
}
offset = dyn_array_get_data_size(&mtr->memo);
while (offset > 0) {
mtr_memo_slot_t* slot;
buf_block_t* block;
offset -= sizeof *slot;
slot = dyn_array_get_element(&mtr->memo, offset);
if (slot->type != MTR_MEMO_FREE_CLUST_LEAF) {
continue;
}
/* Because btr_page_alloc() does invoke
mtr_memo_release on MTR_MEMO_FREE_CLUST_LEAF, all
blocks tagged with MTR_MEMO_FREE_CLUST_LEAF in the
memo must still be clustered index leaf tree pages. */
block = slot->object;
ut_a(buf_block_get_space(block)
== dict_index_get_space(index));
ut_a(fil_page_get_type(buf_block_get_frame(block))
== FIL_PAGE_INDEX);
ut_a(page_is_leaf(buf_block_get_frame(block)));
if (nonfree) {
/* Allocate the same page again. */
ulint page_no;
page_no = btr_page_alloc_low(
index, buf_block_get_page_no(block),
FSP_NO_DIR, 0, mtr, NULL);
ut_a(page_no == buf_block_get_page_no(block));
} else {
/* Assert that the page is allocated and free it. */
btr_page_free_low(index, block, 0, mtr);
}
}
ut_ad(btr_freed_leaves_validate(mtr));
}
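A condensed sketch of the bracketing protocol that the comment above describes (all identifiers as in this deleted code):
/* after an update has freed clustered index leaf pages and
produced a big_rec: */
btr_mark_freed_leaves(index, mtr, TRUE);	/* mark the leaves nonfree */
/* ... btr_store_big_rec_extern_fields() allocates BLOB pages;
none of them can come from the freed leaves ... */
btr_mark_freed_leaves(index, mtr, FALSE);	/* mark them free again */
mtr_commit(mtr);				/* no pages are leaked */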
#ifdef UNIV_DEBUG
/**************************************************************//**
Validates all pages marked MTR_MEMO_FREE_CLUST_LEAF.
@see btr_mark_freed_leaves()
@return TRUE */
UNIV_INTERN
ibool
btr_freed_leaves_validate(
/*======================*/
mtr_t* mtr) /*!< in: mini-transaction */
{
ulint offset;
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
offset = dyn_array_get_data_size(&mtr->memo);
while (offset > 0) {
const mtr_memo_slot_t* slot;
const buf_block_t* block;
offset -= sizeof *slot;
slot = dyn_array_get_element(&mtr->memo, offset);
if (slot->type != MTR_MEMO_FREE_CLUST_LEAF) {
continue;
}
ut_a(mtr->freed_clust_leaf);
/* Because btr_page_alloc() does invoke
mtr_memo_release on MTR_MEMO_FREE_CLUST_LEAF, all
blocks tagged with MTR_MEMO_FREE_CLUST_LEAF in the
memo must still be clustered index leaf tree pages. */
block = slot->object;
ut_a(fil_page_get_type(buf_block_get_frame(block))
== FIL_PAGE_INDEX);
ut_a(page_is_leaf(buf_block_get_frame(block)));
}
return(TRUE);
}
#endif /* UNIV_DEBUG */
/**************************************************************//**
Sets the child node file address in a node pointer. */
UNIV_INLINE
@@ -1987,7 +1809,7 @@ btr_root_raise_and_insert(
level = btr_page_get_level(root, mtr);
new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr, mtr);
new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr);
new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
ut_a(!new_page_zip == !root_page_zip);
@@ -2458,7 +2280,7 @@ btr_attach_half_pages(
/*==================*/
dict_index_t* index, /*!< in: the index tree */
buf_block_t* block, /*!< in/out: page to be split */
const rec_t* split_rec, /*!< in: first record on upper
rec_t* split_rec, /*!< in: first record on upper
half page */
buf_block_t* new_block, /*!< in/out: the new half page */
ulint direction, /*!< in: FSP_UP or FSP_DOWN */
@@ -2723,7 +2545,7 @@ func_start:
/* 2. Allocate a new page to the index */
new_block = btr_page_alloc(cursor->index, hint_page_no, direction,
btr_page_get_level(page, mtr), mtr, mtr);
btr_page_get_level(page, mtr), mtr);
new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
btr_page_create(new_block, new_page_zip, cursor->index,
@@ -3173,16 +2995,15 @@ btr_node_ptr_delete(
ut_a(err == DB_SUCCESS);
if (!compressed) {
btr_cur_compress_if_useful(&cursor, FALSE, mtr);
btr_cur_compress_if_useful(&cursor, mtr);
}
}
/*************************************************************//**
If page is the only on its level, this function moves its records to the
father page, thus reducing the tree height.
@return father block */
father page, thus reducing the tree height. */
static
buf_block_t*
void
btr_lift_page_up(
/*=============*/
dict_index_t* index, /*!< in: index tree */
@@ -3299,8 +3120,6 @@ btr_lift_page_up(
}
ut_ad(page_validate(father_page, index));
ut_ad(btr_check_node_ptr(index, father_block, mtr));
return(father_block);
}
/*************************************************************//**
@@ -3317,13 +3136,11 @@ UNIV_INTERN
ibool
btr_compress(
/*=========*/
btr_cur_t* cursor, /*!< in/out: cursor on the page to merge
or lift; the page must not be empty:
when deleting records, use btr_discard_page()
if the page would become empty */
ibool adjust, /*!< in: TRUE if should adjust the
cursor position even if compression occurs */
mtr_t* mtr) /*!< in/out: mini-transaction */
btr_cur_t* cursor, /*!< in: cursor on the page to merge or lift;
the page must not be empty: in record delete
use btr_discard_page if the page would become
empty */
mtr_t* mtr) /*!< in: mtr */
{
dict_index_t* index;
ulint space;
@@ -3341,14 +3158,12 @@ btr_compress(
ulint* offsets;
ulint data_size;
ulint n_recs;
ulint nth_rec = 0; /* remove bogus warning */
ulint max_ins_size;
ulint max_ins_size_reorg;
block = btr_cur_get_block(cursor);
page = btr_cur_get_page(cursor);
index = btr_cur_get_index(cursor);
ut_a((ibool) !!page_is_comp(page) == dict_table_is_comp(index->table));
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
@@ -3369,10 +3184,6 @@ btr_compress(
offsets = btr_page_get_father_block(NULL, heap, index, block, mtr,
&father_cursor);
if (adjust) {
nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor));
}
/* Decide the page to which we try to merge and which will inherit
the locks */
@@ -3399,9 +3210,9 @@ btr_compress(
} else {
/* The page is the only one on the level, lift the records
to the father */
merge_block = btr_lift_page_up(index, block, mtr);
goto func_exit;
btr_lift_page_up(index, block, mtr);
mem_heap_free(heap);
return(TRUE);
}
n_recs = page_get_n_recs(page);
@@ -3483,10 +3294,6 @@ err_exit:
btr_node_ptr_delete(index, block, mtr);
lock_update_merge_left(merge_block, orig_pred, block);
if (adjust) {
nth_rec += page_rec_get_n_recs_before(orig_pred);
}
} else {
rec_t* orig_succ;
#ifdef UNIV_BTR_DEBUG
@@ -3551,6 +3358,7 @@ err_exit:
}
btr_blob_dbg_remove(page, index, "btr_compress");
mem_heap_free(heap);
if (!dict_index_is_clust(index) && page_is_leaf(merge_page)) {
/* Update the free bits of the B-tree page in the
@@ -3602,16 +3410,6 @@ err_exit:
btr_page_free(index, block, mtr);
ut_ad(btr_check_node_ptr(index, merge_block, mtr));
func_exit:
mem_heap_free(heap);
if (adjust) {
btr_cur_position(
index,
page_rec_get_nth(merge_block->frame, nth_rec),
merge_block, cursor);
}
return(TRUE);
}

View file

@@ -1895,7 +1895,7 @@ btr_cur_update_in_place(
was_delete_marked = rec_get_deleted_flag(
rec, page_is_comp(buf_block_get_frame(block)));
is_hashed = block->is_hashed;
is_hashed = (block->index != NULL);
if (is_hashed) {
/* TO DO: Can we skip this if none of the fields
@@ -1985,6 +1985,7 @@ btr_cur_optimistic_update(
ulint old_rec_size;
dtuple_t* new_entry;
roll_ptr_t roll_ptr;
trx_t* trx;
mem_heap_t* heap;
ulint i;
ulint n_ext;
@@ -2001,10 +2002,9 @@ btr_cur_optimistic_update(
heap = mem_heap_create(1024);
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
ut_a(!rec_offs_any_null_extern(rec, offsets)
|| trx_is_recv(thr_get_trx(thr)));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
#ifdef UNIV_BLOB_NULL_DEBUG
ut_a(!rec_offs_any_null_extern(rec, offsets));
#endif /* UNIV_BLOB_NULL_DEBUG */
#ifdef UNIV_DEBUG
if (btr_cur_print_record_ops && thr) {
@@ -2127,11 +2127,13 @@ any_extern:
page_cur_move_to_prev(page_cursor);
trx = thr_get_trx(thr);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
roll_ptr);
row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
thr_get_trx(thr)->id);
trx->id);
}
/* There are no externally stored columns in new_entry */
@@ -2217,9 +2219,7 @@ btr_cur_pessimistic_update(
/*=======================*/
ulint flags, /*!< in: undo logging, locking, and rollback
flags */
btr_cur_t* cursor, /*!< in/out: cursor on the record to update;
cursor may become invalid if *big_rec == NULL
|| !(flags & BTR_KEEP_POS_FLAG) */
btr_cur_t* cursor, /*!< in: cursor on the record to update */
mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
be stored externally by the caller, or NULL */
@@ -2358,7 +2358,7 @@ btr_cur_pessimistic_update(
record to be inserted: we have to remember which fields were such */
ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, heap);
n_ext += btr_push_update_extern_fields(new_entry, update, *heap);
if (UNIV_LIKELY_NULL(page_zip)) {
@@ -2381,10 +2381,6 @@ make_external:
err = DB_TOO_BIG_RECORD;
goto return_after_reservations;
}
ut_ad(page_is_leaf(page));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
}
/* Store state of explicit locks on rec on the page infimum record,
@@ -2412,8 +2408,6 @@ make_external:
rec = btr_cur_insert_if_possible(cursor, new_entry, n_ext, mtr);
if (rec) {
page_cursor->rec = rec;
lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor),
rec, block);
@@ -2427,10 +2421,7 @@ make_external:
rec, index, offsets, mtr);
}
btr_cur_compress_if_useful(
cursor,
big_rec_vec != NULL && (flags & BTR_KEEP_POS_FLAG),
mtr);
btr_cur_compress_if_useful(cursor, mtr);
if (page_zip && !dict_index_is_clust(index)
&& page_is_leaf(page)) {
@@ -2450,21 +2441,6 @@ make_external:
}
}
if (big_rec_vec) {
ut_ad(page_is_leaf(page));
ut_ad(dict_index_is_clust(index));
ut_ad(flags & BTR_KEEP_POS_FLAG);
/* btr_page_split_and_insert() in
btr_cur_pessimistic_insert() invokes
mtr_memo_release(mtr, index->lock, MTR_MEMO_X_LOCK).
We must keep the index->lock when we created a
big_rec, so that row_upd_clust_rec() can store the
big_rec in the same mini-transaction. */
mtr_x_lock(dict_index_get_lock(index), mtr);
}
/* Was the record to be updated positioned as the first user
record on its page? */
was_first = page_cur_is_before_first(page_cursor);
@@ -2480,7 +2456,6 @@ make_external:
ut_a(rec);
ut_a(err == DB_SUCCESS);
ut_a(dummy_big_rec == NULL);
page_cursor->rec = rec;
if (dict_index_is_sec_or_ibuf(index)) {
/* Update PAGE_MAX_TRX_ID in the index page header.
@@ -2915,12 +2890,10 @@ UNIV_INTERN
ibool
btr_cur_compress_if_useful(
/*=======================*/
btr_cur_t* cursor, /*!< in/out: cursor on the page to compress;
cursor does not stay valid if !adjust and
compression occurs */
ibool adjust, /*!< in: TRUE if should adjust the
cursor position even if compression occurs */
mtr_t* mtr) /*!< in/out: mini-transaction */
btr_cur_t* cursor, /*!< in: cursor on the page to compress;
cursor does not stay valid if compression
occurs */
mtr_t* mtr) /*!< in: mtr */
{
ut_ad(mtr_memo_contains(mtr,
dict_index_get_lock(btr_cur_get_index(cursor)),
@@ -2929,7 +2902,7 @@ btr_cur_compress_if_useful(
MTR_MEMO_PAGE_X_FIX));
return(btr_cur_compress_recommendation(cursor, mtr)
&& btr_compress(cursor, adjust, mtr));
&& btr_compress(cursor, mtr));
}
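A hypothetical caller of the merged two-argument form; per the comment above, the cursor must be treated as invalid whenever compression takes place:
if (btr_cur_compress_if_useful(cursor, mtr)) {
	/* pages were merged or lifted up; do not use the
	cursor position any more */
} else {
	/* no compression was done; the cursor still points
	at the same record */
}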
/*******************************************************//**
@@ -3171,7 +3144,7 @@ return_after_reservations:
mem_heap_free(heap);
if (ret == FALSE) {
ret = btr_cur_compress_if_useful(cursor, FALSE, mtr);
ret = btr_cur_compress_if_useful(cursor, mtr);
}
if (n_extents > 0) {
@@ -4160,9 +4133,6 @@ btr_store_big_rec_extern_fields_func(
the "external storage" flags in offsets
will not correspond to rec when
this function returns */
const big_rec_t*big_rec_vec, /*!< in: vector containing fields
to be stored externally */
#ifdef UNIV_DEBUG
mtr_t* local_mtr, /*!< in: mtr containing the
latch to rec and to the tree */
@@ -4171,11 +4141,9 @@ btr_store_big_rec_extern_fields_func(
ibool update_in_place,/*! in: TRUE if the record is updated
in place (not delete+insert) */
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
mtr_t* alloc_mtr) /*!< in/out: in an insert, NULL;
in an update, local_mtr for
allocating BLOB pages and
updating BLOB pointers; alloc_mtr
must not have freed any leaf pages */
const big_rec_t*big_rec_vec) /*!< in: vector containing fields
to be stored externally */
{
ulint rec_page_no;
byte* field_ref;
@@ -4194,9 +4162,6 @@ btr_store_big_rec_extern_fields_func(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
ut_ad(local_mtr);
ut_ad(!alloc_mtr || alloc_mtr == local_mtr);
ut_ad(!update_in_place || alloc_mtr);
ut_ad(mtr_memo_contains(local_mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(local_mtr, rec_block, MTR_MEMO_PAGE_X_FIX));
@@ -4212,25 +4177,6 @@ btr_store_big_rec_extern_fields_func(
rec_page_no = buf_block_get_page_no(rec_block);
ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
if (alloc_mtr) {
/* Because alloc_mtr will be committed after
mtr, it is possible that the tablespace has been
extended when the B-tree record was updated or
inserted, or it will be extended while allocating
pages for big_rec.
TODO: In mtr (not alloc_mtr), write a redo log record
about extending the tablespace to its current size,
and remember the current size. Whenever the tablespace
grows as pages are allocated, write further redo log
records to mtr. (Currently tablespace extension is not
covered by the redo log. If it were, the record would
only be written to alloc_mtr, which is committed after
mtr.) */
} else {
alloc_mtr = &mtr;
}
if (UNIV_LIKELY_NULL(page_zip)) {
int err;
@@ -4307,7 +4253,7 @@ btr_store_big_rec_extern_fields_func(
}
block = btr_page_alloc(index, hint_page_no,
FSP_NO_DIR, 0, alloc_mtr, &mtr);
FSP_NO_DIR, 0, &mtr);
if (UNIV_UNLIKELY(block == NULL)) {
mtr_commit(&mtr);
@@ -4434,15 +4380,11 @@ btr_store_big_rec_extern_fields_func(
goto next_zip_page;
}
if (alloc_mtr == &mtr) {
rec_block = buf_page_get(
space_id, zip_size,
rec_page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(
rec_block,
SYNC_NO_ORDER_CHECK);
}
rec_block = buf_page_get(space_id, zip_size,
rec_page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(rec_block,
SYNC_NO_ORDER_CHECK);
if (err == Z_STREAM_END) {
mach_write_to_4(field_ref
@@ -4476,8 +4418,7 @@ btr_store_big_rec_extern_fields_func(
page_zip_write_blob_ptr(
page_zip, rec, index, offsets,
big_rec_vec->fields[i].field_no,
alloc_mtr);
big_rec_vec->fields[i].field_no, &mtr);
next_zip_page:
prev_page_no = page_no;
@@ -4522,23 +4463,19 @@ next_zip_page:
extern_len -= store_len;
if (alloc_mtr == &mtr) {
rec_block = buf_page_get(
space_id, zip_size,
rec_page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(
rec_block,
SYNC_NO_ORDER_CHECK);
}
rec_block = buf_page_get(space_id, zip_size,
rec_page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(rec_block,
SYNC_NO_ORDER_CHECK);
mlog_write_ulint(field_ref + BTR_EXTERN_LEN, 0,
MLOG_4BYTES, alloc_mtr);
MLOG_4BYTES, &mtr);
mlog_write_ulint(field_ref
+ BTR_EXTERN_LEN + 4,
big_rec_vec->fields[i].len
- extern_len,
MLOG_4BYTES, alloc_mtr);
MLOG_4BYTES, &mtr);
if (prev_page_no == FIL_NULL) {
btr_blob_dbg_add_blob(
@@ -4548,19 +4485,18 @@ next_zip_page:
mlog_write_ulint(field_ref
+ BTR_EXTERN_SPACE_ID,
space_id, MLOG_4BYTES,
alloc_mtr);
space_id,
MLOG_4BYTES, &mtr);
mlog_write_ulint(field_ref
+ BTR_EXTERN_PAGE_NO,
page_no, MLOG_4BYTES,
alloc_mtr);
page_no,
MLOG_4BYTES, &mtr);
mlog_write_ulint(field_ref
+ BTR_EXTERN_OFFSET,
FIL_PAGE_DATA,
MLOG_4BYTES,
alloc_mtr);
MLOG_4BYTES, &mtr);
}
prev_page_no = page_no;

View file

@@ -44,12 +44,8 @@ Created 2/17/1996 Heikki Tuuri
#include "ha0ha.h"
/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
Protected by btr_search_latch. */
UNIV_INTERN char btr_search_enabled = TRUE;
UNIV_INTERN ibool btr_search_fully_disabled = FALSE;
/** Mutex protecting btr_search_enabled */
static mutex_t btr_search_enabled_mutex;
#ifdef UNIV_PFS_MUTEX
/* Key to register btr_search_enabled_mutex with performance schema */
@@ -180,8 +176,6 @@ btr_search_sys_create(
rw_lock_create(btr_search_latch_key, &btr_search_latch,
SYNC_SEARCH_SYS);
mutex_create(btr_search_enabled_mutex_key,
&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);
btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));
@@ -211,27 +205,37 @@ void
btr_search_disable(void)
/*====================*/
{
mutex_enter(&btr_search_enabled_mutex);
dict_table_t* table;
mutex_enter(&dict_sys->mutex);
rw_lock_x_lock(&btr_search_latch);
/* Disable access to hash index, also tell ha_insert_for_fold()
stop adding new nodes to hash index, but still allow updating
existing nodes */
btr_search_enabled = FALSE;
/* Clear all block->is_hashed flags and remove all entries
from btr_search_sys->hash_index. */
buf_pool_drop_hash_index();
/* Clear the index->search_info->ref_count of every index in
the data dictionary cache. */
for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
table = UT_LIST_GET_NEXT(table_LRU, table)) {
/* hash index has been cleaned up, disallow any operation to
the hash index */
btr_search_fully_disabled = TRUE;
dict_index_t* index;
/* btr_search_enabled_mutex should guarantee this. */
ut_ad(!btr_search_enabled);
for (index = dict_table_get_first_index(table); index;
index = dict_table_get_next_index(index)) {
index->search_info->ref_count = 0;
}
}
mutex_exit(&dict_sys->mutex);
/* Set all block->index = NULL. */
buf_pool_clear_hash_index();
/* Clear the adaptive hash index. */
hash_table_clear(btr_search_sys->hash_index);
mem_heap_empty(btr_search_sys->hash_index->heap);
rw_lock_x_unlock(&btr_search_latch);
mutex_exit(&btr_search_enabled_mutex);
}
/********************************************************************//**
@@ -241,14 +245,11 @@ void
btr_search_enable(void)
/*====================*/
{
mutex_enter(&btr_search_enabled_mutex);
rw_lock_x_lock(&btr_search_latch);
btr_search_enabled = TRUE;
btr_search_fully_disabled = FALSE;
rw_lock_x_unlock(&btr_search_latch);
mutex_exit(&btr_search_enabled_mutex);
}
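Since btr_search_enabled can now change whenever btr_search_latch is not held, the writers below re-check the flag immediately after acquiring the latch in exclusive mode; a condensed sketch of that pattern (identifiers as in the hunks that follow):
rw_lock_x_lock(&btr_search_latch);
if (!btr_search_enabled) {
	/* the hash index was disabled while the latch
	was not held; skip the update */
	goto function_exit;
}
ha_insert_for_fold(table, ins_fold, block, ins_rec);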
/*****************************************************************//**
@@ -471,7 +472,7 @@ btr_search_update_block_hash_info(
&& (block->n_bytes == info->n_bytes)
&& (block->left_side == info->left_side)) {
if ((block->is_hashed)
if ((block->index)
&& (block->curr_n_fields == info->n_fields)
&& (block->curr_n_bytes == info->n_bytes)
&& (block->curr_left_side == info->left_side)) {
@@ -500,7 +501,7 @@ btr_search_update_block_hash_info(
/ BTR_SEARCH_PAGE_BUILD_LIMIT)
&& (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
if ((!block->is_hashed)
if ((!block->index)
|| (block->n_hash_helps
> 2 * page_get_n_recs(block->frame))
|| (block->n_fields != block->curr_n_fields)
@@ -532,9 +533,9 @@ btr_search_update_hash_ref(
buf_block_t* block, /*!< in: buffer block where cursor positioned */
btr_cur_t* cursor) /*!< in: cursor */
{
dict_index_t* index;
ulint fold;
rec_t* rec;
index_id_t index_id;
const rec_t* rec;
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
@@ -545,13 +546,15 @@ btr_search_update_hash_ref(
ut_ad(page_align(btr_cur_get_rec(cursor))
== buf_block_get_frame(block));
if (!block->is_hashed) {
index = block->index;
if (!index) {
return;
}
ut_a(block->index == cursor->index);
ut_a(!dict_index_is_ibuf(cursor->index));
ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index));
if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
@@ -568,12 +571,11 @@ btr_search_update_hash_ref(
return;
}
index_id = cursor->index->id;
fold = rec_fold(rec,
rec_get_offsets(rec, cursor->index, offsets_,
rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index_id);
block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
@@ -837,7 +839,7 @@ btr_search_guess_on_hash(
{
buf_pool_t* buf_pool;
buf_block_t* block;
rec_t* rec;
const rec_t* rec;
ulint fold;
index_id_t index_id;
#ifdef notdefined
@@ -923,7 +925,7 @@ btr_search_guess_on_hash(
ut_ad(page_rec_is_user_rec(rec));
btr_cur_position(index, rec, block, cursor);
btr_cur_position(index, (rec_t*) rec, block, cursor);
/* Check the validity of the guess within the page */
@@ -1053,15 +1055,16 @@ btr_search_drop_page_hash_index(
retry:
rw_lock_s_lock(&btr_search_latch);
page = block->frame;
index = block->index;
if (UNIV_LIKELY(!block->is_hashed)) {
if (UNIV_LIKELY(!index)) {
rw_lock_s_unlock(&btr_search_latch);
return;
}
ut_a(!dict_index_is_ibuf(index));
table = btr_search_sys->hash_index;
#ifdef UNIV_SYNC_DEBUG
@@ -1072,8 +1075,6 @@ retry:
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
index = block->index;
ut_a(!dict_index_is_ibuf(index));
/* NOTE: The fields of block must not be accessed after
releasing btr_search_latch, as the index page might only
@@ -1083,6 +1084,7 @@ retry:
ut_a(n_fields + n_bytes > 0);
page = block->frame;
n_recs = page_get_n_recs(page);
/* Calculate and cache fold values into an array for fast deletion
@@ -1131,7 +1133,7 @@ next_rec:
rw_lock_x_lock(&btr_search_latch);
if (UNIV_UNLIKELY(!block->is_hashed)) {
if (UNIV_UNLIKELY(!block->index)) {
/* Someone else has meanwhile dropped the hash index */
goto cleanup;
@@ -1159,9 +1161,8 @@ next_rec:
ut_a(index->search_info->ref_count > 0);
index->search_info->ref_count--;
block->is_hashed = FALSE;
block->index = NULL;
cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (UNIV_UNLIKELY(block->n_pointers)) {
@@ -1187,8 +1188,8 @@ cleanup:
}
/********************************************************************//**
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */
Drops a possible page hash index when a page is evicted from the buffer pool
or freed in a file segment. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
@@ -1201,28 +1202,19 @@ btr_search_drop_page_hash_when_freed(
buf_block_t* block;
mtr_t mtr;
if (!buf_page_peek_if_search_hashed(space, page_no)) {
return;
}
mtr_start(&mtr);
/* We assume that if the caller has a latch on the page, then the
caller has already dropped the hash index for the page, and we never
get here. Therefore we can acquire the s-latch to the page without
having to fear a deadlock. */
/* If the caller has a latch on the page, then the caller must
have a x-latch on the page and it must have already dropped
the hash index for the page. Because of the x-latch that we
are possibly holding, we cannot s-latch the page, but must
(recursively) x-latch it, even though we are only reading. */
block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
&mtr);
/* Because the buffer pool mutex was released by
buf_page_peek_if_search_hashed(), it is possible that the
block was removed from the buffer pool by another thread
before buf_page_get_gen() got a chance to acquire the buffer
pool mutex again. Thus, we must check for a NULL return. */
if (UNIV_LIKELY(block != NULL)) {
if (block && block->index) {
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
@@ -1254,7 +1246,6 @@ btr_search_build_page_hash_index(
rec_t* next_rec;
ulint fold;
ulint next_fold;
index_id_t index_id;
ulint n_cached;
ulint n_recs;
ulint* folds;
@@ -1268,9 +1259,6 @@ btr_search_build_page_hash_index(
ut_ad(index);
ut_a(!dict_index_is_ibuf(index));
table = btr_search_sys->hash_index;
page = buf_block_get_frame(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
@@ -1279,9 +1267,17 @@ btr_search_build_page_hash_index(
rw_lock_s_lock(&btr_search_latch);
if (block->is_hashed && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
if (!btr_search_enabled) {
rw_lock_s_unlock(&btr_search_latch);
return;
}
table = btr_search_sys->hash_index;
page = buf_block_get_frame(block);
if (block->index && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
rw_lock_s_unlock(&btr_search_latch);
@@ -1318,7 +1314,7 @@ btr_search_build_page_hash_index(
n_cached = 0;
index_id = btr_page_get_index_id(page);
ut_a(index->id == btr_page_get_index_id(page));
rec = page_rec_get_next(page_get_infimum_rec(page));
@@ -1333,7 +1329,7 @@ btr_search_build_page_hash_index(
}
}
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
if (left_side) {
@@ -1360,7 +1356,7 @@ btr_search_build_page_hash_index(
offsets = rec_get_offsets(next_rec, index, offsets,
n_fields + (n_bytes > 0), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index_id);
n_bytes, index->id);
if (fold != next_fold) {
/* Insert an entry into the hash index */
@@ -1385,13 +1381,13 @@ btr_search_build_page_hash_index(
rw_lock_x_lock(&btr_search_latch);
if (UNIV_UNLIKELY(btr_search_fully_disabled)) {
if (UNIV_UNLIKELY(!btr_search_enabled)) {
goto exit_func;
}
if (block->is_hashed && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
if (block->index && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
goto exit_func;
}
@@ -1400,11 +1396,10 @@ btr_search_build_page_hash_index(
rebuild hash index for a page that is already hashed, we
have to take care not to increment the counter in that
case. */
if (!block->is_hashed) {
if (!block->index) {
index->search_info->ref_count++;
}
block->is_hashed = TRUE;
block->n_hash_helps = 0;
block->curr_n_fields = n_fields;
@@ -1452,14 +1447,15 @@ btr_search_move_or_delete_hash_entries(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_a(!new_block->is_hashed || new_block->index == index);
ut_a(!block->is_hashed || block->index == index);
ut_a(!(new_block->is_hashed || block->is_hashed)
|| !dict_index_is_ibuf(index));
rw_lock_s_lock(&btr_search_latch);
if (new_block->is_hashed) {
ut_a(!new_block->index || new_block->index == index);
ut_a(!block->index || block->index == index);
ut_a(!(new_block->index || block->index)
|| !dict_index_is_ibuf(index));
if (new_block->index) {
rw_lock_s_unlock(&btr_search_latch);
@@ -1468,7 +1464,7 @@ btr_search_move_or_delete_hash_entries(
return;
}
if (block->is_hashed) {
if (block->index) {
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
@@ -1505,42 +1501,48 @@ btr_search_update_hash_on_delete(
{
hash_table_t* table;
buf_block_t* block;
rec_t* rec;
const rec_t* rec;
ulint fold;
index_id_t index_id;
dict_index_t* index;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
mem_heap_t* heap = NULL;
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
index = block->index;
if (!index) {
return;
}
ut_a(block->index == cursor->index);
ut_a(index == cursor->index);
ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
ut_a(!dict_index_is_ibuf(cursor->index));
ut_a(!dict_index_is_ibuf(index));
table = btr_search_sys->hash_index;
index_id = cursor->index->id;
fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
rec = btr_cur_get_rec(cursor);
fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_bytes, index_id);
block->curr_n_fields, block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
rw_lock_x_lock(&btr_search_latch);
ha_search_and_delete_if_found(table, fold, rec);
if (block->index) {
ut_a(block->index == index);
ha_search_and_delete_if_found(table, fold, rec);
}
rw_lock_x_unlock(&btr_search_latch);
}
@@ -1558,6 +1560,7 @@ btr_search_update_hash_node_on_insert(
{
hash_table_t* table;
buf_block_t* block;
dict_index_t* index;
rec_t* rec;
rec = btr_cur_get_rec(cursor);
@@ -1568,16 +1571,25 @@ btr_search_update_hash_node_on_insert(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
index = block->index;
if (!index) {
return;
}
ut_a(block->index == cursor->index);
ut_a(!dict_index_is_ibuf(cursor->index));
ut_a(cursor->index == index);
ut_a(!dict_index_is_ibuf(index));
rw_lock_x_lock(&btr_search_latch);
if (!block->index) {
goto func_exit;
}
ut_a(block->index == index);
if ((cursor->flag == BTR_CUR_HASH)
&& (cursor->n_fields == block->curr_n_fields)
&& (cursor->n_bytes == block->curr_n_bytes)
@ -1588,6 +1600,7 @@ btr_search_update_hash_node_on_insert(
ha_search_and_update_if_found(table, cursor->fold, rec,
block, page_rec_get_next(rec));
func_exit:
rw_lock_x_unlock(&btr_search_latch);
} else {
rw_lock_x_unlock(&btr_search_latch);
@@ -1609,10 +1622,10 @@ btr_search_update_hash_on_insert(
{
hash_table_t* table;
buf_block_t* block;
dict_index_t* index;
rec_t* rec;
rec_t* ins_rec;
rec_t* next_rec;
index_id_t index_id;
ulint fold;
ulint ins_fold;
ulint next_fold = 0; /* remove warning (??? bug ???) */
@@ -1637,15 +1650,15 @@ btr_search_update_hash_on_insert(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
index = block->index;
if (!index) {
return;
}
ut_a(block->index == cursor->index);
ut_a(!dict_index_is_ibuf(cursor->index));
index_id = cursor->index->id;
ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
@@ -1654,21 +1667,21 @@ btr_search_update_hash_on_insert(
ins_rec = page_rec_get_next(rec);
next_rec = page_rec_get_next(ins_rec);
offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
offsets = rec_get_offsets(ins_rec, index, offsets,
ULINT_UNDEFINED, &heap);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
offsets = rec_get_offsets(next_rec, index, offsets,
n_fields + (n_bytes > 0), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index_id);
n_bytes, index->id);
}
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(rec, cursor->index, offsets,
offsets = rec_get_offsets(rec, index, offsets,
n_fields + (n_bytes > 0), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
} else {
if (left_side) {
@@ -1676,6 +1689,10 @@ btr_search_update_hash_on_insert(
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
ha_insert_for_fold(table, ins_fold, block, ins_rec);
}
@@ -1689,6 +1706,10 @@ btr_search_update_hash_on_insert(
rw_lock_x_lock(&btr_search_latch);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
if (!left_side) {
@@ -1707,6 +1728,10 @@ check_next_rec:
rw_lock_x_lock(&btr_search_latch);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
ha_insert_for_fold(table, ins_fold, block, ins_rec);
@@ -1722,6 +1747,10 @@ check_next_rec:
rw_lock_x_lock(&btr_search_latch);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
if (!left_side) {
@@ -1729,7 +1758,7 @@ check_next_rec:
ha_insert_for_fold(table, ins_fold, block, ins_rec);
/*
fputs("Hash insert for ", stderr);
dict_index_name_print(stderr, cursor->index);
dict_index_name_print(stderr, index);
fprintf(stderr, " fold %lu\n", ins_fold);
*/
} else {
@@ -1832,21 +1861,20 @@ btr_search_validate(void)
ut_a(!dict_index_is_ibuf(block->index));
offsets = rec_get_offsets((const rec_t*) node->data,
page_index_id = btr_page_get_index_id(block->frame);
offsets = rec_get_offsets(node->data,
block->index, offsets,
block->curr_n_fields
+ (block->curr_n_bytes > 0),
&heap);
page_index_id = btr_page_get_index_id(block->frame);
if (UNIV_UNLIKELY
(!block->is_hashed || node->fold
!= rec_fold((rec_t*)(node->data),
offsets,
block->curr_n_fields,
block->curr_n_bytes,
page_index_id))) {
if (!block->index || node->fold
!= rec_fold(node->data,
offsets,
block->curr_n_fields,
block->curr_n_bytes,
page_index_id)) {
const page_t* page = block->frame;
ok = FALSE;
@@ -1862,20 +1890,19 @@ btr_search_validate(void)
node->data,
(ullint) page_index_id,
(ulong) node->fold,
(ulong) rec_fold((rec_t*)(node->data),
(ulong) rec_fold(node->data,
offsets,
block->curr_n_fields,
block->curr_n_bytes,
page_index_id));
fputs("InnoDB: Record ", stderr);
rec_print_new(stderr, (rec_t*)node->data,
offsets);
rec_print_new(stderr, node->data, offsets);
fprintf(stderr, "\nInnoDB: on that page."
" Page mem address %p, is hashed %lu,"
" Page mem address %p, is hashed %p,"
" n fields %lu, n bytes %lu\n"
"InnoDB: side %lu\n",
(void*) page, (ulong) block->is_hashed,
(void*) page, (void*) block->index,
(ulong) block->curr_n_fields,
(ulong) block->curr_n_bytes,
(ulong) block->curr_left_side);

View file

@@ -873,8 +873,6 @@ buf_block_init(
block->check_index_page_at_flush = FALSE;
block->index = NULL;
block->is_hashed = FALSE;
#ifdef UNIV_DEBUG
block->page.in_page_hash = FALSE;
block->page.in_zip_hash = FALSE;
@@ -1279,108 +1277,47 @@ buf_pool_free(
}
/********************************************************************//**
Drops adaptive hash index for a buffer pool instance. */
static
void
buf_pool_drop_hash_index_instance(
/*==============================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ibool* released_search_latch) /*!< out: flag for signalling
whether the search latch was
released */
{
buf_chunk_t* chunks = buf_pool->chunks;
buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
ulint i;
buf_block_t* block = chunk->blocks;
for (i = chunk->size; i--; block++) {
/* block->is_hashed cannot be modified
when we have an x-latch on btr_search_latch;
see the comment in buf0buf.h */
if (!block->is_hashed) {
continue;
}
/* To follow the latching order, we
have to release btr_search_latch
before acquiring block->latch. */
rw_lock_x_unlock(&btr_search_latch);
/* When we release the search latch,
we must rescan all blocks, because
some may become hashed again. */
*released_search_latch = TRUE;
rw_lock_x_lock(&block->lock);
/* This should be guaranteed by the
callers, which will be holding
btr_search_enabled_mutex. */
ut_ad(!btr_search_enabled);
/* Because we did not buffer-fix the
block by calling buf_block_get_gen(),
it is possible that the block has been
allocated for some other use after
btr_search_latch was released above.
We do not care which file page the
block is mapped to. All we want to do
is to drop any hash entries referring
to the page. */
/* It is possible that
block->page.state != BUF_FILE_PAGE.
Even that does not matter, because
btr_search_drop_page_hash_index() will
check block->is_hashed before doing
anything. block->is_hashed can only
be set on uncompressed file pages. */
btr_search_drop_page_hash_index(block);
rw_lock_x_unlock(&block->lock);
rw_lock_x_lock(&btr_search_latch);
ut_ad(!btr_search_enabled);
}
}
}
/********************************************************************//**
Drops the adaptive hash index. To prevent a livelock, this function
is only to be called while holding btr_search_latch and while
btr_search_enabled == FALSE. */
Clears the adaptive hash index on all pages in the buffer pool. */
UNIV_INTERN
void
buf_pool_drop_hash_index(void)
/*==========================*/
buf_pool_clear_hash_index(void)
/*===========================*/
{
ibool released_search_latch;
ulint p;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(!btr_search_enabled);
do {
ulint i;
for (p = 0; p < srv_buf_pool_instances; p++) {
buf_pool_t* buf_pool = buf_pool_from_array(p);
buf_chunk_t* chunks = buf_pool->chunks;
buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
released_search_latch = FALSE;
while (--chunk >= chunks) {
buf_block_t* block = chunk->blocks;
ulint i = chunk->size;
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
for (; i--; block++) {
dict_index_t* index = block->index;
buf_pool = buf_pool_from_array(i);
/* We can set block->index = NULL
when we have an x-latch on btr_search_latch;
see the comment in buf0buf.h */
buf_pool_drop_hash_index_instance(
buf_pool, &released_search_latch);
if (!index) {
/* Not hashed */
continue;
}
block->index = NULL;
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers = 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
}
}
} while (released_search_latch);
}
}
/********************************************************************//**
@@ -1716,35 +1653,28 @@ buf_page_set_accessed_make_young(
}
/********************************************************************//**
Returns the current state of is_hashed of a page. FALSE if the page is
not in the pool. NOTE that this operation does not fix the page in the
pool if it is found there.
@return TRUE if page hash index is built in search system */
Resets the check_index_page_at_flush field of a page if found in the buffer
pool. */
UNIV_INTERN
ibool
buf_page_peek_if_search_hashed(
/*===========================*/
void
buf_reset_check_index_page_at_flush(
/*================================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */
{
buf_block_t* block;
ibool is_hashed;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
buf_pool_mutex_enter(buf_pool);
block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);
if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
is_hashed = FALSE;
} else {
if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
is_hashed = block->is_hashed;
block->check_index_page_at_flush = FALSE;
}
buf_pool_mutex_exit(buf_pool);
return(is_hashed);
}
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
@@ -1956,7 +1886,6 @@ buf_block_init_low(
block->index = NULL;
block->n_hash_helps = 0;
block->is_hashed = FALSE;
block->n_fields = 1;
block->n_bytes = 0;
block->left_side = TRUE;

View file

@@ -273,7 +273,7 @@ next_page:
mutex_enter(&((buf_block_t*) bpage)->mutex);
is_fixed = bpage->buf_fix_count > 0
|| !((buf_block_t*) bpage)->is_hashed;
|| !((buf_block_t*) bpage)->index;
mutex_exit(&((buf_block_t*) bpage)->mutex);
if (is_fixed) {
@@ -405,7 +405,7 @@ scan_again:
if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
/* This is a compressed-only block
descriptor. Do nothing. */
} else if (((buf_block_t*) bpage)->is_hashed) {
} else if (((buf_block_t*) bpage)->index) {
ulint page_no;
ulint zip_size;
@@ -417,7 +417,7 @@ scan_again:
mutex_exit(block_mutex);
/* Note that the following call will acquire
an S-latch on the page */
and release an X-latch on the page. */
btr_search_drop_page_hash_when_freed(
id, zip_size, page_no);

View file

@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -311,9 +311,8 @@ fsp_fill_free_list(
descriptor page and ibuf bitmap page;
then we do not allocate more extents */
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
UNIV_COLD __attribute__((nonnull));
fsp_header_t* header, /*!< in: space header */
mtr_t* mtr); /*!< in: mtr */
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
@@ -326,20 +325,14 @@ fseg_alloc_free_page_low(
ulint space, /*!< in: space */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
fseg_inode_t* seg_inode, /*!< in/out: segment inode */
fseg_inode_t* seg_inode, /*!< in: segment inode */
ulint hint, /*!< in: hint of which page would be desirable */
byte direction, /*!< in: if the new page is needed because
of an index page split, and records are
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mini-transaction in which the
page should be initialized
(may be the same as mtr), or NULL if it
should not be initialized (the page at hint
was previously freed in mtr) */
__attribute__((warn_unused_result, nonnull(3,6)));
mtr_t* mtr); /*!< in: mtr handle */
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -707,18 +700,17 @@ list, if not free limit == space size. This adding is necessary to make the
descriptor defined, as they are uninitialized above the free limit.
@return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset exceeds the free limit */
UNIV_INLINE __attribute__((nonnull, warn_unused_result))
UNIV_INLINE
xdes_t*
xdes_get_descriptor_with_space_hdr(
/*===============================*/
fsp_header_t* sp_header, /*!< in/out: space header, x-latched
in mtr */
ulint space, /*!< in: space id */
ulint offset, /*!< in: page offset; if equal
to the free limit, we try to
add new extents to the space
free list */
mtr_t* mtr) /*!< in/out: mini-transaction */
fsp_header_t* sp_header,/*!< in/out: space header, x-latched */
ulint space, /*!< in: space id */
ulint offset, /*!< in: page offset;
if equal to the free limit,
we try to add new extents to
the space free list */
mtr_t* mtr) /*!< in: mtr handle */
{
ulint limit;
ulint size;
@@ -726,9 +718,11 @@ xdes_get_descriptor_with_space_hdr(
ulint descr_page_no;
page_t* descr_page;
ut_ad(mtr);
ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space, NULL),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET);
/* Read free limit and space size */
limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT);
@@ -778,7 +772,7 @@ is necessary to make the descriptor defined, as they are uninitialized
above the free limit.
@return pointer to the extent descriptor, NULL if the page does not
exist in the space or if the offset exceeds the free limit */
static __attribute__((nonnull, warn_unused_result))
static
xdes_t*
xdes_get_descriptor(
/*================*/
@@ -787,7 +781,7 @@ xdes_get_descriptor(
or 0 for uncompressed pages */
ulint offset, /*!< in: page offset; if equal to the free limit,
we try to add new extents to the space free list */
mtr_t* mtr) /*!< in/out: mini-transaction */
mtr_t* mtr) /*!< in: mtr handle */
{
buf_block_t* block;
fsp_header_t* sp_header;
@@ -1165,14 +1159,14 @@ fsp_header_get_tablespace_size(void)
Tries to extend a single-table tablespace so that a page would fit in the
data file.
@return TRUE if success */
static UNIV_COLD __attribute__((nonnull, warn_unused_result))
static
ibool
fsp_try_extend_data_file_with_pages(
/*================================*/
ulint space, /*!< in: space */
ulint page_no, /*!< in: page number */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
fsp_header_t* header, /*!< in: space header */
mtr_t* mtr) /*!< in: mtr */
{
ibool success;
ulint actual_size;
@@ -1197,7 +1191,7 @@ fsp_try_extend_data_file_with_pages(
/***********************************************************************//**
Tries to extend the last data file of a tablespace if it is auto-extending.
@return FALSE if not auto-extending */
static UNIV_COLD __attribute__((nonnull))
static
ibool
fsp_try_extend_data_file(
/*=====================*/
@@ -1207,8 +1201,8 @@ fsp_try_extend_data_file(
the actual file size rounded down to
megabyte */
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
fsp_header_t* header, /*!< in: space header */
mtr_t* mtr) /*!< in: mtr */
{
ulint size;
ulint zip_size;
@@ -1344,7 +1338,7 @@ fsp_fill_free_list(
then we do not allocate more extents */
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
mtr_t* mtr) /*!< in: mtr */
{
ulint limit;
ulint size;
@@ -1542,47 +1536,10 @@ fsp_alloc_free_extent(
return(descr);
}
/**********************************************************************//**
Allocates a single free page from a space. */
static __attribute__((nonnull))
void
fsp_alloc_from_free_frag(
/*=====================*/
fsp_header_t* header, /*!< in/out: tablespace header */
xdes_t* descr, /*!< in/out: extent descriptor */
ulint bit, /*!< in: slot to allocate in the extent */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint frag_n_used;
ut_ad(xdes_get_state(descr, mtr) == XDES_FREE_FRAG);
ut_a(xdes_get_bit(descr, XDES_FREE_BIT, bit, mtr));
xdes_set_bit(descr, XDES_FREE_BIT, bit, FALSE, mtr);
/* Update the FRAG_N_USED field */
frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES,
mtr);
frag_n_used++;
mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES,
mtr);
if (xdes_is_full(descr, mtr)) {
/* The fragment is full: move it to another list */
flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
mtr);
xdes_set_state(descr, XDES_FULL_FRAG, mtr);
flst_add_last(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE,
mtr);
mlog_write_ulint(header + FSP_FRAG_N_USED,
frag_n_used - FSP_EXTENT_SIZE, MLOG_4BYTES,
mtr);
}
}
/**********************************************************************//**
Allocates a single free page from a space. The page is marked as used.
@return the page offset, FIL_NULL if no page could be allocated */
static __attribute__((nonnull, warn_unused_result))
static
ulint
fsp_alloc_free_page(
/*================*/
@@ -1590,22 +1547,19 @@ fsp_alloc_free_page(
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint hint, /*!< in: hint of which page would be desirable */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mini-transaction in which the
page should be initialized
(may be the same as mtr) */
mtr_t* mtr) /*!< in: mtr handle */
{
fsp_header_t* header;
fil_addr_t first;
xdes_t* descr;
buf_block_t* block;
ulint free;
ulint frag_n_used;
ulint page_no;
ulint space_size;
ibool success;
ut_ad(mtr);
ut_ad(init_mtr);
header = fsp_get_space_header(space, zip_size, mtr);
@@ -1687,19 +1641,38 @@ fsp_alloc_free_page(
}
}
fsp_alloc_from_free_frag(header, descr, free, mtr);
xdes_set_bit(descr, XDES_FREE_BIT, free, FALSE, mtr);
/* Update the FRAG_N_USED field */
frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES,
mtr);
frag_n_used++;
mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES,
mtr);
if (xdes_is_full(descr, mtr)) {
/* The fragment is full: move it to another list */
flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
mtr);
xdes_set_state(descr, XDES_FULL_FRAG, mtr);
flst_add_last(header + FSP_FULL_FRAG, descr + XDES_FLST_NODE,
mtr);
mlog_write_ulint(header + FSP_FRAG_N_USED,
frag_n_used - FSP_EXTENT_SIZE, MLOG_4BYTES,
mtr);
}
/* Initialize the allocated page to the buffer pool, so that it can
be obtained immediately with buf_page_get without need for a disk
read. */
buf_page_create(space, page_no, zip_size, init_mtr);
buf_page_create(space, page_no, zip_size, mtr);
block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, init_mtr);
block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
/* Prior contents of the page should be ignored */
fsp_init_file_page(block, init_mtr);
fsp_init_file_page(block, mtr);
return(page_no);
}
@@ -1935,7 +1908,7 @@ fsp_alloc_seg_inode_page(
zip_size = dict_table_flags_to_zip_size(
mach_read_from_4(FSP_SPACE_FLAGS + space_header));
page_no = fsp_alloc_free_page(space, zip_size, 0, mtr, mtr);
page_no = fsp_alloc_free_page(space, zip_size, 0, mtr);
if (page_no == FIL_NULL) {
@@ -2347,7 +2320,7 @@ fseg_create_general(
if (page == 0) {
page = fseg_alloc_free_page_low(space, zip_size,
inode, 0, FSP_UP, mtr, mtr);
inode, 0, FSP_UP, mtr);
if (page == FIL_NULL) {
@@ -2596,19 +2569,14 @@ fseg_alloc_free_page_low(
ulint space, /*!< in: space */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
fseg_inode_t* seg_inode, /*!< in/out: segment inode */
fseg_inode_t* seg_inode, /*!< in: segment inode */
ulint hint, /*!< in: hint of which page would be desirable */
byte direction, /*!< in: if the new page is needed because
of an index page split, and records are
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mini-transaction in which the
page should be initialized
(may be the same as mtr), or NULL if it
should not be initialized (the page at hint
was previously freed in mtr) */
mtr_t* mtr) /*!< in: mtr handle */
{
fsp_header_t* space_header;
ulint space_size;
@@ -2619,6 +2587,7 @@ fseg_alloc_free_page_low(
ulint ret_page; /*!< the allocated page offset, FIL_NULL
if could not be allocated */
xdes_t* ret_descr; /*!< the extent of the allocated page */
ibool frag_page_allocated = FALSE;
ibool success;
ulint n;
@@ -2640,8 +2609,6 @@ fseg_alloc_free_page_low(
if (descr == NULL) {
/* Hint outside space or too high above free limit: reset
hint */
ut_a(init_mtr);
/* The file space header page is always allocated. */
hint = 0;
descr = xdes_get_descriptor(space, zip_size, hint, mtr);
}
@@ -2652,20 +2619,15 @@ fseg_alloc_free_page_low(
&& mach_read_from_8(descr + XDES_ID) == seg_id
&& (xdes_get_bit(descr, XDES_FREE_BIT,
hint % FSP_EXTENT_SIZE, mtr) == TRUE)) {
take_hinted_page:
/* 1. We can take the hinted page
=================================*/
ret_descr = descr;
ret_page = hint;
/* Skip the check for extending the tablespace. If the
page hint were not within the size of the tablespace,
we would have got (descr == NULL) above and reset the hint. */
goto got_hinted_page;
/*-----------------------------------------------------------*/
} else if (xdes_get_state(descr, mtr) == XDES_FREE
&& (!init_mtr
|| ((reserved - used < reserved / FSEG_FILLFACTOR)
&& used >= FSEG_FRAG_LIMIT))) {
} else if ((xdes_get_state(descr, mtr) == XDES_FREE)
&& ((reserved - used) < reserved / FSEG_FILLFACTOR)
&& (used >= FSEG_FRAG_LIMIT)) {
/* 2. We allocate the free extent from space and can take
=========================================================
@@ -2683,20 +2645,8 @@ take_hinted_page:
/* Try to fill the segment free list */
fseg_fill_free_list(seg_inode, space, zip_size,
hint + FSP_EXTENT_SIZE, mtr);
goto take_hinted_page;
/*-----------------------------------------------------------*/
} else if (!init_mtr) {
ut_a(xdes_get_state(descr, mtr) == XDES_FREE_FRAG);
fsp_alloc_from_free_frag(space_header, descr,
hint % FSP_EXTENT_SIZE, mtr);
ret_page = hint;
ret_descr = NULL;
/* Put the page in the fragment page array of the segment */
n = fseg_find_free_frag_page_slot(seg_inode, mtr);
ut_a(n != FIL_NULL);
fseg_set_nth_frag_page_no(seg_inode, n, ret_page, mtr);
goto got_hinted_page;
/*-----------------------------------------------------------*/
} else if ((direction != FSP_NO_DIR)
&& ((reserved - used) < reserved / FSEG_FILLFACTOR)
&& (used >= FSEG_FRAG_LIMIT)
@@ -2755,10 +2705,11 @@ take_hinted_page:
} else if (used < FSEG_FRAG_LIMIT) {
/* 6. We allocate an individual page from the space
===================================================*/
ret_page = fsp_alloc_free_page(space, zip_size, hint,
mtr, init_mtr);
ret_page = fsp_alloc_free_page(space, zip_size, hint, mtr);
ret_descr = NULL;
frag_page_allocated = TRUE;
if (ret_page != FIL_NULL) {
/* Put the page in the fragment page array of the
segment */
@@ -2768,10 +2719,6 @@ take_hinted_page:
fseg_set_nth_frag_page_no(seg_inode, n, ret_page,
mtr);
}
/* fsp_alloc_free_page() invoked fsp_init_file_page()
already. */
return(ret_page);
/*-----------------------------------------------------------*/
} else {
/* 7. We allocate a new extent and take its first page
@@ -2819,34 +2766,26 @@ take_hinted_page:
}
}
got_hinted_page:
{
if (!frag_page_allocated) {
/* Initialize the allocated page to buffer pool, so that it
can be obtained immediately with buf_page_get without need
for a disk read */
buf_block_t* block;
ulint zip_size = dict_table_flags_to_zip_size(
mach_read_from_4(FSP_SPACE_FLAGS + space_header));
mtr_t* block_mtr = init_mtr ? init_mtr : mtr;
block = buf_page_create(space, ret_page, zip_size, block_mtr);
block = buf_page_create(space, ret_page, zip_size, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
if (UNIV_UNLIKELY(block != buf_page_get(space, zip_size,
ret_page, RW_X_LATCH,
block_mtr))) {
mtr))) {
ut_error;
}
if (init_mtr) {
/* The prior contents of the page should be ignored */
fsp_init_file_page(block, init_mtr);
}
}
/* The prior contents of the page should be ignored */
fsp_init_file_page(block, mtr);
/* ret_descr == NULL if the block was allocated from free_frag
(XDES_FREE_FRAG) */
if (ret_descr != NULL) {
/* At this point we know the extent and the page offset.
The extent is still in the appropriate list (FSEG_NOT_FULL
or FSEG_FREE), and the page is not yet marked as used. */
@@ -2859,6 +2798,8 @@ got_hinted_page:
fseg_mark_page_used(seg_inode, space, zip_size, ret_page, mtr);
}
buf_reset_check_index_page_at_flush(space, ret_page);
return(ret_page);
}
@@ -2871,7 +2812,7 @@ UNIV_INTERN
ulint
fseg_alloc_free_page_general(
/*=========================*/
fseg_header_t* seg_header,/*!< in/out: segment header */
fseg_header_t* seg_header,/*!< in: segment header */
ulint hint, /*!< in: hint of which page would be desirable */
byte direction,/*!< in: if the new page is needed because
of an index page split, and records are
@@ -2883,11 +2824,7 @@ fseg_alloc_free_page_general(
with fsp_reserve_free_extents, then there
is no need to do the check for this individual
page */
mtr_t* mtr, /*!< in/out: mini-transaction handle */
mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction
in which the page should be initialized,
or NULL if this is a "fake allocation" of
a page that was previously freed in mtr */
mtr_t* mtr) /*!< in: mtr handle */
{
fseg_inode_t* inode;
ulint space;
@@ -2929,8 +2866,7 @@ fseg_alloc_free_page_general(
}
page_no = fseg_alloc_free_page_low(space, zip_size,
inode, hint, direction,
mtr, init_mtr);
inode, hint, direction, mtr);
if (!has_done_reservation) {
fil_space_release_free_extents(space, n_reserved);
}
@@ -2938,6 +2874,28 @@ fseg_alloc_free_page_general(
return(page_no);
}
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
fragmentation.
@return allocated page offset, FIL_NULL if no page could be allocated */
UNIV_INTERN
ulint
fseg_alloc_free_page(
/*=================*/
fseg_header_t* seg_header,/*!< in: segment header */
ulint hint, /*!< in: hint of which page would be desirable */
byte direction,/*!< in: if the new page is needed because
of an index page split, and records are
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
mtr_t* mtr) /*!< in: mtr handle */
{
return(fseg_alloc_free_page_general(seg_header, hint, direction,
FALSE, mtr));
}
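A hypothetical caller of this restored wrapper; it forwards to fseg_alloc_free_page_general() with has_done_reservation == FALSE, so the free-space check is still performed for the individual page (the FSP_UP hint direction is illustrative):
ulint	page_no;
page_no = fseg_alloc_free_page(seg_header, hint, FSP_UP, mtr);
if (page_no == FIL_NULL) {
	/* no page could be allocated */
}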
/**********************************************************************//**
Checks that we have at least 2 frag pages free in the first extent of a
single-table tablespace, and they are also physically initialized to the data

View file

@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -88,40 +88,6 @@ ha_create_func(
return(table);
}
/*************************************************************//**
Empties a hash table and frees the memory heaps. */
UNIV_INTERN
void
ha_clear(
/*=====*/
hash_table_t* table) /*!< in, own: hash table */
{
ulint i;
ulint n;
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */
#ifndef UNIV_HOTBACKUP
/* Free the memory heaps. */
n = table->n_mutexes;
for (i = 0; i < n; i++) {
mem_heap_free(table->heaps[i]);
}
#endif /* !UNIV_HOTBACKUP */
/* Clear the hash table. */
n = hash_get_n_cells(table);
for (i = 0; i < n; i++) {
hash_get_nth_cell(table, i)->node = NULL;
}
}
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
@@ -140,7 +106,7 @@ ha_insert_for_fold_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data) /*!< in: data, must not be NULL */
const rec_t* data) /*!< in: data, must not be NULL */
{
hash_cell_t* cell;
ha_node_t* node;
@@ -153,7 +119,11 @@ ha_insert_for_fold_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ASSERT_HASH_MUTEX_OWN(table, fold);
ut_ad(btr_search_enabled);
hash = hash_calc_hash(fold, table);
@@ -173,7 +143,6 @@ ha_insert_for_fold_func(
prev_block->n_pointers--;
block->n_pointers++;
}
ut_ad(!btr_search_fully_disabled);
# endif /* !UNIV_HOTBACKUP */
prev_node->block = block;
@@ -186,13 +155,6 @@ ha_insert_for_fold_func(
prev_node = prev_node->next;
}
/* We are in the process of disabling hash index, do not add
new chain node */
if (!btr_search_enabled) {
ut_ad(!btr_search_fully_disabled);
return(TRUE);
}
/* We have to allocate a new chain node */
node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));
@@ -250,6 +212,10 @@ ha_delete_hash_node(
{
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
if (table->adaptive) {
@@ -272,11 +238,11 @@ ha_search_and_update_if_found_func(
/*===============================*/
hash_table_t* table, /*!< in/out: hash table */
ulint fold, /*!< in: folded value of the searched data */
void* data, /*!< in: pointer to the data */
const rec_t* data, /*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* new_data)/*!< in: new pointer to the data */
const rec_t* new_data)/*!< in: new pointer to the data */
{
ha_node_t* node;
@@ -286,6 +252,13 @@ ha_search_and_update_if_found_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!btr_search_enabled) {
return;
}
node = ha_search_with_data(table, fold, data);
@@ -322,6 +295,10 @@ ha_remove_all_nodes_to_page(
ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
ASSERT_HASH_MUTEX_OWN(table, fold);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(btr_search_enabled);
node = ha_chain_get_first(table, fold);

Some files were not shown because too many files have changed in this diff