mirror of https://github.com/MariaDB/server.git
synced 2025-01-19 13:32:33 +01:00

commit 2ebb45512f: merged 5.1-5.1.29-rc -> bug 38912
30 changed files with 746 additions and 177 deletions
@ -139,15 +139,6 @@ drop table t1,t2,t3;
# table
#
CREATE TABLE t1(a INT) ENGINE=BLACKHOLE;
# NOTE: After replacing open_ltable() with open_and_lock_tables() in
# handle_delayed_insert() to fix problems with MERGE tables (Bug#26379),
# problems with INSERT DELAYED and BLACKHOLE popped up. open_ltable()
# does not check if the binlogging capabilities of the statement and the
# table match. So the below used to succeed. But since INSERT DELAYED
# switches to row-based logging in mixed mode and BLACKHOLE cannot do
# row-based logging, it could not really work. Until this problem is
# correctly fixed, we have that error here.
--error ER_BINLOG_LOGGING_IMPOSSIBLE
INSERT DELAYED INTO t1 VALUES(1);
DROP TABLE t1;
32  mysql-test/extra/rpl_tests/rpl_blackhole.test  Normal file
@ -0,0 +1,32 @@
# Check replication of one statement assuming that the engine on the
# slave is a blackhole engine.
#
# Input:
# $statement   Statement to evaluate, it is assumed to change t1
#
# 1. Evaluate statement on master, it is assumed to change t1
# 2. Wait for statement to be processed on slave
# 3. SELECT from table t1 to see what was written
# 4. Compare position on slave before executing statement and after
#    executing statement. If difference is >0, then something was
#    written to the binary log on the slave.

connection slave;
let $before = query_get_value("SHOW MASTER STATUS", Position, 1);

--echo [on master]
connection master;
eval $statement;

--echo [on slave]
sync_slave_with_master;
--echo # Expect 0
SELECT COUNT(*) FROM t1;
let $after = query_get_value("SHOW MASTER STATUS", Position, 1);
let $something_written = `select $after - $before != 0`;
if ($something_written) {
--echo >>> Something was written to binary log <<<
}
if (!$something_written) {
--echo >>> Nothing was written to binary log <<<
}
@ -312,7 +312,7 @@ sub mtr_report_stats ($) {
         /Slave: According to the master's version/ or
         /Slave: Column [0-9]* type mismatch/ or
         /Slave: Error .* doesn't exist/ or
         /Slave: Error .*Deadlock found/ or
         /Slave: Deadlock found/ or
         /Slave: Error .*Unknown table/ or
         /Slave: Error in Write_rows event: / or
         /Slave: Field .* of table .* has no default value/ or
@ -1416,4 +1416,41 @@ SELECT AVG(a), CAST(AVG(a) AS DECIMAL) FROM t1;
AVG(a)	CAST(AVG(a) AS DECIMAL)
15	15
DROP TABLE t1;
CREATE TABLE derived1 (a bigint(21));
INSERT INTO derived1 VALUES (2);
CREATE TABLE D (
pk int(11) NOT NULL AUTO_INCREMENT,
int_nokey int(11) DEFAULT NULL,
int_key int(11) DEFAULT NULL,
filler blob,
PRIMARY KEY (pk),
KEY int_key (int_key)
);
INSERT INTO D VALUES
(39,40,4,repeat(' X', 42)),
(43,56,4,repeat(' X', 42)),
(47,12,4,repeat(' X', 42)),
(71,28,4,repeat(' X', 42)),
(76,54,4,repeat(' X', 42)),
(83,45,4,repeat(' X', 42)),
(105,53,12,NULL);
SELECT
(SELECT COUNT( int_nokey )
FROM derived1 AS X
WHERE
X.int_nokey < 61
GROUP BY pk
LIMIT 1)
FROM D AS X
WHERE X.int_key < 13
GROUP BY int_nokey LIMIT 1;
(SELECT COUNT( int_nokey )
FROM derived1 AS X
WHERE
X.int_nokey < 61
GROUP BY pk
LIMIT 1)
1
DROP TABLE derived1;
DROP TABLE D;
End of 5.0 tests
@ -1637,4 +1637,74 @@ select count(*) from t1, t2 where t1.createdDate = t2.createdDate;
count(*)
1
drop table t1, t2;
create table t1 (s1 int) partition by hash(s1) partitions 2;
create index i on t1 (s1);
insert into t1 values (1);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
s1
1
1
1
1
1
1
1
1
drop table t1;
create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (1);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
s1
1
1
1
1
1
1
1
1
drop table t1;
create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (20);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
s1
20
20
20
20
20
20
20
20
drop table t1;
create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (1), (2), (3), (4), (5), (6), (7), (8);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
insert into t1 select s1 from t1 where s1=3;
select count(*) from t1;
count(*)
288
drop table t1;
End of 5.1 tests
@ -161,3 +161,30 @@ SELECT @a, @b;
1	1
DROP TABLE t2, t1;
End of 5.0 tests
BUG#31612
Trigger fired multiple times leads to gaps in auto_increment sequence
create table t1 (a int, val char(1)) engine=InnoDB;
create table t2 (b int auto_increment primary key,
val char(1)) engine=InnoDB;
create trigger t1_after_insert after
insert on t1 for each row insert into t2 set val=NEW.val;
insert into t1 values ( 123, 'a'), ( 123, 'b'), ( 123, 'c'),
(123, 'd'), (123, 'e'), (123, 'f'), (123, 'g');
insert into t1 values ( 654, 'a'), ( 654, 'b'), ( 654, 'c'),
(654, 'd'), (654, 'e'), (654, 'f'), (654, 'g');
select * from t2 order by b;
b	val
1	a
2	b
3	c
4	d
5	e
6	f
7	g
8	a
9	b
10	c
11	d
12	e
13	f
14	g
@ -43,8 +43,6 @@ INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2);
UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c;
UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f;
ERROR HY000: Binary logging not possible. Message: Statement cannot be written atomically since more than one engine involved and at least one engine is self-logging
UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c;
ERROR HY000: Binary logging not possible. Message: Statement cannot be written atomically since more than one engine involved and at least one engine is self-logging
TRUNCATE t1m;
TRUNCATE t1b;
TRUNCATE t1n;

@ -68,20 +66,21 @@ RESET MASTER;
SET SESSION BINLOG_FORMAT=ROW;
INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2);
INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2);
ERROR HY000: Binary logging not possible. Message: Row-based format required for this statement, but not allowed by this combination of engines
INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2);
UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c;
ERROR HY000: Binary logging not possible. Message: Row-based format required for this statement, but not allowed by this combination of engines
UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f;
ERROR HY000: Binary logging not possible. Message: Statement cannot be written atomically since more than one engine involved and at least one engine is self-logging
UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c;
ERROR HY000: Binary logging not possible. Message: Row-based format required for this statement, but not allowed by this combination of engines
ERROR HY000: Binary logging not possible. Message: Statement cannot be written atomically since more than one engine involved and at least one engine is self-logging
show binlog events from <binlog_start>;
Log_name	Pos	Event_type	Server_id	End_log_pos	Info
master-bin.000001	#	Query	#	#	use `test`; BEGIN
master-bin.000001	#	Table_map	#	#	table_id: # (test.t1m)
master-bin.000001	#	Write_rows	#	#	table_id: # flags: STMT_END_F
master-bin.000001	#	Query	#	#	use `test`; COMMIT
master-bin.000001	#	Query	#	#	use `test`; BEGIN
master-bin.000001	#	Table_map	#	#	table_id: # (test.t1b)
master-bin.000001	#	Write_rows	#	#	table_id: # flags: STMT_END_F
master-bin.000001	#	Query	#	#	use `test`; COMMIT
master-bin.000001	#	Query	#	#	BEGIN
master-bin.000001	#	Table_map	#	#	table_id: # (test.t1n)
master-bin.000001	#	Table_map	#	#	table_id: # (mysql.ndb_apply_status)

@ -141,7 +141,6 @@ master-bin.000001 # Query # # use `test`; COMMIT
drop table t1,t2,t3;
CREATE TABLE t1(a INT) ENGINE=BLACKHOLE;
INSERT DELAYED INTO t1 VALUES(1);
ERROR HY000: Binary logging not possible. Message: Row-based format required for this statement, but not allowed by this combination of engines
DROP TABLE t1;
CREATE TABLE t1(a INT, b INT) ENGINE=BLACKHOLE;
DELETE FROM t1 WHERE a=10;
@ -69,9 +69,6 @@ UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f;

#UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f;

error ER_BINLOG_LOGGING_IMPOSSIBLE;
UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c;

TRUNCATE t1m;
TRUNCATE t1b;
TRUNCATE t1n;

@ -83,12 +80,10 @@ RESET MASTER;
SET SESSION BINLOG_FORMAT=ROW;

INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2);
error ER_BINLOG_LOGGING_IMPOSSIBLE;

INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2);
INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2);

error ER_BINLOG_LOGGING_IMPOSSIBLE;
UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c;
error ER_BINLOG_LOGGING_IMPOSSIBLE;
UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f;
@ -12,6 +12,5 @@
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table

ndb_partition_error2 : HF is not sure if the test can work as intended on all the platforms
ndb_index_ordered : Bug#38370 The test ndb.ndb_index_ordered fails with the community features on

# the below testcase has been reworked to avoid the bug; test contains comment, keep bug open
100  mysql-test/suite/rpl/r/rpl_blackhole.result  Normal file
@ -0,0 +1,100 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (a INT, b INT, c INT);
CREATE TABLE t2 (a INT, b INT, c INT);
ALTER TABLE t1 ENGINE=BLACKHOLE;
INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4);
[on master]
INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4);
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
DELETE FROM t1 WHERE a % 2 = 0 AND b = 1;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
INSERT INTO t1 SELECT * FROM t2;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
INSERT INTO t2 SELECT * FROM t1;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b);
[on master]
INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4);
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
DELETE FROM t1 WHERE a % 2 = 0 AND b = 2;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a);
[on master]
INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4);
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
[on master]
DELETE FROM t1 WHERE a % 2 = 0 AND b = 3;
[on slave]
# Expect 0
SELECT COUNT(*) FROM t1;
COUNT(*)
0
>>> Something was written to binary log <<<
80  mysql-test/suite/rpl/t/rpl_blackhole.test  Normal file
@ -0,0 +1,80 @@
# PURPOSE. Test that blackhole works with replication in all three
# modes: STATEMENT, MIXED, and ROW.
#
# METHOD. We start by creating a table on the master and then change
# the engine to use blackhole on the slave.
#
# After insert/update/delete of one or more rows, the test then
# proceeds to check that replication is running after replicating a
# change, that the blackhole engine does not contain anything (which
# is just a check that the correct engine is used), and that something
# is written to the binary log.
#
# We check INSERT, UPDATE, and DELETE statements for tables with no
# key (forcing a range search on the slave), primary keys (using a
# primary key lookup), and index/key with multiple matches (forcing an
# index search).

source include/master-slave.inc;
source include/have_blackhole.inc;

# We start with no primary key
CREATE TABLE t1 (a INT, b INT, c INT);
CREATE TABLE t2 (a INT, b INT, c INT);

sync_slave_with_master;
ALTER TABLE t1 ENGINE=BLACKHOLE;

connection master;
INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4);
sync_slave_with_master;

# Test insert, no primary key
let $statement = INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4);
source extra/rpl_tests/rpl_blackhole.test;

# Test update, no primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;

# Test delete, no primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;

# Test INSERT-SELECT into Blackhole, no primary key
let $statement = INSERT INTO t1 SELECT * FROM t2;
source extra/rpl_tests/rpl_blackhole.test;

# Test INSERT-SELECT from Blackhole, no primary key
let $statement = INSERT INTO t2 SELECT * FROM t1;
source extra/rpl_tests/rpl_blackhole.test;

connection master;
ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b);

# Test insert, primary key
let $statement = INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4);
source extra/rpl_tests/rpl_blackhole.test;

# Test update, primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;

# Test delete, primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;

connection master;
ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a);

# Test insert, key
let $statement = INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4);
source extra/rpl_tests/rpl_blackhole.test;

# Test update, key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;

# Test delete, key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;
@ -933,5 +933,45 @@ SELECT AVG(a), CAST(AVG(a) AS DECIMAL) FROM t1;

DROP TABLE t1;

#
# Bug #37348: Crash in or immediately after JOIN::make_sum_func_list
#

CREATE TABLE derived1 (a bigint(21));
INSERT INTO derived1 VALUES (2);

CREATE TABLE D (
pk int(11) NOT NULL AUTO_INCREMENT,
int_nokey int(11) DEFAULT NULL,
int_key int(11) DEFAULT NULL,
filler blob,
PRIMARY KEY (pk),
KEY int_key (int_key)
);

INSERT INTO D VALUES
(39,40,4,repeat(' X', 42)),
(43,56,4,repeat(' X', 42)),
(47,12,4,repeat(' X', 42)),
(71,28,4,repeat(' X', 42)),
(76,54,4,repeat(' X', 42)),
(83,45,4,repeat(' X', 42)),
(105,53,12,NULL);

SELECT
(SELECT COUNT( int_nokey )
FROM derived1 AS X
WHERE
X.int_nokey < 61
GROUP BY pk
LIMIT 1)
FROM D AS X
WHERE X.int_key < 13
GROUP BY int_nokey LIMIT 1;

DROP TABLE derived1;
DROP TABLE D;

###
--echo End of 5.0 tests
@ -1791,4 +1791,53 @@ select count(*) from t1, t2 where t1.createdDate = t2.createdDate;

drop table t1, t2;

#
# Bug #38005 Partitions: error with insert select
#

create table t1 (s1 int) partition by hash(s1) partitions 2;
create index i on t1 (s1);
insert into t1 values (1);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
drop table t1;

create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (1);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
drop table t1;

create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (20);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
select * from t1;
drop table t1;

create table t1 (s1 int) partition by range(s1)
(partition pa1 values less than (10),
partition pa2 values less than MAXVALUE);
create index i on t1 (s1);
insert into t1 values (1), (2), (3), (4), (5), (6), (7), (8);
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1;
insert into t1 select s1 from t1 order by s1 desc;
insert into t1 select s1 from t1 where s1=3;
select count(*) from t1;
drop table t1;

--echo End of 5.1 tests
@ -162,3 +162,16 @@ DROP TABLE t2, t1;

--echo End of 5.0 tests

--echo BUG#31612
--echo Trigger fired multiple times leads to gaps in auto_increment sequence
create table t1 (a int, val char(1)) engine=InnoDB;
create table t2 (b int auto_increment primary key,
val char(1)) engine=InnoDB;
create trigger t1_after_insert after
insert on t1 for each row insert into t2 set val=NEW.val;
insert into t1 values ( 123, 'a'), ( 123, 'b'), ( 123, 'c'),
(123, 'd'), (123, 'e'), (123, 'f'), (123, 'g');
insert into t1 values ( 654, 'a'), ( 654, 'b'), ( 654, 'c'),
(654, 'd'), (654, 'e'), (654, 'f'), (654, 'g');
select * from t2 order by b;
@ -1717,6 +1717,14 @@ error:

void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
{
  /*
    Fix for bug#38751: some engines need info calls in ALTER.
    Archive needs this since it flushes in ::info().
    HA_STATUS_AUTO is optimized so it will not always be forwarded
    to all partitions, but HA_STATUS_VARIABLE will.
  */
  info(HA_STATUS_VARIABLE);

  info(HA_STATUS_AUTO);

  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
@ -4282,6 +4290,17 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
    break;
  case partition_index_first:
    DBUG_PRINT("info", ("index_first on partition %d", i));
    /*
      The MyISAM engine can fail if we call index_first() when indexes
      are disabled; that happens if the table is empty.
      Here we use file->stats.records instead of file->records() because
      file->records() is supposed to return an EXACT count, and it can be
      possibly slow. We don't need an exact number; an approximate one,
      from the last ::info() call, is sufficient.
    */
    if (file->stats.records == 0)
    {
      error= HA_ERR_END_OF_FILE;
      break;
    }
    error= file->index_first(buf);
    break;
  case partition_index_first_unordered:

@ -4369,10 +4388,32 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
                     m_start_key.flag);
    break;
  case partition_index_first:
    /*
      The MyISAM engine can fail if we call index_first() when indexes
      are disabled; that happens if the table is empty.
      Here we use file->stats.records instead of file->records() because
      file->records() is supposed to return an EXACT count, and it can be
      possibly slow. We don't need an exact number; an approximate one,
      from the last ::info() call, is sufficient.
    */
    if (file->stats.records == 0)
    {
      error= HA_ERR_END_OF_FILE;
      break;
    }
    error= file->index_first(rec_buf_ptr);
    reverse_order= FALSE;
    break;
  case partition_index_last:
    /*
      The MyISAM engine can fail if we call index_last() when indexes
      are disabled; that happens if the table is empty.
      (Same stats.records rationale as above.)
    */
    if (file->stats.records == 0)
    {
      error= HA_ERR_END_OF_FILE;
      break;
    }
    error= file->index_last(rec_buf_ptr);
    reverse_order= TRUE;
    break;
@ -2165,7 +2165,12 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
  - In both cases, the reserved intervals are remembered in
    thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
    binlogging; the last reserved interval is remembered in
    auto_inc_interval_for_cur_row.
    auto_inc_interval_for_cur_row. The number of reserved intervals is
    remembered in auto_inc_intervals_count. It differs from the number of
    elements in thd->auto_inc_intervals_in_cur_stmt_for_binlog() because the
    latter list is cumulative over all statements forming one binlog event
    (when stored functions and triggers are used), and collapses two
    contiguous intervals in one (see its append() method).

  The idea is that generated auto_increment values are predictable and
  independent of the column values in the table. This is needed to be

@ -2249,8 +2254,6 @@ int handler::update_auto_increment()
      handler::estimation_rows_to_insert was set by
      handler::ha_start_bulk_insert(); if 0 it means "unknown".
    */
    uint nb_already_reserved_intervals=
      thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
    ulonglong nb_desired_values;
    /*
      If an estimation was given to the engine:
@ -2262,17 +2265,17 @@ int handler::update_auto_increment()
      start, starting from AUTO_INC_DEFAULT_NB_ROWS.
      Don't go beyond a max to not reserve "way too much" (because
      reservation means potentially losing unused values).
      Note that in prelocked mode no estimation is given.
    */
    if (nb_already_reserved_intervals == 0 &&
        (estimation_rows_to_insert > 0))
    if ((auto_inc_intervals_count == 0) && (estimation_rows_to_insert > 0))
      nb_desired_values= estimation_rows_to_insert;
    else /* go with the increasing defaults */
    {
      /* avoid overflow in formula, with this if() */
      if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
      if (auto_inc_intervals_count <= AUTO_INC_DEFAULT_NB_MAX_BITS)
      {
        nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
          (1 << nb_already_reserved_intervals);
        nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
          (1 << auto_inc_intervals_count);
        set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
      }
      else
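The hunk above switches the growth heuristic to the new per-statement counter: with no row estimate, each successive reservation doubles in size, clamped at a maximum. A minimal standalone C++ sketch of that growth rule (the constant values here are assumptions for illustration; the real ones live in the server headers):

#include <algorithm>
#include <cstdio>

// Assumed values, for illustration only.
static const unsigned long long AUTO_INC_DEFAULT_NB_ROWS= 1;
static const unsigned AUTO_INC_DEFAULT_NB_MAX_BITS= 16;
static const unsigned long long AUTO_INC_DEFAULT_NB_MAX=
  (1ULL << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1;

// Mirrors the hunk: the more intervals this statement has already
// reserved (auto_inc_intervals_count), the larger the next reservation.
static unsigned long long desired_values(unsigned auto_inc_intervals_count)
{
  if (auto_inc_intervals_count > AUTO_INC_DEFAULT_NB_MAX_BITS)
    return AUTO_INC_DEFAULT_NB_MAX;        // avoid shift overflow
  return std::min(AUTO_INC_DEFAULT_NB_ROWS << auto_inc_intervals_count,
                  AUTO_INC_DEFAULT_NB_MAX);
}

int main()
{
  for (unsigned n= 0; n < 6; n++)
    printf("%llu ", desired_values(n));    // prints: 1 2 4 8 16 32
  return 0;
}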
@ -2285,7 +2288,7 @@ int handler::update_auto_increment()
                         &nb_reserved_values);
    if (nr == ~(ulonglong) 0)
      DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED);  // Mark failure

    /*
      That rounding below should not be needed when all engines actually
      respect offset and increment in get_auto_increment(). But they don't

@ -2296,7 +2299,7 @@ int handler::update_auto_increment()
    */
    nr= compute_next_insert_id(nr-1, variables);
  }

  if (table->s->next_number_keypart == 0)
  {
    /* We must defer the appending until "nr" has been possibly truncated */

@ -2340,8 +2343,9 @@ int handler::update_auto_increment()
  {
    auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
                                          variables->auto_increment_increment);
    auto_inc_intervals_count++;
    /* Row-based replication does not need to store intervals in binlog */
    if (!thd->current_stmt_binlog_row_based)
    if (mysql_bin_log.is_open() && !thd->current_stmt_binlog_row_based)
      thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
                                                            auto_inc_interval_for_cur_row.values(),
                                                            variables->auto_increment_increment);

@ -2461,6 +2465,7 @@ void handler::ha_release_auto_increment()
  release_auto_increment();
  insert_id_for_cur_row= 0;
  auto_inc_interval_for_cur_row.replace(0, 0, 0);
  auto_inc_intervals_count= 0;
  if (next_insert_id > 0)
  {
    next_insert_id= 0;
@ -1129,6 +1129,13 @@ public:
    inserter.
  */
  Discrete_interval auto_inc_interval_for_cur_row;
  /**
    Number of reserved auto-increment intervals. Serves as a heuristic
    when we have no estimation of how many records the statement will insert:
    the more intervals we have reserved, the bigger the next one. Reset in
    handler::ha_release_auto_increment().
  */
  uint auto_inc_intervals_count;

  handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
    :table_share(share_arg), table(0),

@ -1137,7 +1144,8 @@ public:
    ref_length(sizeof(my_off_t)),
    ft_handler(0), inited(NONE),
    locked(FALSE), implicit_emptied(0),
    pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0)
    pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
    auto_inc_intervals_count(0)
    {}
  virtual ~handler(void)
  {
@ -1338,6 +1338,7 @@ public:
    else
      Item_ident::print(str, query_type);
  }
  virtual Ref_Type ref_type() { return AGGREGATE_REF; }
};

@ -2126,7 +2126,7 @@ class Item_ref :public Item_ident
protected:
  void set_properties();
public:
  enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF };
  enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF, AGGREGATE_REF };
  Field *result_field;  /* Save result here */
  Item **ref;
  Item_ref(Name_resolution_context *context_arg,
@ -4011,11 +4011,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
      DBUG_PRINT("info",("number of auto_inc intervals: %u",
                         thd->auto_inc_intervals_in_cur_stmt_for_binlog.
                         nb_elements()));
      /*
        If the auto_increment was second in a table's index (possible with
        MyISAM or BDB) (table->next_number_keypart != 0), such event is
        in fact not necessary. We could avoid logging it.
      */
      Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
                         thd->auto_inc_intervals_in_cur_stmt_for_binlog.
                         minimum());
@ -8613,10 +8613,10 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
      the necessary bits on the bytes and don't set the filler bits
      correctly.
    */
    my_ptrdiff_t const pos=
      table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
    table->record[0][pos]= 0xFF;

    if (table->s->null_bytes > 0)
      table->record[0][table->s->null_bytes - 1]|=
        256U - (1U << table->s->last_null_bit_pos);

    if ((error= table->file->index_read_map(table->record[0], m_key,
                                            HA_WHOLE_KEY,
                                            HA_READ_KEY_EXACT)))
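The removed code stamped 0xFF over the whole last null byte; the replacement ORs in only the filler bits above last_null_bit_pos, leaving the real NULL flags untouched. A small worked example of the mask expression (the bit position is a made-up value for illustration):

#include <cstdio>

int main()
{
  unsigned last_null_bit_pos= 3;  // example: 3 NULL-flag bits used in the last byte
  unsigned mask= 256U - (1U << last_null_bit_pos);
  // 256 - 8 = 248 = 0xF8: bits 3..7 (the unused filler bits) are set,
  // while bits 0..2 (the real NULL flags) are preserved by the OR.
  printf("0x%02X\n", mask);
  return 0;
}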
@ -7537,13 +7537,13 @@ static void mysql_init_variables(void)
  have_community_features = SHOW_OPTION_YES;
#else
  have_community_features = SHOW_OPTION_NO;
#endif
  global_system_variables.ndb_index_stat_enable=FALSE;
  max_system_variables.ndb_index_stat_enable=TRUE;
  global_system_variables.ndb_index_stat_cache_entries=32;
  max_system_variables.ndb_index_stat_cache_entries=~0L;
  global_system_variables.ndb_index_stat_update_freq=20;
  max_system_variables.ndb_index_stat_update_freq=~0L;
#endif
#ifdef HAVE_OPENSSL
  have_ssl=SHOW_OPTION_YES;
#else
@ -3145,10 +3145,12 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
                   ppar->subpart_fields););
      /* Find the subpartition (it's HASH/KEY so we always have one) */
      partition_info *part_info= ppar->part_info;
      uint32 subpart_id= part_info->get_subpartition_id(part_info);

      uint32 part_id, subpart_id;

      if (part_info->get_subpartition_id(part_info, &subpart_id))
        return 0;

      /* Mark this partition as used in each subpartition. */
      uint32 part_id;
      while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
             NOT_A_PARTITION_ID)
      {

@ -25,8 +25,9 @@ class partition_info;
typedef int (*get_part_id_func)(partition_info *part_info,
                                uint32 *part_id,
                                longlong *func_value);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);

typedef int (*get_subpart_id_func)(partition_info *part_info,
                                   uint32 *part_id);

struct st_ddl_log_memory_entry;

class partition_info : public Sql_alloc

@ -1524,6 +1524,9 @@ public:
    then the latter INSERT will insert no rows
    (first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
    in the binlog is still needed; the list's minimum will contain 3.
    This variable is cumulative: if several statements are written to binlog
    as one (stored functions or triggers are used) this list is the
    concatenation of all intervals reserved by all statements.
  */
  Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
  /* Used by replication and SET INSERT_ID */
@ -73,10 +73,8 @@ static int get_part_id_charset_func_subpart(partition_info *part_info,
static int get_part_part_id_charset_func(partition_info *part_info,
                                         uint32 *part_id,
                                         longlong *func_value);
static uint32 get_subpart_id_charset_func(partition_info *part_info);
int get_partition_id_list(partition_info *part_info,
                          uint32 *part_id,
                          longlong *func_value);
static int get_subpart_id_charset_func(partition_info *part_info,
                                       uint32 *part_id);
int get_partition_id_list(partition_info *part_info,
                          uint32 *part_id,
                          longlong *func_value);

@ -119,10 +117,14 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
int get_partition_id_list_sub_linear_key(partition_info *part_info,
                                         uint32 *part_id,
                                         longlong *func_value);
uint32 get_partition_id_hash_sub(partition_info *part_info);
uint32 get_partition_id_key_sub(partition_info *part_info);
uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
uint32 get_partition_id_linear_key_sub(partition_info *part_info);
int get_partition_id_hash_sub(partition_info *part_info,
                              uint32 *part_id);
int get_partition_id_key_sub(partition_info *part_info,
                             uint32 *part_id);
int get_partition_id_linear_hash_sub(partition_info *part_info,
                                     uint32 *part_id);
int get_partition_id_linear_key_sub(partition_info *part_info,
                                    uint32 *part_id);
static uint32 get_next_partition_via_walking(PARTITION_ITERATOR*);
static void set_up_range_analysis_info(partition_info *part_info);
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*);
@ -2232,17 +2234,24 @@ bool partition_key_modified(TABLE *table, const MY_BITMAP *fields)
  SYNOPSIS
    part_val_int()
    item_expr        The item expression to evaluate
    out:result       The value of the partition function,
                     LONGLONG_MIN if any null value in function
  RETURN VALUES
    The value of the partition function, LONGLONG_MIN if any null value
    in function
    TRUE             Error in val_int()
    FALSE            Ok
*/

static inline longlong part_val_int(Item *item_expr)
static inline int part_val_int(Item *item_expr, longlong *result)
{
  longlong value= item_expr->val_int();
  *result= item_expr->val_int();
  if (item_expr->null_value)
    value= LONGLONG_MIN;
  return value;
  {
    if (current_thd->is_error())
      return TRUE;
    else
      *result= LONGLONG_MIN;
  }
  return FALSE;
}
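The new convention separates the three outcomes the old sentinel return conflated: a normal value, SQL NULL (still mapped to LONGLONG_MIN), and an evaluation error. A caller-side sketch of the difference, using simplified stand-in types rather than the server's Item and THD:

#include <climits>

// Stand-in for the server's Item; not the real class.
struct Item
{
  long long value;
  bool null_value;
  bool evaluation_error;          // stand-in for current_thd->is_error()
  long long val_int() { return value; }
};

// Same shape as the hunk's new part_val_int(): non-zero on error,
// zero otherwise, with NULL still encoded as LLONG_MIN in *result.
static inline int part_val_int(Item *item_expr, long long *result)
{
  *result= item_expr->val_int();
  if (item_expr->null_value)
  {
    if (item_expr->evaluation_error)
      return 1;                   // TRUE: error in val_int()
    *result= LLONG_MIN;           // plain NULL keeps the old sentinel
  }
  return 0;                       // FALSE: ok
}

static int example_caller(Item *part_expr)
{
  long long part_func_value;
  if (part_val_int(part_expr, &part_func_value))
    return 1;                     // propagate an error code, as the hunks below do
  /* ... map part_func_value to a partition id ... */
  return 0;
}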
@ -2319,24 +2328,29 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
    get_part_id_hash()
    no_parts         Number of hash partitions
    part_expr        Item tree of hash function
    out:func_value   Value of hash function
    out:part_id      The returned partition id
    out:func_value   Value of hash function

  RETURN VALUE
    Calculated partition id
    != 0             Error code
    FALSE            Success
*/

inline
static uint32 get_part_id_hash(uint no_parts,
                               Item *part_expr,
                               longlong *func_value)
static int get_part_id_hash(uint no_parts,
                            Item *part_expr,
                            uint32 *part_id,
                            longlong *func_value)
{
  longlong int_hash_id;
  DBUG_ENTER("get_part_id_hash");

  *func_value= part_val_int(part_expr);
  if (part_val_int(part_expr, func_value))
    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);

  int_hash_id= *func_value % no_parts;

  DBUG_RETURN(int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id);
  *part_id= int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id;
  DBUG_RETURN(FALSE);
}

@ -2349,24 +2363,29 @@ static uint32 get_part_id_hash(uint no_parts,
                     desired information is given
    no_parts         Number of hash partitions
    part_expr        Item tree of hash function
    out:part_id      The returned partition id
    out:func_value   Value of hash function

  RETURN VALUE
    Calculated partition id
    != 0             Error code
    0                OK
*/

inline
static uint32 get_part_id_linear_hash(partition_info *part_info,
                                      uint no_parts,
                                      Item *part_expr,
                                      longlong *func_value)
static int get_part_id_linear_hash(partition_info *part_info,
                                   uint no_parts,
                                   Item *part_expr,
                                   uint32 *part_id,
                                   longlong *func_value)
{
  DBUG_ENTER("get_part_id_linear_hash");

  *func_value= part_val_int(part_expr);
  DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
                                           part_info->linear_hash_mask,
                                           no_parts));
  if (part_val_int(part_expr, func_value))
    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);

  *part_id= get_part_id_from_linear_hash(*func_value,
                                         part_info->linear_hash_mask,
                                         no_parts);
  DBUG_RETURN(FALSE);
}
@ -2503,49 +2522,7 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
  }
  return;
}
/*
  This function is used to calculate the partition id where all partition
  fields have been prepared to point to a record where the partition field
  values are bound.

  SYNOPSIS
    get_partition_id()
    part_info       A reference to the partition_info struct where all the
                    desired information is given
    out:part_id     The partition id is returned through this pointer
    out:func_value  Value of partition function (longlong)

  RETURN VALUE
    part_id                    Partition id of partition that would contain
                               row with given values of PF-fields
    HA_ERR_NO_PARTITION_FOUND  The fields of the partition function didn't
                               fit into any partition and thus the values of
                               the PF-fields are not allowed.

  DESCRIPTION
    A routine used from write_row, update_row and delete_row from any
    handler supporting partitioning. It is also a support routine for
    get_partition_set used to find the set of partitions needed to scan
    for a certain index scan or full table scan.

    It is actually 14 different variants of this function which are called
    through a function pointer.

    get_partition_id_list
    get_partition_id_range
    get_partition_id_hash_nosub
    get_partition_id_key_nosub
    get_partition_id_linear_hash_nosub
    get_partition_id_linear_key_nosub
    get_partition_id_range_sub_hash
    get_partition_id_range_sub_key
    get_partition_id_range_sub_linear_hash
    get_partition_id_range_sub_linear_key
    get_partition_id_list_sub_hash
    get_partition_id_list_sub_key
    get_partition_id_list_sub_linear_hash
    get_partition_id_list_sub_linear_key
*/

/*
  This function is used to calculate the main partition to use in the case of

@ -2557,14 +2534,13 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
    part_info       A reference to the partition_info struct where all the
                    desired information is given
    out:part_id     The partition id is returned through this pointer
    out: func_value The value calculated by partition function
    out:func_value  The value calculated by partition function

  RETURN VALUE
    part_id                    Partition id of partition that would contain
                               row with given values of PF-fields
    HA_ERR_NO_PARTITION_FOUND  The fields of the partition function didn't
                               fit into any partition and thus the values of
                               the PF-fields are not allowed.
    0                          OK

  DESCRIPTION

@ -2640,13 +2616,14 @@ static int get_part_part_id_charset_func(partition_info *part_info,
}

static uint32 get_subpart_id_charset_func(partition_info *part_info)
static int get_subpart_id_charset_func(partition_info *part_info,
                                       uint32 *part_id)
{
  int res;
  copy_to_part_field_buffers(part_info->subpart_charset_field_array,
                             part_info->subpart_field_buffers,
                             part_info->restore_subpart_field_ptrs);
  res= part_info->get_subpartition_id_charset(part_info);
  res= part_info->get_subpartition_id_charset(part_info, part_id);
  restore_part_field_pointers(part_info->subpart_charset_field_array,
                              part_info->restore_subpart_field_ptrs);
  return res;
@ -2661,11 +2638,15 @@ int get_partition_id_list(partition_info *part_info,
  int list_index;
  int min_list_index= 0;
  int max_list_index= part_info->no_list_values - 1;
  longlong part_func_value= part_val_int(part_info->part_expr);
  longlong part_func_value;
  int error= part_val_int(part_info->part_expr, &part_func_value);
  longlong list_value;
  bool unsigned_flag= part_info->part_expr->unsigned_flag;
  DBUG_ENTER("get_partition_id_list");

  if (error)
    goto notfound;

  if (part_info->part_expr->null_value)
  {
    if (part_info->has_null_value)

@ -2809,10 +2790,14 @@ int get_partition_id_range(partition_info *part_info,
  uint min_part_id= 0;
  uint max_part_id= max_partition;
  uint loc_part_id;
  longlong part_func_value= part_val_int(part_info->part_expr);
  longlong part_func_value;
  int error= part_val_int(part_info->part_expr, &part_func_value);
  bool unsigned_flag= part_info->part_expr->unsigned_flag;
  DBUG_ENTER("get_partition_id_range");

  if (error)
    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);

  if (part_info->part_expr->null_value)
  {
    *part_id= 0;
@ -2970,9 +2955,8 @@ int get_partition_id_hash_nosub(partition_info *part_info,
                                uint32 *part_id,
                                longlong *func_value)
{
  *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
                             func_value);
  return 0;
  return get_part_id_hash(part_info->no_parts, part_info->part_expr,
                          part_id, func_value);
}

@ -2980,9 +2964,8 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
                                       uint32 *part_id,
                                       longlong *func_value)
{
  *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
                                    part_info->part_expr, func_value);
  return 0;
  return get_part_id_linear_hash(part_info, part_info->no_parts,
                                 part_info->part_expr, part_id, func_value);
}

@ -3016,6 +2999,8 @@ int get_partition_id_range_sub_hash(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_range_sub_hash");
  LINT_INIT(loc_part_id);
  LINT_INIT(sub_part_id);

  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                              func_value))))

@ -3023,8 +3008,12 @@ int get_partition_id_range_sub_hash(partition_info *part_info,
    DBUG_RETURN(error);
  }
  no_subparts= part_info->no_subparts;
  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
                                &local_func_value);
  if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
                                        &sub_part_id, &local_func_value))))
  {
    DBUG_RETURN(error);
  }

  *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
  DBUG_RETURN(0);
}

@ -3039,6 +3028,8 @@ int get_partition_id_range_sub_linear_hash(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_range_sub_linear_hash");
  LINT_INIT(loc_part_id);
  LINT_INIT(sub_part_id);

  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                              func_value))))

@ -3046,9 +3037,14 @@ int get_partition_id_range_sub_linear_hash(partition_info *part_info,
    DBUG_RETURN(error);
  }
  no_subparts= part_info->no_subparts;
  sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
                                       part_info->subpart_expr,
                                       &local_func_value);
  if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
                                               part_info->subpart_expr,
                                               &sub_part_id,
                                               &local_func_value))))
  {
    DBUG_RETURN(error);
  }

  *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
  DBUG_RETURN(0);
}

@ -3063,6 +3059,7 @@ int get_partition_id_range_sub_key(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_range_sub_key");
  LINT_INIT(loc_part_id);

  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                              func_value))))

@ -3086,6 +3083,7 @@ int get_partition_id_range_sub_linear_key(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_range_sub_linear_key");
  LINT_INIT(loc_part_id);

  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                              func_value))))

@ -3110,6 +3108,7 @@ int get_partition_id_list_sub_hash(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_list_sub_hash");
  LINT_INIT(sub_part_id);

  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
                                             func_value))))

@ -3117,8 +3116,12 @@ int get_partition_id_list_sub_hash(partition_info *part_info,
    DBUG_RETURN(error);
  }
  no_subparts= part_info->no_subparts;
  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
                                &local_func_value);
  if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
                                        &sub_part_id, &local_func_value))))
  {
    DBUG_RETURN(error);
  }

  *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
  DBUG_RETURN(0);
}

@ -3133,6 +3136,7 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
  longlong local_func_value;
  int error;
  DBUG_ENTER("get_partition_id_list_sub_linear_hash");
  LINT_INIT(sub_part_id);

  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
                                             func_value))))

@ -3140,9 +3144,14 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
    DBUG_RETURN(error);
  }
  no_subparts= part_info->no_subparts;
  sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
                                       part_info->subpart_expr,
                                       &local_func_value);
  if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
                                               part_info->subpart_expr,
                                               &sub_part_id,
                                               &local_func_value))))
  {
    DBUG_RETURN(error);
  }

  *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
  DBUG_RETURN(0);
}
@ -3219,36 +3228,43 @@ int get_partition_id_list_sub_linear_key(partition_info *part_info,
    get_partition_id_linear_key_sub
*/

uint32 get_partition_id_hash_sub(partition_info *part_info)
int get_partition_id_hash_sub(partition_info *part_info,
                              uint32 *part_id)
{
  longlong func_value;
  return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
                          &func_value);
                          part_id, &func_value);
}

uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
int get_partition_id_linear_hash_sub(partition_info *part_info,
                                     uint32 *part_id)
{
  longlong func_value;
  return get_part_id_linear_hash(part_info, part_info->no_subparts,
                                 part_info->subpart_expr, &func_value);
                                 part_info->subpart_expr, part_id,
                                 &func_value);
}

uint32 get_partition_id_key_sub(partition_info *part_info)
int get_partition_id_key_sub(partition_info *part_info,
                             uint32 *part_id)
{
  longlong func_value;
  return get_part_id_key(part_info->subpart_field_array,
                         part_info->no_subparts, &func_value);
  *part_id= get_part_id_key(part_info->subpart_field_array,
                            part_info->no_subparts, &func_value);
  return FALSE;
}

uint32 get_partition_id_linear_key_sub(partition_info *part_info)
int get_partition_id_linear_key_sub(partition_info *part_info,
                                    uint32 *part_id)
{
  longlong func_value;
  return get_part_id_linear_key(part_info,
                                part_info->subpart_field_array,
                                part_info->no_subparts, &func_value);
  *part_id= get_part_id_linear_key(part_info,
                                   part_info->subpart_field_array,
                                   part_info->no_subparts, &func_value);
  return FALSE;
}
@ -3337,37 +3353,40 @@ static bool check_part_func_bound(Field **ptr)
    buf              A buffer that can be used to evaluate the partition function
    key_info         The index object
    key_spec         A key_range containing key and key length
    out:part_id      The returned partition id

  RETURN VALUES
    part_id          Subpartition id to use
    TRUE             All fields in partition function are set
    FALSE            Not all fields in partition function are set

  DESCRIPTION
    Use key buffer to set-up record in buf, move field pointers and
    get the partition identity and restore field pointers afterwards.
*/

static uint32 get_sub_part_id_from_key(const TABLE *table,uchar *buf,
                                       KEY *key_info,
                                       const key_range *key_spec)
static int get_sub_part_id_from_key(const TABLE *table,uchar *buf,
                                    KEY *key_info,
                                    const key_range *key_spec,
                                    uint32 *part_id)
{
  uchar *rec0= table->record[0];
  partition_info *part_info= table->part_info;
  uint32 part_id;
  int res;
  DBUG_ENTER("get_sub_part_id_from_key");

  key_restore(buf, (uchar*)key_spec->key, key_info, key_spec->length);
  if (likely(rec0 == buf))
  {
    part_id= part_info->get_subpartition_id(part_info);
    res= part_info->get_subpartition_id(part_info, part_id);
  }
  else
  {
    Field **part_field_array= part_info->subpart_field_array;
    set_field_ptr(part_field_array, buf, rec0);
    part_id= part_info->get_subpartition_id(part_info);
    res= part_info->get_subpartition_id(part_info, part_id);
    set_field_ptr(part_field_array, rec0, buf);
  }
  DBUG_RETURN(part_id);
  DBUG_RETURN(res);
}

/*
@ -3586,7 +3605,13 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
    else if (part_info->is_sub_partitioned())
    {
      if (part_info->all_fields_in_SPF.is_set(index))
        sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
      {
        if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
        {
          part_spec->start_part= no_parts;
          DBUG_VOID_RETURN;
        }
      }
      else if (part_info->all_fields_in_PPF.is_set(index))
      {
        if (get_part_id_from_key(table,buf,key_info,

@ -3632,7 +3657,14 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
      else if (part_info->is_sub_partitioned())
      {
        if (check_part_func_bound(part_info->subpart_field_array))
          sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
        {
          if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
          {
            part_spec->start_part= no_parts;
            clear_indicator_in_key_fields(key_info);
            DBUG_VOID_RETURN;
          }
        }
        else if (check_part_func_bound(part_info->part_field_array))
        {
          if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))

@ -6836,9 +6868,11 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
      field->set_null();
      if (is_subpart)
      {
        part_id= part_info->get_subpartition_id(part_info);
        init_single_partition_iterator(part_id, part_iter);
        return 1; /* Ok, iterator initialized */
        if (!part_info->get_subpartition_id(part_info, &part_id))
        {
          init_single_partition_iterator(part_id, part_iter);
          return 1; /* Ok, iterator initialized */
        }
      }
      else
      {

@ -7007,13 +7041,18 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
{
  Field *field= part_iter->part_info->subpart_field_array[0];
  uint32 res;
  if (part_iter->field_vals.cur == part_iter->field_vals.end)
  {
    part_iter->field_vals.cur= part_iter->field_vals.start;
    return NOT_A_PARTITION_ID;
  }
  field->store(part_iter->field_vals.cur++, FALSE);
  return part_iter->part_info->get_subpartition_id(part_iter->part_info);
  if (part_iter->part_info->get_subpartition_id(part_iter->part_info,
                                                &res))
    return NOT_A_PARTITION_ID;
  return res;
}
@ -14804,6 +14804,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
  Item *pos;
  List_iterator_fast<Item> li(all_fields);
  Copy_field *copy= NULL;
  IF_DBUG(Copy_field *copy_start);
  res_selected_fields.empty();
  res_all_fields.empty();
  List_iterator_fast<Item> itr(res_all_fields);

@ -14816,12 +14817,19 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
    goto err2;

  param->copy_funcs.empty();
  IF_DBUG(copy_start= copy);
  for (i= 0; (pos= li++); i++)
  {
    Field *field;
    uchar *tmp;
    Item *real_pos= pos->real_item();
    if (real_pos->type() == Item::FIELD_ITEM)
    /*
      Aggregate functions can be substituted for fields (by e.g. temp
      tables). We need to filter those substituted fields out.
    */
    if (real_pos->type() == Item::FIELD_ITEM &&
        !(real_pos != pos &&
          ((Item_ref *)pos)->ref_type() == Item_ref::AGGREGATE_REF))
    {
      Item_field *item;
      if (!(item= new Item_field(thd, ((Item_field*) real_pos))))

@ -14868,6 +14876,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
      goto err;
    if (copy)
    {
      DBUG_ASSERT (param->field_count > (uint) (copy - copy_start));
      copy->set(tmp, item->result_field);
      item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1);
#ifdef HAVE_purify
@ -18,6 +18,7 @@
#pragma implementation        // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "mysql_priv.h"
#include "ha_blackhole.h"

@ -100,6 +101,24 @@ int ha_blackhole::write_row(uchar * buf)
  DBUG_RETURN(table->next_number_field ? update_auto_increment() : 0);
}

int ha_blackhole::update_row(const uchar *old_data, uchar *new_data)
{
  DBUG_ENTER("ha_blackhole::update_row");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

int ha_blackhole::delete_row(const uchar *buf)
{
  DBUG_ENTER("ha_blackhole::delete_row");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

int ha_blackhole::rnd_init(bool scan)
{
  DBUG_ENTER("ha_blackhole::rnd_init");
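These new methods, and the read methods changed below, repeat the same guard: when the slave SQL thread applies a row-based event, thd->query is NULL, and the handler reports success so replication through a blackhole table keeps running. A hypothetical way to name that guard, shown as a sketch against the server's THD API rather than the committed code (the commit itself inlines the test in every method):

// Hypothetical helper, assuming the server's THD, ha_thd(), and
// SYSTEM_THREAD_SLAVE_SQL declarations are in scope.
static inline bool applying_row_events_on_slave(THD *thd)
{
  /* Row events carry no query text, so thd->query is NULL only there. */
  return thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL;
}

int ha_blackhole::update_row(const uchar *old_data, uchar *new_data)
{
  DBUG_ENTER("ha_blackhole::update_row");
  if (applying_row_events_on_slave(ha_thd()))
    DBUG_RETURN(0);                  /* pretend success; data goes nowhere */
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}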
@ -110,6 +129,9 @@ int ha_blackhole::rnd_init(bool scan)
int ha_blackhole::rnd_next(uchar *buf)
{
  DBUG_ENTER("ha_blackhole::rnd_next");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}

@ -189,6 +211,9 @@ int ha_blackhole::index_read_map(uchar * buf, const uchar * key,
                                 enum ha_rkey_function find_flag)
{
  DBUG_ENTER("ha_blackhole::index_read");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}

@ -198,6 +223,9 @@ int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key,
                                     enum ha_rkey_function find_flag)
{
  DBUG_ENTER("ha_blackhole::index_read_idx");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}

@ -206,6 +234,9 @@ int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key,
                                      key_part_map keypart_map)
{
  DBUG_ENTER("ha_blackhole::index_read_last");
  THD *thd= ha_thd();
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query == NULL)
    DBUG_RETURN(0);
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}
@ -53,7 +53,7 @@ public:
  ulonglong table_flags() const
  {
    return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
           HA_BINLOG_STMT_CAPABLE |
           HA_BINLOG_STMT_CAPABLE | HA_BINLOG_ROW_CAPABLE |
           HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
           HA_FILE_BASED | HA_CAN_GEOMETRY | HA_CAN_INSERT_DELAYED);
  }

@ -72,7 +72,6 @@ public:
  uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
  int open(const char *name, int mode, uint test_if_locked);
  int close(void);
  int write_row(uchar * buf);
  int rnd_init(bool scan);
  int rnd_next(uchar *buf);
  int rnd_pos(uchar * buf, uchar *pos);

@ -94,4 +93,8 @@ public:
  THR_LOCK_DATA **store_lock(THD *thd,
                             THR_LOCK_DATA **to,
                             enum thr_lock_type lock_type);
private:
  virtual int write_row(uchar *buf);
  virtual int update_row(const uchar *old_data, uchar *new_data);
  virtual int delete_row(const uchar *buf);
};