Ingo Struewing 2008-10-01 12:27:57 +02:00
commit 9a65aa3857
37 changed files with 1946 additions and 137 deletions

View file

@ -2396,8 +2396,15 @@ static uint get_table_structure(char *table, char *db, char *table_type,
fprintf(sql_file, ",\n %s %s",
quote_name(row[0], name_buff, 0), row[1]);
}
/*
Stand-in tables are always MyISAM tables as the default
engine might have a column-limit that's lower than the
number of columns in the view, and MyISAM support is
guaranteed to be in the server anyway.
*/
fprintf(sql_file,
"\n) */;\n"
"\n) ENGINE=MyISAM */;\n"
"SET character_set_client = @saved_cs_client;\n");
check_io(sql_file);
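For context, this is the mysqldump change in action. A minimal sketch of the output the fixed code emits for a view's stand-in table (view name `v1` and its column are illustrative; the full expected output appears in the mysqldump result file below):

SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v1` (
  `a` int(11)
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;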

View file

@ -29,6 +29,14 @@ extern ulong locks_immediate,locks_waited ;
enum thr_lock_type { TL_IGNORE=-1,
TL_UNLOCK, /* UNLOCK ANY LOCK */
/*
Parser only! At open_tables() becomes TL_READ or
TL_READ_NO_INSERT depending on the binary log format
(SBR/RBR) and on the table category (log table).
Used for tables that are read by statements which
modify tables.
*/
TL_READ_DEFAULT,
TL_READ, /* Read lock */
TL_READ_WITH_SHARED_LOCKS,
/* Higher priority than TL_WRITE. Allows concurrent insert */
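A short SQL sketch of where TL_READ_DEFAULT comes into play (table names are illustrative): the parser assigns it to the source table of a statement that reads one table while modifying another, and open_tables() later resolves it as the comment above describes:

-- t2 is parsed with TL_READ_DEFAULT; at open_tables() it becomes:
--   TL_READ_NO_INSERT under statement-based binlogging (SBR)
--   TL_READ under row-based binlogging (RBR) or for log tables
INSERT INTO t1 SELECT * FROM t2;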

View file

@ -90,4 +90,9 @@ Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t2.a' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t2`.`a` AS `a`,(select count(0) AS `COUNT(*)` from `test`.`t1` where ((`test`.`t1`.`b` = `test`.`t2`.`a`) and (concat(`test`.`t1`.`b`,`test`.`t1`.`c`) = concat('0',`test`.`t2`.`a`,'01')))) AS `x` from `test`.`t2` order by `test`.`t2`.`a`
DROP TABLE t1,t2;
CREATE TABLE t1 (a TIMESTAMP);
INSERT INTO t1 VALUES (NOW()),(NOW()),(NOW());
SELECT * FROM t1 WHERE a > '2008-01-01' AND a = '0000-00-00';
a
DROP TABLE t1;
End of 5.0 tests

View file

@ -328,4 +328,81 @@ create event
очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
on schedule every 2 year do select 1;
ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
create event event_35981 on schedule every 6 month on completion preserve
disable
do
select 1;
The following SELECTs should all give 1
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
count(*)
1
alter event event_35981 enable;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
count(*)
1
alter event event_35981 on completion not preserve;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
count(*)
1
alter event event_35981 disable;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
count(*)
1
alter event event_35981 on completion preserve;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
count(*)
1
drop event event_35981;
create event event_35981 on schedule every 6 month disable
do
select 1;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
count(*)
1
drop event event_35981;
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion not preserve
do
select 1;
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00';
ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
drop event event_35981;
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion not preserve
do
select 1;
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion preserve;
Warnings:
Note 1544 Event execution time is in the past. Event has been disabled
drop event event_35981;
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion preserve
do
select 1;
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00';
Warnings:
Note 1544 Event execution time is in the past. Event has been disabled
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion not preserve;
ERROR HY000: Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion preserve;
Warnings:
Note 1544 Event execution time is in the past. Event has been disabled
drop event event_35981;
drop database events_test;

View file

@ -832,6 +832,35 @@ Execute select '000 001 002 003 004 005 006 007 008 009010 011 012 013 014 015 0
Query set global general_log = off
deallocate prepare long_query;
set global general_log = @old_general_log_state;
DROP TABLE IF EXISTS log_count;
DROP TABLE IF EXISTS slow_log_copy;
DROP TABLE IF EXISTS general_log_copy;
CREATE TABLE log_count (count BIGINT(21));
SET @old_general_log_state = @@global.general_log;
SET @old_slow_log_state = @@global.slow_query_log;
SET GLOBAL general_log = ON;
SET GLOBAL slow_query_log = ON;
CREATE TABLE slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.slow_log));
DROP TABLE slow_log_copy;
CREATE TABLE general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.general_log));
DROP TABLE general_log_copy;
SET GLOBAL general_log = OFF;
SET GLOBAL slow_query_log = OFF;
CREATE TABLE slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.slow_log));
DROP TABLE slow_log_copy;
CREATE TABLE general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.general_log));
DROP TABLE general_log_copy;
SET GLOBAL general_log = @old_general_log_state;
SET GLOBAL slow_query_log = @old_slow_log_state;
DROP TABLE log_count;
SET @old_slow_log_state = @@global.slow_query_log;
SET SESSION long_query_time = 0;
SET GLOBAL slow_query_log = ON;

View file

@ -277,3 +277,16 @@ drop table t3;
drop table t4;
drop table t5;
drop table t6;
SELECT @@global.storage_engine INTO @old_engine;
SET GLOBAL storage_engine=InnoDB;
CREATE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 VALUES();
SELECT COUNT(*) FROM v1;
COUNT(*)
1
SELECT COUNT(*) FROM v1;
COUNT(*)
1
DROP VIEW v1;
DROP TABLE t1;
SET GLOBAL storage_engine=@old_engine;

View file

@ -2004,7 +2004,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v2` (
`a` varchar(30)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
/*!50001 DROP TABLE `v2`*/;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
@ -2101,7 +2101,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v1` (
`a` int(11)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
/*!50001 DROP TABLE `v1`*/;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
@ -2175,7 +2175,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v2` (
`a` varchar(30)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
/*!50001 DROP TABLE `v2`*/;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
@ -2291,7 +2291,7 @@ SET character_set_client = utf8;
`a` int(11),
`b` int(11),
`c` varchar(30)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
DROP TABLE IF EXISTS `v2`;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
@ -2299,7 +2299,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v2` (
`a` int(11)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
DROP TABLE IF EXISTS `v3`;
/*!50001 DROP VIEW IF EXISTS `v3`*/;
@ -2309,7 +2309,7 @@ SET character_set_client = utf8;
`a` int(11),
`b` int(11),
`c` varchar(30)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
/*!50001 DROP TABLE `v1`*/;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
@ -3046,7 +3046,7 @@ SET character_set_client = utf8;
`a` int(11),
`b` varchar(32),
`c` varchar(32)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
DROP TABLE IF EXISTS `v1`;
/*!50001 DROP VIEW IF EXISTS `v1`*/;
@ -3056,7 +3056,7 @@ SET character_set_client = utf8;
`a` int(11),
`b` varchar(32),
`c` varchar(32)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
DROP TABLE IF EXISTS `v2`;
/*!50001 DROP VIEW IF EXISTS `v2`*/;
@ -3066,7 +3066,7 @@ SET character_set_client = utf8;
`a` int(11),
`b` varchar(32),
`c` varchar(32)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
USE `test`;
@ -3446,7 +3446,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v1` (
`id` int(11)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
USE `mysqldump_test_db`;
@ -3506,7 +3506,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `nasishnasifu` (
`id` bigint(20) unsigned
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
USE `mysqldump_tables`;
@ -3964,7 +3964,7 @@ SET @saved_cs_client = @@character_set_client;
SET character_set_client = utf8;
/*!50001 CREATE TABLE `v1` (
`id` int(11)
) */;
) ENGINE=MyISAM */;
SET character_set_client = @saved_cs_client;
USE `mysqldump_test_db`;

View file

@ -742,3 +742,23 @@ WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where
DROP TABLE t1;
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t2 (
defid int(10) unsigned NOT NULL,
day int(10) unsigned NOT NULL,
count int(10) unsigned NOT NULL,
filler char(200),
KEY (defid,day)
)
PARTITION BY RANGE (day) (
PARTITION p7 VALUES LESS THAN (20070401) ,
PARTITION p8 VALUES LESS THAN (20070501));
insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B;
insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B;
insert into t2 values(52, 20070321, 123, 'filler') ;
insert into t2 values(52, 20070322, 456, 'filler') ;
select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid;
sum(count)
579
drop table t1, t2;

View file

@ -121,8 +121,8 @@ select @a:=0;
select @a+0, @a:=@a+0+count(*), count(*), @a+0 from t1 group by i;
@a+0 @a:=@a+0+count(*) count(*) @a+0
0 1 1 0
1 3 2 0
3 6 3 0
0 2 2 0
0 3 3 0
set @a=0;
select @a,@a:="hello",@a,@a:=3,@a,@a:="hello again" from t1 group by i;
@a @a:="hello" @a @a:=3 @a @a:="hello again"
@ -370,4 +370,33 @@ select @rownum := @rownum + 1 as row,
@prev_score := a as score
from t1 order by score desc;
drop table t1;
create table t1(b bigint);
insert into t1 (b) values (10), (30), (10);
set @var := 0;
select if(b=@var, 999, b) , @var := b from t1 order by b;
if(b=@var, 999, b) @var := b
10 10
999 10
30 30
drop table t1;
create temporary table t1 (id int);
insert into t1 values (2), (3), (3), (4);
set @lastid=-1;
select @lastid != id, @lastid, @lastid := id from t1;
@lastid != id @lastid @lastid := id
1 -1 2
1 2 3
0 3 3
1 3 4
drop table t1;
create temporary table t1 (id bigint);
insert into t1 values (2), (3), (3), (4);
set @lastid=-1;
select @lastid != id, @lastid, @lastid := id from t1;
@lastid != id @lastid @lastid := id
1 -1 2
1 2 3
0 3 3
1 3 4
drop table t1;
End of 5.1 tests

View file

@ -0,0 +1,71 @@
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
SET GLOBAL BINLOG_FORMAT = STATEMENT;
SET SESSION BINLOG_FORMAT = STATEMENT;
CREATE TABLE t1 (a INT);
CREATE TABLE t2 LIKE t1;
select @@SESSION.BINLOG_FORMAT;
@@SESSION.BINLOG_FORMAT
STATEMENT
INSERT INTO t1 VALUES(1);
INSERT INTO t2 VALUES(2);
#
# Ensure that INSERT INTO .. SELECT FROM under SBR takes a read
# lock that will prevent the source table from being modified.
#
# con1
SELECT GET_LOCK('Bug#34306', 120);
GET_LOCK('Bug#34306', 120)
1
# con2
PREPARE stmt FROM "INSERT INTO t1 SELECT * FROM t2 WHERE GET_LOCK('Bug#34306', 120)";
EXECUTE stmt;;
# default
INSERT INTO t2 VALUES (3);;
# con1
SELECT RELEASE_LOCK('Bug#34306');
RELEASE_LOCK('Bug#34306')
1
# con2
SELECT RELEASE_LOCK('Bug#34306');
RELEASE_LOCK('Bug#34306')
1
# default
#
# Ensure that INSERT INTO .. SELECT FROM prepared under SBR does
# not prevent the source table from being modified if under RBR.
#
# con2
SET SESSION BINLOG_FORMAT = ROW;
# con1
SELECT GET_LOCK('Bug#34306', 120);
GET_LOCK('Bug#34306', 120)
1
# con2
EXECUTE stmt;;
# default
# con1
INSERT INTO t2 VALUES (4);
SELECT RELEASE_LOCK('Bug#34306');
RELEASE_LOCK('Bug#34306')
1
# con2
# default
# Show binlog events
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t1
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t2
master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT)
master-bin.000001 # Query # # use `test`; CREATE TABLE t2 LIKE t1
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES(1)
master-bin.000001 # Query # # use `test`; INSERT INTO t2 VALUES(2)
master-bin.000001 # Query # # use `test`; INSERT INTO t1 SELECT * FROM t2 WHERE GET_LOCK('Bug#34306', 120)
master-bin.000001 # Query # # use `test`; INSERT INTO t2 VALUES (3)
master-bin.000001 # Query # # use `test`; INSERT INTO t2 VALUES (4)
master-bin.000001 # Query # # use `test`; BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # use `test`; COMMIT
DROP TABLE t1;
DROP TABLE t2;

View file

@ -0,0 +1,107 @@
--source include/have_log_bin.inc
--source include/have_binlog_format_row_or_statement.inc
# Get rid of previous tests' binlog
--disable_query_log
reset master;
--enable_query_log
#
# Bug#34306: Can't make copy of log tables when server binary log is enabled
#
# This is an additional test for Bug#34306 in order to ensure that INSERT INTO
# .. SELECT FROM is properly replicated under SBR and RBR and that the proper
# read lock type is acquired.
#
--disable_warnings
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
--enable_warnings
SET GLOBAL BINLOG_FORMAT = STATEMENT;
SET SESSION BINLOG_FORMAT = STATEMENT;
CREATE TABLE t1 (a INT);
CREATE TABLE t2 LIKE t1;
select @@SESSION.BINLOG_FORMAT;
INSERT INTO t1 VALUES(1);
INSERT INTO t2 VALUES(2);
--connect(con1,localhost,root,,)
--connect(con2,localhost,root,,)
--echo #
--echo # Ensure that INSERT INTO .. SELECT FROM under SBR takes a read
--echo # lock that will prevent the source table from being modified.
--echo #
--connection con1
--echo # con1
SELECT GET_LOCK('Bug#34306', 120);
--connection con2
--echo # con2
PREPARE stmt FROM "INSERT INTO t1 SELECT * FROM t2 WHERE GET_LOCK('Bug#34306', 120)";
--send EXECUTE stmt;
--connection default
--echo # default
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE
state = "User lock" AND
info = "INSERT INTO t1 SELECT * FROM t2 WHERE GET_LOCK('Bug#34306', 120)";
--source include/wait_condition.inc
--send INSERT INTO t2 VALUES (3);
--connection con1
--echo # con1
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE
state = "Locked" and info = "INSERT INTO t2 VALUES (3)";
--source include/wait_condition.inc
SELECT RELEASE_LOCK('Bug#34306');
--connection con2
--echo # con2
--reap
SELECT RELEASE_LOCK('Bug#34306');
--connection default
--echo # default
--reap
--echo #
--echo # Ensure that INSERT INTO .. SELECT FROM prepared under SBR does
--echo # not prevent the source table from being modified if under RBR.
--echo #
--connection con2
--echo # con2
SET SESSION BINLOG_FORMAT = ROW;
--connection con1
--echo # con1
SELECT GET_LOCK('Bug#34306', 120);
--connection con2
--echo # con2
--send EXECUTE stmt;
--connection default
--echo # default
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE
state = "User lock" AND
info = "INSERT INTO t1 SELECT * FROM t2 WHERE GET_LOCK('Bug#34306', 120)";
--source include/wait_condition.inc
--connection con1
--echo # con1
INSERT INTO t2 VALUES (4);
SELECT RELEASE_LOCK('Bug#34306');
--connection con2
--echo # con2
--reap
--disconnect con1
--disconnect con2
--connection default
--echo # default
--echo # Show binlog events
source include/show_binlog_events.inc;
DROP TABLE t1;
DROP TABLE t2;

View file

@ -76,4 +76,13 @@ FROM t2 ORDER BY a;
DROP TABLE t1,t2;
#
# Bug #39353: Multiple conditions on timestamp column crashes server
#
CREATE TABLE t1 (a TIMESTAMP);
INSERT INTO t1 VALUES (NOW()),(NOW()),(NOW());
SELECT * FROM t1 WHERE a > '2008-01-01' AND a = '0000-00-00';
DROP TABLE t1;
--echo End of 5.0 tests

View file

@ -491,12 +491,12 @@ SELECT DISTINCT fruit_id, fruit_name INTO OUTFILE
'../tmp/data1.tmp' FROM t1 WHERE fruit_name = 'APPLE';
LOAD DATA INFILE '../tmp/data1.tmp' INTO TABLE t2;
--error 0,1
--remove_file $MYSQL_TEST_DIR/var/tmp/data1.tmp
--remove_file $MYSQLTEST_VARDIR/tmp/data1.tmp
SELECT DISTINCT @v19:= fruit_id, @v20:= fruit_name INTO OUTFILE
'../tmp/data2.tmp' FROM t1 WHERE fruit_name = 'APPLE';
LOAD DATA INFILE '../tmp/data2.tmp' INTO TABLE t2;
--remove_file $MYSQL_TEST_DIR/var/tmp/data2.tmp
--remove_file $MYSQLTEST_VARDIR/tmp/data2.tmp
SELECT @v19, @v20;
SELECT * FROM t2;

View file

@ -411,6 +411,108 @@ create event
очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
on schedule every 2 year do select 1;
#
# Bug#35981: ALTER EVENT causes the server to change the PRESERVE option.
#
create event event_35981 on schedule every 6 month on completion preserve
disable
do
select 1;
echo The following SELECTs should all give 1;
# show current ON_COMPLETION
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
# show ON_COMPLETION remains "PRESERVE" when not given in ALTER EVENT
alter event event_35981 enable;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
# show we can change ON_COMPLETION
alter event event_35981 on completion not preserve;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
# show ON_COMPLETION remains "NOT PRESERVE" when not given in ALTER EVENT
alter event event_35981 disable;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
# show we can change ON_COMPLETION
alter event event_35981 on completion preserve;
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'PRESERVE';
drop event event_35981;
create event event_35981 on schedule every 6 month disable
do
select 1;
# show that the defaults for CREATE EVENT are still correct (NOT PRESERVE)
select count(*) from information_schema.events
where event_schema = database() and event_name = 'event_35981' and
on_completion = 'NOT PRESERVE';
drop event event_35981;
# show that backdating doesn't break
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion not preserve
do
select 1;
# should fail thanks to the NOT PRESERVE above
--error ER_EVENT_CANNOT_ALTER_IN_THE_PAST
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00';
drop event event_35981;
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion not preserve
do
select 1;
# succeed with warning
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion preserve;
drop event event_35981;
create event event_35981 on schedule every 1 hour starts current_timestamp
on completion preserve
do
select 1;
# this should succeed thanks to the PRESERVE above, but gives a warning.
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00';
# this should fail, as the event would have passed already
--error ER_EVENT_CANNOT_ALTER_IN_THE_PAST
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion not preserve;
# should succeed giving a warning
alter event event_35981 on schedule every 1 hour starts '1999-01-01 00:00:00'
ends '1999-01-02 00:00:00' on completion preserve;
drop event event_35981;
#
# End of tests
#
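As a recap of what these cases pin down (a sketch, not part of the test file; the event name is illustrative): ALTER EVENT without an explicit ON COMPLETION clause must keep the stored setting instead of silently resetting it to NOT PRESERVE:

create event e1 on schedule every 6 month
on completion preserve disable do select 1;
alter event e1 enable;                     -- ON COMPLETION stays PRESERVE
alter event e1 on completion not preserve; -- an explicit change still works
drop event e1;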

View file

@ -936,6 +936,52 @@ select command_type, argument from mysql.general_log where thread_id = @thread_i
deallocate prepare long_query;
set global general_log = @old_general_log_state;
#
# Bug#34306: Can't make copy of log tables when server binary log is enabled
#
--disable_warnings
DROP TABLE IF EXISTS log_count;
DROP TABLE IF EXISTS slow_log_copy;
DROP TABLE IF EXISTS general_log_copy;
--enable_warnings
CREATE TABLE log_count (count BIGINT(21));
SET @old_general_log_state = @@global.general_log;
SET @old_slow_log_state = @@global.slow_query_log;
SET GLOBAL general_log = ON;
SET GLOBAL slow_query_log = ON;
CREATE TABLE slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.slow_log));
DROP TABLE slow_log_copy;
CREATE TABLE general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.general_log));
DROP TABLE general_log_copy;
SET GLOBAL general_log = OFF;
SET GLOBAL slow_query_log = OFF;
CREATE TABLE slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO slow_log_copy SELECT * FROM mysql.slow_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.slow_log));
DROP TABLE slow_log_copy;
CREATE TABLE general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO general_log_copy SELECT * FROM mysql.general_log;
INSERT INTO log_count (count) VALUES ((SELECT count(*) FROM mysql.general_log));
DROP TABLE general_log_copy;
SET GLOBAL general_log = @old_general_log_state;
SET GLOBAL slow_query_log = @old_slow_log_state;
DROP TABLE log_count;
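The log-table block above is the user-visible side of the fix: with the binary log enabled, copying a log table used to fail because INSERT ... SELECT grabbed TL_READ_NO_INSERT on the source table. read_lock_type_for_table() (added later in this commit) now picks plain TL_READ for log tables (TABLE_CATEGORY_PERFORMANCE), so a copy like this works; a minimal sketch:

SET GLOBAL general_log = ON;
-- Source log table is now locked with TL_READ, not TL_READ_NO_INSERT
CREATE TABLE general_log_copy SELECT * FROM mysql.general_log;
DROP TABLE general_log_copy;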
#
# Bug #31700: thd->examined_row_count not incremented for 'const' type queries
#

File diff suppressed because it is too large.

View file

@ -807,24 +807,24 @@ DROP TABLE t1;
#
# BUG#30573: get wrong result with "group by" on PARTITIONed table
#
#create table t1 (a int);
#insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
#CREATE TABLE t2 (
# defid int(10) unsigned NOT NULL,
# day int(10) unsigned NOT NULL,
# count int(10) unsigned NOT NULL,
# filler char(200),
# KEY (defid,day)
#)
#PARTITION BY RANGE (day) (
# PARTITION p7 VALUES LESS THAN (20070401) ,
# PARTITION p8 VALUES LESS THAN (20070501));
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t2 (
defid int(10) unsigned NOT NULL,
day int(10) unsigned NOT NULL,
count int(10) unsigned NOT NULL,
filler char(200),
KEY (defid,day)
)
PARTITION BY RANGE (day) (
PARTITION p7 VALUES LESS THAN (20070401) ,
PARTITION p8 VALUES LESS THAN (20070501));
#insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B;
#insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B;
#insert into t2 values(52, 20070321, 123, 'filler') ;
#insert into t2 values(52, 20070322, 456, 'filler') ;
insert into t2 select 20, 20070311, 1, 'filler' from t1 A, t1 B;
insert into t2 select 20, 20070411, 1, 'filler' from t1 A, t1 B;
insert into t2 values(52, 20070321, 123, 'filler') ;
insert into t2 values(52, 20070322, 456, 'filler') ;
#select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid;
#drop table t1, t2;
select sum(count) from t2 ch where ch.defid in (50,52) and ch.day between 20070320 and 20070401 group by defid;
drop table t1, t2;

View file

@ -263,4 +263,26 @@ from t1 order by score desc;
--enable_result_log
drop table t1;
#
# Bug#26020: User-Defined Variables are not consistent with columns data types
#
create table t1(b bigint);
insert into t1 (b) values (10), (30), (10);
set @var := 0;
select if(b=@var, 999, b) , @var := b from t1 order by b;
drop table t1;
create temporary table t1 (id int);
insert into t1 values (2), (3), (3), (4);
set @lastid=-1;
select @lastid != id, @lastid, @lastid := id from t1;
drop table t1;
create temporary table t1 (id bigint);
insert into t1 values (2), (3), (3), (4);
set @lastid=-1;
select @lastid != id, @lastid, @lastid := id from t1;
drop table t1;
--echo End of 5.1 tests
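A compact illustration of the Bug#26020 fix (mirroring the example from the setup_fields() comment later in this commit; table t is illustrative): registering the assignment's entry before fix_fields() makes a plain read of the variable in the same statement see its value from before the statement on every row:

set @a = 5;
-- @a is evaluated as a constant (5) for the whole statement, even though
-- the same statement assigns it:
select @a, @a := 10 from t;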

View file

@ -185,6 +185,8 @@ mysql_event_fill_row(THD *thd,
DBUG_PRINT("info", ("dbname=[%s]", et->dbname.str));
DBUG_PRINT("info", ("name =[%s]", et->name.str));
DBUG_ASSERT(et->on_completion != Event_parse_data::ON_COMPLETION_DEFAULT);
if (table->s->fields < ET_FIELD_COUNT)
{
/*
@ -745,6 +747,18 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
store_record(table,record[1]);
/*
We check whether ALTER EVENT was given dates that are in the past.
However, to know how to react, we need the ON COMPLETION type. The
check is deferred to this point because by now we have the previous
setting (from the event table) to fall back on if nothing was specified
in the ALTER EVENT statement.
*/
if (parse_data->check_dates(thd,
(int) table->field[ET_FIELD_ON_COMPLETION]->val_int()))
goto end;
/* Don't update create on row update. */
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;

View file

@ -45,7 +45,7 @@ Event_parse_data::new_instance(THD *thd)
*/
Event_parse_data::Event_parse_data()
:on_completion(Event_parse_data::ON_COMPLETION_DROP),
:on_completion(Event_parse_data::ON_COMPLETION_DEFAULT),
status(Event_parse_data::ENABLED),
do_not_create(FALSE),
body_changed(FALSE),
@ -114,6 +114,12 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
if (ltime_utc >= (my_time_t) thd->query_start())
return;
/*
We'll come back later when we have the real on_completion value
*/
if (on_completion == Event_parse_data::ON_COMPLETION_DEFAULT)
return;
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
{
switch (thd->lex->sql_command) {
@ -141,6 +147,42 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
}
/*
Check time/dates in ALTER EVENT
We check whether ALTER EVENT was given dates that are in the past.
However, to know how to react, we need the ON COMPLETION type. Hence,
the check is deferred until we have the previous ON COMPLETION type
from the event table to fall back on if nothing was specified in the
ALTER EVENT statement.
SYNOPSIS
Event_parse_data::check_dates()
thd Thread
on_completion ON COMPLETION value currently in event-db.
Will be overridden by value in ALTER EVENT if given.
RETURN VALUE
TRUE an error occurred, do not ALTER
FALSE OK
*/
bool
Event_parse_data::check_dates(THD *thd, int previous_on_completion)
{
if (on_completion == Event_parse_data::ON_COMPLETION_DEFAULT)
{
on_completion= previous_on_completion;
if (!ends_null)
check_if_in_the_past(thd, ends);
if (!execute_at_null)
check_if_in_the_past(thd, execute_at);
}
return do_not_create;
}
/*
Sets time for execution for one-time event.

View file

@ -38,7 +38,12 @@ public:
enum enum_on_completion
{
ON_COMPLETION_DROP = 1,
/*
On CREATE EVENT, DROP is the DEFAULT as per the docs.
On ALTER EVENT, "no change" is the DEFAULT.
*/
ON_COMPLETION_DEFAULT = 0,
ON_COMPLETION_DROP,
ON_COMPLETION_PRESERVE
};
@ -80,6 +85,9 @@ public:
bool
check_parse_data(THD *thd);
bool
check_dates(THD *thd, int previous_on_completion);
private:
void

View file

@ -3679,10 +3679,12 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_partition::index_read_map");
end_range= 0;
m_index_scan_type= partition_index_read;
DBUG_RETURN(common_index_read(buf, key, keypart_map, find_flag));
m_start_key.key= key;
m_start_key.keypart_map= keypart_map;
m_start_key.flag= find_flag;
DBUG_RETURN(common_index_read(buf, TRUE));
}
@ -3690,41 +3692,63 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
Common routine for a number of index_read variants
SYNOPSIS
common_index_read
see index_read for rest
ha_partition::common_index_read()
buf Buffer where the record should be returned
have_start_key TRUE <=> the left endpoint is available, i.e.
we're in index_read call or in read_range_first
call and the range has left endpoint
FALSE <=> there is no left endpoint (we're in
read_range_first() call and the range has no left
endpoint)
DESCRIPTION
Start scanning the range (when invoked from read_range_first()) or doing
an index lookup (when invoked from index_read_XXX):
- If possible, perform partition selection
- Find the set of partitions we're going to use
- Depending on whether we need ordering:
NO: Get the first record from first used partition (see
handle_unordered_scan_next_partition)
YES: Fill the priority queue and get the record that is the first in
the ordering
RETURN
0 OK
other HA_ERR_END_OF_FILE or other error code.
*/
int ha_partition::common_index_read(uchar *buf, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
int ha_partition::common_index_read(uchar *buf, bool have_start_key)
{
int error;
uint key_len;
bool reverse_order= FALSE;
uint key_len= calculate_key_len(table, active_index, key, keypart_map);
DBUG_ENTER("ha_partition::common_index_read");
LINT_INIT(key_len); /* used if have_start_key==TRUE */
memcpy((void*)m_start_key.key, key, key_len);
m_start_key.keypart_map= keypart_map;
m_start_key.length= key_len;
m_start_key.flag= find_flag;
if ((error= partition_scan_set_up(buf, TRUE)))
if (have_start_key)
{
m_start_key.length= key_len= calculate_key_len(table, active_index,
m_start_key.key,
m_start_key.keypart_map);
}
if ((error= partition_scan_set_up(buf, have_start_key)))
{
DBUG_RETURN(error);
}
if (find_flag == HA_READ_PREFIX_LAST ||
find_flag == HA_READ_PREFIX_LAST_OR_PREV ||
find_flag == HA_READ_BEFORE_KEY)
if (have_start_key &&
(m_start_key.flag == HA_READ_PREFIX_LAST ||
m_start_key.flag == HA_READ_PREFIX_LAST_OR_PREV ||
m_start_key.flag == HA_READ_BEFORE_KEY))
{
reverse_order= TRUE;
m_ordered_scan_ongoing= TRUE;
}
if (!m_ordered_scan_ongoing ||
(find_flag == HA_READ_KEY_EXACT &&
(key_len >= m_curr_key_info->key_length ||
key_len == 0)))
{
(have_start_key && m_start_key.flag == HA_READ_KEY_EXACT &&
(key_len >= m_curr_key_info->key_length || key_len == 0)))
{
/*
We use unordered index scan either when read_range is used and flag
is set to not use ordered or when an exact key is used and in this
@ -3815,7 +3839,7 @@ int ha_partition::index_last(uchar * buf)
Common routine for index_first/index_last
SYNOPSIS
common_index_first_last
ha_partition::common_first_last()
see index_first for rest
*/
@ -3859,7 +3883,10 @@ int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
m_ordered= TRUE; // Safety measure
end_range= 0;
m_index_scan_type= partition_index_read_last;
DBUG_RETURN(common_index_read(buf, key, keypart_map, HA_READ_PREFIX_LAST));
m_start_key.key= key;
m_start_key.keypart_map= keypart_map;
m_start_key.flag= HA_READ_PREFIX_LAST;
DBUG_RETURN(common_index_read(buf, TRUE));
}
@ -3990,23 +4017,15 @@ int ha_partition::read_range_first(const key_range *start_key,
((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
(end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
}
range_key_part= m_curr_key_info->key_part;
if (!start_key) // Read first record
{
if (m_ordered)
m_index_scan_type= partition_index_first;
else
m_index_scan_type= partition_index_first_unordered;
error= common_first_last(m_rec0);
}
range_key_part= m_curr_key_info->key_part;
if (start_key)
m_start_key= *start_key;
else
{
m_index_scan_type= partition_index_read;
error= common_index_read(m_rec0,
start_key->key,
start_key->keypart_map, start_key->flag);
}
m_start_key.key= NULL;
m_index_scan_type= partition_read_range;
error= common_index_read(m_rec0, test(start_key));
DBUG_RETURN(error);
}
@ -4028,26 +4047,36 @@ int ha_partition::read_range_next()
if (m_ordered)
{
DBUG_RETURN(handler::read_range_next());
DBUG_RETURN(handle_ordered_next(table->record[0], eq_range));
}
DBUG_RETURN(handle_unordered_next(m_rec0, eq_range));
DBUG_RETURN(handle_unordered_next(table->record[0], eq_range));
}
/*
Common routine to set up scans
Common routine to set up index scans
SYNOPSIS
buf Buffer to later return record in
idx_read_flag Is it index scan
ha_partition::partition_scan_set_up()
buf Buffer to later return record in (this function
needs it to calculate partitioning function
values)
idx_read_flag TRUE <=> m_start_key has range start endpoint which
probably can be used to determine the set of partitions
to scan.
FALSE <=> there is no start endpoint.
DESCRIPTION
Find out which partitions we'll need to read when scanning the specified
range.
If we need to scan only one partition, set m_ordered_scan_ongoing=FALSE
as we will not need to do merge ordering.
RETURN VALUE
>0 Error code
0 Success
DESCRIPTION
This is where we check which partitions to actually scan if not all
of them
*/
int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
@ -4138,10 +4167,19 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
DBUG_ENTER("ha_partition::handle_unordered_next");
/*
We should consider if this should be split into two functions as
next_same is always a local constant
We should consider if this should be split into three functions as
partition_read_range and is_next_same are always local constants
*/
if (is_next_same)
if (m_index_scan_type == partition_read_range)
{
if (!(error= file->read_range_next()))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0);
}
}
else if (is_next_same)
{
if (!(error= file->index_next_same(buf, m_start_key.key,
m_start_key.length)))
@ -4150,15 +4188,13 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
DBUG_RETURN(0);
}
}
else if (!(error= file->index_next(buf)))
else
{
if (!(file->index_flags(active_index, 0, 1) & HA_READ_ORDER) ||
compare_key(end_range) <= 0)
if (!(error= file->index_next(buf)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0); // Row was in range
}
error= HA_ERR_END_OF_FILE;
}
if (error == HA_ERR_END_OF_FILE)
@ -4202,6 +4238,11 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
file= m_file[i];
m_part_spec.start_part= i;
switch (m_index_scan_type) {
case partition_read_range:
DBUG_PRINT("info", ("read_range_first on partition %d", i));
error= file->read_range_first(m_start_key.key? &m_start_key: NULL,
end_range, eq_range, FALSE);
break;
case partition_index_read:
DBUG_PRINT("info", ("index_read on partition %d", i));
error= file->index_read_map(buf, m_start_key.key,
@ -4230,13 +4271,8 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
}
if (!error)
{
if (!(file->index_flags(active_index, 0, 1) & HA_READ_ORDER) ||
compare_key(end_range) <= 0)
{
m_last_part= i;
DBUG_RETURN(0);
}
error= HA_ERR_END_OF_FILE;
m_last_part= i;
DBUG_RETURN(0);
}
if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
DBUG_RETURN(error);
@ -4315,6 +4351,17 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
m_start_key.keypart_map);
reverse_order= TRUE;
break;
case partition_read_range:
{
/*
This can only read a record into table->record[0], as that buffer was
set up when the table was opened. We have to memcpy the data ourselves.
*/
error= file->read_range_first(&m_start_key, end_range, eq_range, TRUE);
memcpy(rec_buf_ptr, table->record[0], m_rec_length);
reverse_order= FALSE;
break;
}
default:
DBUG_ASSERT(FALSE);
DBUG_RETURN(HA_ERR_END_OF_FILE);
@ -4395,8 +4442,13 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
uint part_id= m_top_entry;
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_next");
if (!is_next_same)
if (m_index_scan_type == partition_read_range)
{
error= file->read_range_next();
memcpy(rec_buf(part_id), table->record[0], m_rec_length);
}
else if (!is_next_same)
error= file->index_next(rec_buf(part_id));
else
error= file->index_next_same(rec_buf(part_id), m_start_key.key,

View file

@ -49,7 +49,8 @@ private:
partition_index_first_unordered= 2,
partition_index_last= 3,
partition_index_read_last= 4,
partition_no_index_scan= 5
partition_read_range = 5,
partition_no_index_scan= 6
};
/* Data for the partition handler */
int m_mode; // Open mode
@ -63,8 +64,6 @@ private:
handler **m_reorged_file; // Reorganised partitions
handler **m_added_file; // Added parts kept for errors
partition_info *m_part_info; // local reference to partition
uchar *m_start_key_ref; // Reference of start key in current
// index scan info
Field **m_part_field_array; // Part field array locally to save acc
uchar *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
KEY *m_curr_key_info; // Current index
@ -429,9 +428,7 @@ public:
virtual int read_range_next();
private:
int common_index_read(uchar * buf, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag);
int common_index_read(uchar * buf, bool have_start_key);
int common_first_last(uchar * buf);
int partition_scan_set_up(uchar * buf, bool idx_read_flag);
int handle_unordered_next(uchar * buf, bool next_same);

View file

@ -4342,7 +4342,12 @@ Item *Item_field::equal_fields_propagator(uchar *arg)
item= this;
else if (field && (field->flags & ZEROFILL_FLAG) && IS_NUM(field->type()))
{
if (item && cmp_context != INT_RESULT)
/*
We don't need to zero-fill timestamp columns here because they will be
first converted to a string (in date/time format) and compared as such if
compared with another string.
*/
if (item && field->type() != FIELD_TYPE_TIMESTAMP && cmp_context != INT_RESULT)
convert_zerofill_number_to_string(&item, (Field_num *)field);
else
item= this;

View file

@ -3805,6 +3805,25 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
return entry;
}
bool Item_func_set_user_var::set_entry(THD *thd, bool create_if_not_exists)
{
if (thd == entry_thd && entry)
goto end; // update entry->update_query_id for PS
entry_thd= thd;
if (!(entry= get_variable(&thd->user_vars, name, create_if_not_exists)))
return TRUE;
/*
Remember the last query which updated it; this way a query can later know
if this variable is a constant item in the query (it is if update_query_id
is different from query_id).
*/
end:
entry->update_query_id= thd->query_id;
return FALSE;
}
/*
When a user variable is updated (in a SET command or a query like
SELECT @a:= ).
@ -3814,15 +3833,8 @@ bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
/* fix_fields will call Item_func_set_user_var::fix_length_and_dec */
if (Item_func::fix_fields(thd, ref) ||
!(entry= get_variable(&thd->user_vars, name, 1)))
if (Item_func::fix_fields(thd, ref) || set_entry(thd, TRUE))
return TRUE;
/*
Remember the last query which updated it, this way a query can later know
if this variable is a constant item in the query (it is if update_query_id
is different from query_id).
*/
entry->update_query_id= thd->query_id;
/*
As it is wrong and confusing to associate any
character set with NULL, @a should be latin2

View file

@ -1294,6 +1294,17 @@ class Item_func_set_user_var :public Item_func
{
enum Item_result cached_result_type;
user_var_entry *entry;
/*
The entry_thd variable is used:
1) to skip unnecessary updates of the entry field (see above);
2) to reset the entry field that was initialized in another thread
(for example, an item tree of a trigger that updates user variables
may be shared between several connections, and the entry_thd field
prevents updates of one connection's user variables from a concurrent
connection calling the same trigger that initially updated some
user variable in the first connection's context).
*/
THD *entry_thd;
char buffer[MAX_FIELD_WIDTH];
String value;
my_decimal decimal_buff;
@ -1309,7 +1320,8 @@ class Item_func_set_user_var :public Item_func
public:
LEX_STRING name; // keep it public
Item_func_set_user_var(LEX_STRING a,Item *b)
:Item_func(b), cached_result_type(INT_RESULT), name(a)
:Item_func(b), cached_result_type(INT_RESULT),
entry(NULL), entry_thd(NULL), name(a)
{}
enum Functype functype() const { return SUSERVAR_FUNC; }
double val_real();
@ -1340,6 +1352,7 @@ public:
}
void save_org_in_field(Field *field) { (void)save_in_field(field, 1, 0); }
bool register_field_in_read_map(uchar *arg);
bool set_entry(THD *thd, bool create_if_not_exists);
};
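A hedged sketch of the scenario item 2 above guards against (trigger and table names are hypothetical): the same trigger body, and hence the same Item_func_set_user_var tree, can fire in two connections, and entry_thd forces each connection to re-resolve the variable in its own THD:

-- Fired from two connections, this trigger shares one item tree;
-- without entry_thd the second connection would reuse the entry
-- cached for the first one.
CREATE TRIGGER trg BEFORE INSERT ON t FOR EACH ROW SET @x = NEW.id;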

View file

@ -854,7 +854,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE)
continue;
lock_type= table->reginfo.lock_type;
DBUG_ASSERT (lock_type != TL_WRITE_DEFAULT);
DBUG_ASSERT(lock_type != TL_WRITE_DEFAULT && lock_type != TL_READ_DEFAULT);
if (lock_type >= TL_WRITE_ALLOW_WRITE)
{
*write_lock_used=table;

View file

@ -8061,7 +8061,6 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability
*/
}
m_table->file->ha_start_bulk_insert(0);
/*
We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not fill
any TIMESTAMP column with data from the row but instead will use
@ -8200,7 +8199,16 @@ Rows_log_event::write_row(const Relay_log_info *const rli,
/* unpack row into table->record[0] */
error= unpack_current_row(rli); // TODO: how to handle errors?
if (m_curr_row == m_rows_buf)
{
/* This is the first row to be inserted; estimate the total number of
rows as the remaining buffer size divided by the size of this first
row, and use that estimate to initialize the storage engine for
bulk insertion. */
ulong estimated_rows= (m_rows_end - m_curr_row) / (m_curr_row_end - m_curr_row);
m_table->file->ha_start_bulk_insert(estimated_rows);
}
#ifndef DBUG_OFF
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);

View file

@ -1265,6 +1265,7 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last,
TABLE_LIST *new_child_list, TABLE_LIST **new_last);
bool reopen_table(TABLE *table);
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh);
thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table);
void close_data_files_and_morph_locks(THD *thd, const char *db,
const char *table_name);
void close_handle_and_leave_table_as_lock(TABLE *table);
@ -1938,7 +1939,7 @@ extern bool opt_using_transactions;
extern bool mysqld_embedded;
#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
#ifdef MYSQL_SERVER
extern bool using_update_log, opt_large_files, server_id_supplied;
extern bool opt_large_files, server_id_supplied;
extern bool opt_update_log, opt_bin_log, opt_error_log;
extern my_bool opt_log, opt_slow_log;
extern ulong log_output_options;

View file

@ -382,7 +382,7 @@ my_bool opt_character_set_client_handshake= 1;
bool server_id_supplied = 0;
bool opt_endinfo, using_udf_functions;
my_bool locked_in_memory;
bool opt_using_transactions, using_update_log;
bool opt_using_transactions;
bool volatile abort_loop;
bool volatile shutdown_in_progress;
/**
@ -3815,12 +3815,6 @@ server.");
{
unireg_abort(1);
}
/*
Used to specify which type of lock we need to use for queries of type
INSERT ... SELECT. This will change when we have row level logging.
*/
using_update_log=1;
}
/* call ha_init_key_cache() on all key caches to init them */
@ -7431,7 +7425,7 @@ static void mysql_init_variables(void)
slave_open_temp_tables= 0;
cached_thread_count= 0;
opt_endinfo= using_udf_functions= 0;
opt_using_transactions= using_update_log= 0;
opt_using_transactions= 0;
abort_loop= select_thread_in_use= signal_thread_in_use= 0;
ready_to_exit= shutdown_in_progress= grant_option= 0;
aborted_threads= aborted_connects= 0;

View file

@ -4355,6 +4355,38 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last,
}
/*
Return an appropriate read lock type given a table object.
@param thd Thread context
@param table TABLE object for table to be locked
@remark Due to a statement-based replication limitation, statements such as
INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need
to grab a TL_READ_NO_INSERT lock on the source table in order to
prevent the replication of a concurrent statement that modifies the
source table. If such a statement gets applied on the slave before
the INSERT .. SELECT statement finishes, data on the master could
differ from data on the slave, ending up with a discrepancy between
the binary log and table state. Furthermore, this does not apply to
I_S and log tables as it's always unsafe to replicate such tables
under statement-based replication as the table on the slave might
contain other data (e.g. general_log is enabled on the slave). The
statement will be marked as unsafe for SBR in decide_logging_format().
*/
thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table)
{
bool log_on= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG);
ulong binlog_format= thd->variables.binlog_format;
if ((log_on == FALSE) || (binlog_format == BINLOG_FORMAT_ROW) ||
(table->s->table_category == TABLE_CATEGORY_PERFORMANCE))
return TL_READ;
else
return TL_READ_NO_INSERT;
}
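A sketch of the observable effect, mirroring the Bug#34306 binlog test above (table names as in that test): under statement-based logging the source table is locked so that concurrent inserts into it queue up until the reading statement finishes:

-- connection 1, binlog_format = STATEMENT:
INSERT INTO t1 SELECT * FROM t2;  -- holds TL_READ_NO_INSERT on t2
-- connection 2, meanwhile:
INSERT INTO t2 VALUES (3);        -- blocks until connection 1 completes
-- under binlog_format = ROW the source is locked with TL_READ instead,
-- and the concurrent INSERT proceeds immediately.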
/*
Open all tables in list
@ -4629,6 +4661,9 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
{
if (tables->lock_type == TL_WRITE_DEFAULT)
tables->table->reginfo.lock_type= thd->update_lock_default;
else if (tables->lock_type == TL_READ_DEFAULT)
tables->table->reginfo.lock_type=
read_lock_type_for_table(thd, tables->table);
else if (tables->table->s->tmp_table == NO_TMP_TABLE)
tables->table->reginfo.lock_type= tables->lock_type;
}
@ -5036,7 +5071,11 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
void* prev_ht= NULL;
for (TABLE_LIST *table= tables; table; table= table->next_global)
{
if (!table->placeholder() && table->lock_type >= TL_WRITE_ALLOW_WRITE)
if (table->placeholder())
continue;
if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE)
thd->lex->set_stmt_unsafe();
if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
{
ulonglong const flags= table->table->file->ha_table_flags();
DBUG_PRINT("info", ("table: %s; ha_table_flags: %s%s",
@ -7296,6 +7335,22 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
if (ref_pointer_array)
bzero(ref_pointer_array, sizeof(Item *) * fields.elements);
/*
We call set_entry() here (before fix_fields() of the whole list of field
items) because:
1) the list of field items has the same order as in the query, and the
Item_func_get_user_var item may go before the Item_func_set_user_var:
SELECT @a, @a := 10 FROM t;
2) The entry->update_query_id value controls constantness of
Item_func_get_user_var items, so in the presence of Item_func_set_user_var
items we have to refresh their entries before fixing the
Item_func_get_user_var items.
*/
List_iterator<Item_func_set_user_var> li(thd->lex->set_var_list);
Item_func_set_user_var *var;
while ((var= li++))
var->set_entry(thd, FALSE);
Item **ref= ref_pointer_array;
thd->lex->current_select->cur_pos_in_select_list= 0;
while ((item= it++))

View file

@ -1542,10 +1542,9 @@ void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used)
for (; tables_used; tables_used= tables_used->next_local)
{
thd_proc_info(thd, "invalidating query cache entries (table)");
if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) &&
if (tables_used->lock_type >= TL_WRITE_ALLOW_WRITE &&
tables_used->table)
{
THD *thd= current_thd;
invalidate_table(thd, tables_used->table);
}
}

View file

@ -293,6 +293,7 @@ void lex_start(THD *thd)
lex->select_lex.init_query();
lex->value_list.empty();
lex->update_list.empty();
lex->set_var_list.empty();
lex->param_list.empty();
lex->view_list.empty();
lex->prepared_stmt_params.empty();

View file

@ -1549,6 +1549,7 @@ typedef struct st_lex : public Query_tables_list
List<Item> *insert_list,field_list,value_list,update_list;
List<List_item> many_values;
List<set_var_base> var_list;
List<Item_func_set_user_var> set_var_list; // in-query assignment list
List<Item_param> param_list;
List<LEX_STRING> view_list; // view list (list of field names in view)
/*

View file

@ -5628,7 +5628,7 @@ void mysql_init_multi_delete(LEX *lex)
lex->select_lex.select_limit= 0;
lex->unit.select_limit_cnt= HA_POS_ERROR;
lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
lex->lock_option= TL_READ_DEFAULT;
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
}

View file

@ -1039,7 +1039,7 @@ reopen_tables:
correct order of statements. Otherwise, we use a TL_READ lock to
improve performance.
*/
tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
tl->lock_type= read_lock_type_for_table(thd, table);
tl->updating= 0;
/* Update TABLE::lock_type accordingly. */
if (!tl->placeholder() && !using_lock_tables)

View file

@ -1817,6 +1817,8 @@ event_tail:
if (!(lex->event_parse_data= Event_parse_data::new_instance(thd)))
MYSQL_YYABORT;
lex->event_parse_data->identifier= $3;
lex->event_parse_data->on_completion=
Event_parse_data::ON_COMPLETION_DROP;
lex->sql_command= SQLCOM_CREATE_EVENT;
/* We need that for disallowing subqueries */
@ -4299,7 +4301,7 @@ create_select:
SELECT_SYM
{
LEX *lex=Lex;
lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
lex->lock_option= TL_READ_DEFAULT;
if (lex->sql_command == SQLCOM_INSERT)
lex->sql_command= SQLCOM_INSERT_SELECT;
else if (lex->sql_command == SQLCOM_REPLACE)
@ -8062,11 +8064,13 @@ variable:
variable_aux:
ident_or_text SET_VAR expr
{
$$= new (YYTHD->mem_root) Item_func_set_user_var($1, $3);
Item_func_set_user_var *item;
$$= item= new (YYTHD->mem_root) Item_func_set_user_var($1, $3);
if ($$ == NULL)
MYSQL_YYABORT;
LEX *lex= Lex;
lex->uncacheable(UNCACHEABLE_RAND);
lex->set_var_list.push_back(item);
}
| ident_or_text
{
@ -9394,7 +9398,7 @@ insert:
lex->duplicates= DUP_ERROR;
mysql_init_select(lex);
/* for subselects */
lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
lex->lock_option= TL_READ_DEFAULT;
}
insert_lock_option
opt_ignore insert2