Merge bodhi.local:/opt/local/work/mysql-5.0-root

into  bodhi.local:/opt/local/work/mysql-5.0-runtime


sql/ha_ndbcluster.cc:
  Auto merged
sql/item.cc:
  Auto merged
sql/mysql_priv.h:
  Auto merged
sql/sql_parse.cc:
  Auto merged
sql/sql_table.cc:
  Auto merged
sql/sql_yacc.yy:
  Auto merged
sql/sql_update.cc:
  Manual merge.
unknown 2006-07-07 22:09:43 +04:00
commit f5340f8c7d
33 changed files with 1130 additions and 100 deletions

View file

@ -152,7 +152,16 @@ enum ha_extra_function {
other fields intact. When this is off (by default) InnoDB will use memcpy
to overwrite entire row.
*/
HA_EXTRA_KEYREAD_PRESERVE_FIELDS
HA_EXTRA_KEYREAD_PRESERVE_FIELDS,
/*
Informs handler that write_row() which tries to insert new row into the
table and encounters some already existing row with same primary/unique
key can replace old row with new row instead of reporting error (basically
it informs handler that we do REPLACE instead of simple INSERT).
Off by default.
*/
HA_EXTRA_WRITE_CAN_REPLACE,
HA_EXTRA_WRITE_CANNOT_REPLACE
};
/* The following is parameter to ha_panic() */
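The comment above spells out the contract of the two new hints: HA_EXTRA_WRITE_CAN_REPLACE tells the handler that write_row() may silently overwrite an existing row with the same primary/unique key, and HA_EXTRA_WRITE_CANNOT_REPLACE withdraws that permission. As a rough illustration (simplified, invented names rather than the real handler interface), an engine's extra() hook can simply latch the hint into a flag that its write path consults later; the ha_ndbcluster.cc hunk further down does essentially this:

/* Illustrative sketch only -- a stripped-down engine reacting to the hints. */
enum sketch_extra_function
{
  SKETCH_EXTRA_WRITE_CAN_REPLACE,
  SKETCH_EXTRA_WRITE_CANNOT_REPLACE,
  SKETCH_EXTRA_OTHER
};

struct sketch_engine
{
  bool use_write;                       /* true: write_row() may overwrite */

  int extra(sketch_extra_function op)
  {
    switch (op)
    {
    case SKETCH_EXTRA_WRITE_CAN_REPLACE:
      use_write= true;                  /* statement has REPLACE semantics */
      break;
    case SKETCH_EXTRA_WRITE_CANNOT_REPLACE:
      use_write= false;                 /* back to plain INSERT semantics  */
      break;
    default:
      break;
    }
    return 0;
  }
};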

View file

@ -101,15 +101,15 @@ uninstall-local:
@RM@ -f -r $(DESTDIR)$(testdir)
std_data/client-key.pem: $(top_srcdir)/SSL/$(@F)
@CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
@CP@ -f $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
std_data/client-cert.pem: $(top_srcdir)/SSL/$(@F)
@CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
@CP@ -f $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
std_data/cacert.pem: $(top_srcdir)/SSL/$(@F)
@CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
@CP@ -f $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
std_data/server-cert.pem: $(top_srcdir)/SSL/$(@F)
@CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
@CP@ -f $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
std_data/server-key.pem: $(top_srcdir)/SSL/$(@F)
@CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
@CP@ -f $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data
SUFFIXES = .sh

View file

@ -1689,6 +1689,34 @@ id c1 c2
9 abc ppc
drop table federated.t1, federated.t2;
drop table federated.t1, federated.t2;
drop table if exists federated.t1;
create table federated.t1 (a int, b int, c int);
drop table if exists federated.t1;
drop table if exists federated.t2;
create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
a b c
1 2 2
3 5 15
7 11 77
delete from federated.t1;
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
a b c
13 17 221
19 23 437
delete from federated.t1;
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
a b c
3 4 12
5 6 30
drop tables federated.t1, federated.t2;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;

View file

@ -22,3 +22,117 @@ set GLOBAL init_connect="adsfsdfsdfs";
select @a;
Got one of the listed errors
drop table t1;
End of 4.1 tests
create table t1 (x int);
insert into t1 values (3), (5), (7);
create table t2 (y int);
create user mysqltest1@localhost;
grant all privileges on test.* to mysqltest1@localhost;
set global init_connect="create procedure p1() select * from t1";
call p1();
x
3
5
7
drop procedure p1;
set global init_connect="create procedure p1(x int)\
begin\
select count(*) from t1;\
select * from t1;\
set @x = x;
end";
call p1(42);
count(*)
3
x
3
5
7
select @x;
@x
42
set global init_connect="call p1(4711)";
select @x;
@x
4711
set global init_connect="drop procedure if exists p1";
call p1();
ERROR 42000: PROCEDURE test.p1 does not exist
create procedure p1(out sum int)
begin
declare n int default 0;
declare c cursor for select * from t1;
declare exit handler for not found
begin
close c;
set sum = n;
end;
open c;
loop
begin
declare x int;
fetch c into x;
if x > 3 then
set n = n + x;
end if;
end;
end loop;
end|
set global init_connect="call p1(@sum)";
select @sum;
@sum
12
drop procedure p1;
create procedure p1(tbl char(10), v int)
begin
set @s = concat('insert into ', tbl, ' values (?)');
set @v = v;
prepare stmt1 from @s;
execute stmt1 using @v;
deallocate prepare stmt1;
end|
set global init_connect="call p1('t1', 11)";
select * from t1;
x
3
5
7
11
drop procedure p1;
create function f1() returns int
begin
declare n int;
select count(*) into n from t1;
return n;
end|
set global init_connect="set @x = f1()";
select @x;
@x
4
set global init_connect="create view v1 as select f1()";
select * from v1;
f1()
4
set global init_connect="drop view v1";
select * from v1;
ERROR 42S02: Table 'test.v1' doesn't exist
drop function f1;
create trigger trg1
after insert on t2
for each row
insert into t1 values (new.y);
set global init_connect="insert into t2 values (13), (17), (19)";
select * from t1;
x
3
5
7
11
13
17
19
drop trigger trg1;
set global init_connect=default;
revoke all privileges, grant option from mysqltest1@localhost;
drop user mysqltest1@localhost;
drop table t1, t2;

View file

@ -1 +1,16 @@
ok
end of 4.1 tests
select * from t1;
x
3
5
7
11
13
select * from t2;
y
30
3
11
13
drop table t1, t2;

View file

@ -30,7 +30,8 @@ REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
i j k
3 1 42
17 2 24
17 2 NULL
DROP TABLE t1;
CREATE TABLE t2 (a INT(11) NOT NULL,
b INT(11) NOT NULL,
c INT(11) NOT NULL,
@ -52,3 +53,47 @@ SELECT * FROM t2 ORDER BY id;
a b c x y z id i
1 1 1 b b b 5 2
DROP TABLE t2;
drop table if exists t1;
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
pk apk data
3 3 3
4 1 NULL
5 2 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
pk apk data
1 4 NULL
2 5 NULL
3 3 3
delete from t1;
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
pk apk data
1 1 1
3 4 NULL
5 6 NULL
delete from t1;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
pk apk data
2 4 NULL
3 3 3
4 1 NULL
drop table t1;
End of 5.0 tests.

View file

@ -0,0 +1,119 @@
drop table if exists t1, t2, t3;
create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
update t1 set a=5 where a != 3;
select * from t1 order by id;
id a b
1 5 11.050000000000000000000000000000
2 5 12.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 1 1.050000000000000000000000000000
u 2 2.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
id a b
1 6 21.050000000000000000000000000000
2 6 22.050000000000000000000000000000
3 3 3.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 5 11.050000000000000000000000000000
u 5 12.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
delete from t1 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 6 21.050000000000000000000000000000
d 6 22.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t2;
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
id a b
3 3 3.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
d 4 4.050000000000000000000000000000
delete from t2;
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 4 13.050000000000000000000000000000
4 5 14.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 3 3.050000000000000000000000000000
u 4 4.050000000000000000000000000000
delete from t2;
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
id a b
3 5 23.050000000000000000000000000000
4 6 24.050000000000000000000000000000
select * from t2 order by op, a, b;
op a b
u 4 13.050000000000000000000000000000
u 5 14.050000000000000000000000000000
delete from t2;
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
id a b
3 1 0.000000000000000000000000000000
4 1 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 5 23.050000000000000000000000000000
d 6 24.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
id a b
3 2 0.000000000000000000000000000000
4 2 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 1 2.050000000000000000000000000000
delete from t1;
delete from t2;
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
id a b
3 4 0.000000000000000000000000000000
5 6 0.000000000000000000000000000000
select * from t2 order by op, a, b;
op a b
d 1 1.050000000000000000000000000000
d 2 2.050000000000000000000000000000
drop tables t1, t2, t3;
End of 5.0 tests

View file

@ -659,3 +659,56 @@ DROP VIEW test2.t3;
DROP TABLE test2.t1, test1.t0;
DROP DATABASE test2;
DROP DATABASE test1;
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v2;
DROP VIEW IF EXISTS v3;
DROP FUNCTION IF EXISTS f1;
DROP FUNCTION IF EXISTS f2;
DROP PROCEDURE IF EXISTS p1;
CREATE SQL SECURITY DEFINER VIEW v1 AS SELECT CURRENT_USER() AS cu;
CREATE FUNCTION f1() RETURNS VARCHAR(77) SQL SECURITY INVOKER
RETURN CURRENT_USER();
CREATE SQL SECURITY DEFINER VIEW v2 AS SELECT f1() AS cu;
CREATE PROCEDURE p1(OUT cu VARCHAR(77)) SQL SECURITY INVOKER
SET cu= CURRENT_USER();
CREATE FUNCTION f2() RETURNS VARCHAR(77) SQL SECURITY INVOKER
BEGIN
DECLARE cu VARCHAR(77);
CALL p1(cu);
RETURN cu;
END|
CREATE SQL SECURITY DEFINER VIEW v3 AS SELECT f2() AS cu;
CREATE USER mysqltest_u1@localhost;
GRANT ALL ON test.* TO mysqltest_u1@localhost;
The following tests should all return 1.
SELECT CURRENT_USER() = 'mysqltest_u1@localhost';
CURRENT_USER() = 'mysqltest_u1@localhost'
1
SELECT f1() = 'mysqltest_u1@localhost';
f1() = 'mysqltest_u1@localhost'
1
CALL p1(@cu);
SELECT @cu = 'mysqltest_u1@localhost';
@cu = 'mysqltest_u1@localhost'
1
SELECT f2() = 'mysqltest_u1@localhost';
f2() = 'mysqltest_u1@localhost'
1
SELECT cu = 'root@localhost' FROM v1;
cu = 'root@localhost'
1
SELECT cu = 'root@localhost' FROM v2;
cu = 'root@localhost'
1
SELECT cu = 'root@localhost' FROM v3;
cu = 'root@localhost'
1
DROP VIEW v3;
DROP FUNCTION f2;
DROP PROCEDURE p1;
DROP FUNCTION f1;
DROP VIEW v2;
DROP VIEW v1;
DROP USER mysqltest_u1@localhost;

View file

@ -1 +1,29 @@
select * from mysql.user as t1, mysql.user as t2, mysql.user as t3;
use test;
drop table if exists t1;
create table t1 (x int);
drop table if exists t2;
create table t2 (y int);
drop procedure if exists p1;
create definer=root@localhost procedure p1() select * from t1;
call p1();
drop procedure p1;
create definer=root@localhost procedure p1() insert into t1 values (3),(5),(7);
call p1();
drop function if exists f1;
create definer=root@localhost function f1() returns int return (select count(*) from t1);
insert into t2 set y = f1()*10;
drop view if exists v1;
create definer=root@localhost view v1 as select f1();
insert into t2 (y) select * from v1;
create trigger trg1 after insert on t2 for each row insert into t1 values (new.y);
insert into t2 values (11), (13);
drop procedure p1;
drop function f1;
drop view v1;

View file

@ -1365,4 +1365,46 @@ drop table federated.t1, federated.t2;
connection slave;
drop table federated.t1, federated.t2;
#
# Additional test for bug#18437 "Wrong values inserted with a before
# update trigger on NDB table". SQL-layer didn't properly inform
# handler about fields which were read and set in triggers. In some
# cases this resulted in incorrect (garbage) values of OLD variables
# and lost changes to NEW variables.
# Since for federated engine only operation which is affected by wrong
# fields mark-up is handler::write_row() this file contains coverage
# for ON INSERT triggers only. Tests for other types of triggers reside
# in ndb_trigger.test.
#
--disable_warnings
drop table if exists federated.t1;
--enable_warnings
create table federated.t1 (a int, b int, c int);
connection master;
--disable_warnings
drop table if exists federated.t1;
drop table if exists federated.t2;
--enable_warnings
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create table federated.t1 (a int, b int, c int) engine=federated connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
create trigger federated.t1_bi before insert on federated.t1 for each row set new.c= new.a * new.b;
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
# Each of three statements should correctly set values for all three fields
# insert
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
delete from federated.t1;
# insert ... select
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
delete from federated.t1;
# load
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
drop tables federated.t1, federated.t2;
connection slave;
drop table federated.t1;
source include/federated_cleanup.inc;

View file

@ -35,4 +35,205 @@ select @a;
connection con0;
drop table t1;
# End of 4.1 tests
disconnect con1;
disconnect con2;
disconnect con3;
disconnect con4;
disconnect con5;
--echo End of 4.1 tests
#
# Test 5.* features
#
create table t1 (x int);
insert into t1 values (3), (5), (7);
create table t2 (y int);
create user mysqltest1@localhost;
grant all privileges on test.* to mysqltest1@localhost;
#
# Create a simple procedure
#
set global init_connect="create procedure p1() select * from t1";
connect (con1,localhost,mysqltest1,,);
connection con1;
call p1();
drop procedure p1;
connection con0;
disconnect con1;
#
# Create a multi-result set procedure
#
set global init_connect="create procedure p1(x int)\
begin\
select count(*) from t1;\
select * from t1;\
set @x = x;
end";
connect (con1,localhost,mysqltest1,,);
connection con1;
call p1(42);
select @x;
connection con0;
disconnect con1;
#
# Just call it - this will not generate any output
#
set global init_connect="call p1(4711)";
connect (con1,localhost,mysqltest1,,);
connection con1;
select @x;
connection con0;
disconnect con1;
#
# Drop the procedure
#
set global init_connect="drop procedure if exists p1";
connect (con1,localhost,mysqltest1,,);
connection con1;
--error ER_SP_DOES_NOT_EXIST
call p1();
connection con0;
disconnect con1;
#
# Execution of a more complex procedure
#
delimiter |;
create procedure p1(out sum int)
begin
declare n int default 0;
declare c cursor for select * from t1;
declare exit handler for not found
begin
close c;
set sum = n;
end;
open c;
loop
begin
declare x int;
fetch c into x;
if x > 3 then
set n = n + x;
end if;
end;
end loop;
end|
delimiter ;|
# Call the procedure with a cursor
set global init_connect="call p1(@sum)";
connect (con1,localhost,mysqltest1,,);
connection con1;
select @sum;
connection con0;
disconnect con1;
drop procedure p1;
#
# Test Dynamic SQL
#
delimiter |;
create procedure p1(tbl char(10), v int)
begin
set @s = concat('insert into ', tbl, ' values (?)');
set @v = v;
prepare stmt1 from @s;
execute stmt1 using @v;
deallocate prepare stmt1;
end|
delimiter ;|
# Call the procedure with prepared statements
set global init_connect="call p1('t1', 11)";
connect (con1,localhost,mysqltest1,,);
connection con1;
select * from t1;
connection con0;
disconnect con1;
drop procedure p1;
#
# Stored functions
#
delimiter |;
create function f1() returns int
begin
declare n int;
select count(*) into n from t1;
return n;
end|
delimiter ;|
# Invoke a function
set global init_connect="set @x = f1()";
connect (con1,localhost,mysqltest1,,);
connection con1;
select @x;
connection con0;
disconnect con1;
#
# Create a view
#
set global init_connect="create view v1 as select f1()";
connect (con1,localhost,mysqltest1,,);
connection con1;
select * from v1;
connection con0;
disconnect con1;
#
# Drop the view
#
set global init_connect="drop view v1";
connect (con1,localhost,mysqltest1,,);
connection con1;
--error ER_NO_SUCH_TABLE
select * from v1;
connection con0;
disconnect con1;
drop function f1;
# We can't test "create trigger", since this requires super privileges
# in 5.0, but with super privileges, init_connect is not executed.
# (However, this can be tested in 5.1)
#
#set global init_connect="create trigger trg1\
# after insert on t2\
# for each row\
# insert into t1 values (new.y)";
#connect (con1,localhost,mysqltest1,,);
#connection con1;
#insert into t2 values (2), (4);
#select * from t1;
#
#connection con0;
#disconnect con1;
create trigger trg1
after insert on t2
for each row
insert into t1 values (new.y);
# Invoke trigger
set global init_connect="insert into t2 values (13), (17), (19)";
connect (con1,localhost,mysqltest1,,);
connection con1;
select * from t1;
connection con0;
disconnect con1;
drop trigger trg1;
set global init_connect=default;
revoke all privileges, grant option from mysqltest1@localhost;
drop user mysqltest1@localhost;
drop table t1, t2;

View file

@ -6,5 +6,15 @@
# mysql-test/t/init_file-master.opt for the actual test
#
# End of 4.1 tests
echo ok;
--echo ok
--echo end of 4.1 tests
#
# Check 5.x features
#
# Expected:
# 3, 5, 7, 11, 13
select * from t1;
# Expected:
# 30, 3, 11, 13
select * from t2;
drop table t1, t2;

View file

@ -39,6 +39,7 @@ INSERT INTO t1 VALUES (1,1,23),(2,2,24);
REPLACE INTO t1 (j,k) VALUES (1,42);
REPLACE INTO t1 (i,j) VALUES (17,2);
SELECT * from t1 ORDER BY i;
DROP TABLE t1;
# bug#19906
CREATE TABLE t2 (a INT(11) NOT NULL,
@ -64,4 +65,40 @@ SELECT * FROM t2 ORDER BY id;
DROP TABLE t2;
#
# Bug #20728 "REPLACE does not work correctly for NDB table with PK and
# unique index"
#
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (pk int primary key, apk int unique, data int) engine=ndbcluster;
# Test for plain replace which updates pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (4, 1), (5, 2);
select * from t1 order by pk;
delete from t1;
# Another test for plain replace which doesn't touch pk
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) values (1, 4), (2, 5);
select * from t1 order by pk;
delete from t1;
# Test for load data replace which updates pk
insert into t1 values (1, 1, 1), (4, 4, 4), (6, 6, 6);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Now test for load data replace which doesn't touch pk
insert into t1 values (1, 1, 1), (3, 3, 3), (5, 5, 5);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (pk, apk);
select * from t1 order by pk;
delete from t1;
# Finally test for both types of replace ... select
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
replace into t1 (pk, apk) select 4, 1;
replace into t1 (pk, apk) select 2, 4;
select * from t1 order by pk;
# Clean-up
drop table t1;
--echo End of 5.0 tests.

View file

@ -0,0 +1,92 @@
# Tests which involve triggers and NDB storage engine
--source include/have_ndb.inc
--source include/not_embedded.inc
#
# Test for bug#18437 "Wrong values inserted with a before update
# trigger on NDB table". SQL-layer didn't properly inform handler
# about fields which were read and set in triggers. In some cases
# this resulted in incorrect (garbage) values of OLD variables and
# lost changes to NEW variables.
# You can find similar tests for ON INSERT triggers in federated.test
# since this engine so far is the only engine in MySQL which cares
# about field mark-up during handler::write_row() operation.
#
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
create table t1 (id int primary key, a int not null, b decimal (63,30) default 0) engine=ndb;
create table t2 (op char(1), a int not null, b decimal (63,30));
create table t3 select 1 as i;
delimiter //;
create trigger t1_bu before update on t1 for each row
begin
insert into t2 values ("u", old.a, old.b);
set new.b = old.b + 10;
end;//
create trigger t1_bd before delete on t1 for each row
begin
insert into t2 values ("d", old.a, old.b);
end;//
delimiter ;//
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (3, 3, 3.05), (4, 4, 4.05);
# Check that usual update works as it should
update t1 set a=5 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check that everything works for multi-update
update t1, t3 set a=6 where a = 5;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for delete
delete from t1 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for multi-delete
insert into t1 values (1, 1, 1.05), (2, 2, 2.05), (4, 4, 4.05);
delete t1 from t1, t3 where a != 3;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... on duplicate key update
insert into t1 values (4, 4, 4.05);
insert into t1 (id, a) values (4, 1), (3, 1) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for insert ... select ... on duplicate key update
delete from t3;
insert into t3 values (4), (3);
insert into t1 (id, a) (select i, 1 from t3) on duplicate key update a= a + 1;
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t2;
# Check for replace
replace into t1 (id, a) values (4, 1), (3, 1);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for replace ... select ...
insert into t1 values (3, 1, 1.05), (4, 1, 2.05);
replace into t1 (id, a) (select i, 2 from t3);
select * from t1 order by id;
select * from t2 order by op, a, b;
delete from t1;
delete from t2;
# Check for load data replace
insert into t1 values (3, 1, 1.05), (5, 2, 2.05);
load data infile '../std_data_ln/loaddata5.dat' replace into table t1 fields terminated by '' enclosed by '' ignore 1 lines (id, a);
select * from t1 order by id;
select * from t2 order by op, a, b;
drop tables t1, t2, t3;
--echo End of 5.0 tests

View file

@ -866,3 +866,65 @@ DROP VIEW test2.t3;
DROP TABLE test2.t1, test1.t0;
DROP DATABASE test2;
DROP DATABASE test1;
#
# BUG#20570: CURRENT_USER() in a VIEW with SQL SECURITY DEFINER
# returns invoker name
#
--disable_warnings
DROP VIEW IF EXISTS v1;
DROP VIEW IF EXISTS v2;
DROP VIEW IF EXISTS v3;
DROP FUNCTION IF EXISTS f1;
DROP FUNCTION IF EXISTS f2;
DROP PROCEDURE IF EXISTS p1;
--enable_warnings
CREATE SQL SECURITY DEFINER VIEW v1 AS SELECT CURRENT_USER() AS cu;
CREATE FUNCTION f1() RETURNS VARCHAR(77) SQL SECURITY INVOKER
RETURN CURRENT_USER();
CREATE SQL SECURITY DEFINER VIEW v2 AS SELECT f1() AS cu;
CREATE PROCEDURE p1(OUT cu VARCHAR(77)) SQL SECURITY INVOKER
SET cu= CURRENT_USER();
delimiter |;
CREATE FUNCTION f2() RETURNS VARCHAR(77) SQL SECURITY INVOKER
BEGIN
DECLARE cu VARCHAR(77);
CALL p1(cu);
RETURN cu;
END|
delimiter ;|
CREATE SQL SECURITY DEFINER VIEW v3 AS SELECT f2() AS cu;
CREATE USER mysqltest_u1@localhost;
GRANT ALL ON test.* TO mysqltest_u1@localhost;
connect (conn1, localhost, mysqltest_u1,,);
--echo
--echo The following tests should all return 1.
--echo
SELECT CURRENT_USER() = 'mysqltest_u1@localhost';
SELECT f1() = 'mysqltest_u1@localhost';
CALL p1(@cu);
SELECT @cu = 'mysqltest_u1@localhost';
SELECT f2() = 'mysqltest_u1@localhost';
SELECT cu = 'root@localhost' FROM v1;
SELECT cu = 'root@localhost' FROM v2;
SELECT cu = 'root@localhost' FROM v3;
disconnect conn1;
connection default;
DROP VIEW v3;
DROP FUNCTION f2;
DROP PROCEDURE p1;
DROP FUNCTION f1;
DROP VIEW v2;
DROP VIEW v1;
DROP USER mysqltest_u1@localhost;
# End of 5.0 tests.

View file

@ -3217,20 +3217,11 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
if (current_thd->lex->sql_command == SQLCOM_REPLACE && !m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
} else
{
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
}
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
@ -3260,7 +3251,19 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS"));
break;
case HA_EXTRA_WRITE_CAN_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE"));
if (!m_has_unique_index)
{
DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
m_use_write= TRUE;
}
break;
case HA_EXTRA_WRITE_CANNOT_REPLACE:
DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
break;
}
DBUG_RETURN(0);
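The rewritten extra() above no longer guesses REPLACE from lex->sql_command; it only turns the write-instead-of-insert optimization on when the SQL layer explicitly sends HA_EXTRA_WRITE_CAN_REPLACE, and even then only while the table has no unique index (m_has_unique_index is false). A hedged sketch of that gate, with invented names rather than the actual ha_ndbcluster members:

/* Illustrative sketch only. */
struct sketch_ndb_handler
{
  bool m_has_unique_index;   /* table has a unique index besides the PK  */
  bool m_use_write;          /* write_row() may overwrite duplicate rows */

  void on_write_can_replace()
  {
    /* Only safe when duplicates can be detected through the PK alone. */
    if (!m_has_unique_index)
      m_use_write= true;
  }

  void on_write_cannot_replace() { m_use_write= false; }
};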

View file

@ -5373,9 +5373,14 @@ void Item_insert_value::print(String *str)
void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
/*
There is no sense in marking fields used by trigger with current value
of THD::query_id since it is completely unrelated to the THD::query_id
value for statements which will invoke trigger. So instead we use
Table_triggers_list::mark_fields_used() method which is called during
execution of these statements.
*/
bool save_set_query_id= thd->set_query_id;
/* TODO: Think more about consequences of this step. */
thd->set_query_id= 0;
/*
Try to find field by its name and if it will be found

View file

@ -296,12 +296,6 @@ Item *create_func_pow(Item* a, Item *b)
return new Item_func_pow(a,b);
}
Item *create_func_current_user()
{
current_thd->lex->safe_to_cache_query= 0;
return new Item_func_user(TRUE);
}
Item *create_func_radians(Item *a)
{
return new Item_func_units((char*) "radians",a,M_PI/180,0.0);

View file

@ -73,7 +73,6 @@ Item *create_func_period_add(Item* a, Item *b);
Item *create_func_period_diff(Item* a, Item *b);
Item *create_func_pi(void);
Item *create_func_pow(Item* a, Item *b);
Item *create_func_current_user(void);
Item *create_func_radians(Item *a);
Item *create_func_release_lock(Item* a);
Item *create_func_repeat(Item* a, Item *b);

View file

@ -1670,42 +1670,51 @@ String *Item_func_database::val_str(String *str)
return str;
}
// TODO: make USER() replicate properly (currently it is replicated to "")
String *Item_func_user::val_str(String *str)
/*
TODO: make USER() replicate properly (currently it is replicated to "")
*/
bool Item_func_user::init(const char *user, const char *host)
{
DBUG_ASSERT(fixed == 1);
THD *thd=current_thd;
CHARSET_INFO *cs= system_charset_info;
const char *host, *user;
uint res_length;
if (is_current)
{
user= thd->security_ctx->priv_user;
host= thd->security_ctx->priv_host;
}
else
{
user= thd->main_security_ctx.user;
host= thd->main_security_ctx.host_or_ip;
}
// For system threads (e.g. replication SQL thread) user may be empty
if (!user)
return &my_empty_string;
res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
if (str->alloc(res_length))
if (user)
{
null_value=1;
return 0;
CHARSET_INFO *cs= str_value.charset();
uint res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
if (str_value.alloc(res_length))
{
null_value=1;
return TRUE;
}
res_length=cs->cset->snprintf(cs, (char*)str_value.ptr(), res_length,
"%s@%s", user, host);
str_value.length(res_length);
str_value.mark_as_const();
}
res_length=cs->cset->snprintf(cs, (char*)str->ptr(), res_length, "%s@%s",
user, host);
str->length(res_length);
str->set_charset(cs);
return str;
return FALSE;
}
bool Item_func_user::fix_fields(THD *thd, Item **ref)
{
return (Item_func_sysconst::fix_fields(thd, ref) ||
init(thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip));
}
bool Item_func_current_user::fix_fields(THD *thd, Item **ref)
{
if (Item_func_sysconst::fix_fields(thd, ref))
return TRUE;
Security_context *ctx= (context->security_ctx
? context->security_ctx : thd->security_ctx);
return init(ctx->priv_user, ctx->priv_host);
}

View file

@ -385,21 +385,40 @@ public:
class Item_func_user :public Item_func_sysconst
{
bool is_current;
protected:
bool init (const char *user, const char *host);
public:
Item_func_user(bool is_current_arg)
:Item_func_sysconst(), is_current(is_current_arg) {}
String *val_str(String *);
Item_func_user()
{
str_value.set("", 0, system_charset_info);
}
String *val_str(String *)
{
DBUG_ASSERT(fixed == 1);
return (null_value ? 0 : &str_value);
}
bool fix_fields(THD *thd, Item **ref);
void fix_length_and_dec()
{
max_length= ((USERNAME_LENGTH + HOSTNAME_LENGTH + 1) *
system_charset_info->mbmaxlen);
}
const char *func_name() const
{ return is_current ? "current_user" : "user"; }
const char *fully_qualified_func_name() const
{ return is_current ? "current_user()" : "user()"; }
const char *func_name() const { return "user"; }
const char *fully_qualified_func_name() const { return "user()"; }
};
class Item_func_current_user :public Item_func_user
{
Name_resolution_context *context;
public:
Item_func_current_user(Name_resolution_context *context_arg)
: context(context_arg) {}
bool fix_fields(THD *thd, Item **ref);
const char *func_name() const { return "current_user"; }
const char *fully_qualified_func_name() const { return "current_user()"; }
};
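This class split is the core of the BUG#20570 fix exercised by the view_grant tests above: USER() keeps resolving against the connection's main security context, while CURRENT_USER() carries the Name_resolution_context of the place it appears in, so inside a SQL SECURITY DEFINER view it reports the definer rather than the invoker; both now build their string once at fix_fields() time and return it as a constant. A rough standalone model of that shape (invented, simplified types, not the server classes):

/* Illustrative sketch only. */
#include <string>

struct sketch_security_ctx { std::string priv_user, priv_host; };

struct sketch_func_user
{
  std::string value;                        /* computed once, then constant */

  void init(const std::string &user, const std::string &host)
  { value= user + "@" + host; }
};

struct sketch_func_current_user : sketch_func_user
{
  /* A view's name resolution context can point at the definer's security
     context; when it does, CURRENT_USER() reports the definer. */
  void fix(const sketch_security_ctx *view_definer_ctx,
           const sketch_security_ctx &session_ctx)
  {
    const sketch_security_ctx &ctx=
        view_definer_ctx ? *view_definer_ctx : session_ctx;
    init(ctx.priv_user, ctx.priv_host);
  }
};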

View file

@ -727,9 +727,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
TABLE_LIST *table_list,
List<create_field> &fields,
List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
@ -765,6 +763,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows rows, ulonglong options,

View file

@ -1946,6 +1946,7 @@ void Security_context::init()
{
host= user= priv_user= ip= 0;
host_or_ip= "connecting host";
priv_host[0]= '\0';
#ifndef NO_EMBEDDED_ACCESS_CHECKS
db_access= NO_ACCESS;
#endif

View file

@ -194,6 +194,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
if (table->triggers)
table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
@ -387,7 +391,7 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b)
bool mysql_multi_delete_prepare(THD *thd)
{
LEX *lex= thd->lex;
TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxilliary_table_list.first;
TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxiliary_table_list.first;
TABLE_LIST *target_tbl;
DBUG_ENTER("mysql_multi_delete_prepare");
@ -507,6 +511,8 @@ multi_delete::initialize_tables(JOIN *join)
transactional_tables= 1;
else
normal_tables= 1;
if (tbl->triggers)
tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE);
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)

View file

@ -241,6 +241,33 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}
/*
Mark fields used by triggers for INSERT-like statement.
SYNOPSIS
mark_fields_used_by_triggers_for_insert_stmt()
thd The current thread
table Table to which insert will happen
duplic Type of duplicate handling for insert which will happen
NOTE
For REPLACE there is no sense in marking particular fields
used by ON DELETE trigger as to execute it properly we have
to retrieve and store values for all table columns anyway.
*/
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic)
{
if (table->triggers)
{
table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT);
if (duplic == DUP_UPDATE)
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
}
}
bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<Item> &fields,
List<List_item> &values_list,
@ -400,6 +427,17 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
REPLACE should change values of all columns so we should mark
all columns as columns to be set. As nice side effect we will
retrieve columns which values are needed for ON DELETE triggers.
*/
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
@ -428,6 +466,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
error= 1;
@ -598,6 +638,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->next_insert_id=0; // Reset this if wrongly used
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/* Reset value of LAST_INSERT_ID if no rows where inserted */
if (!info.copied && thd->insert_id_used)
@ -1879,7 +1922,8 @@ bool delayed_insert::handle_inserts(void)
{
int error;
ulong max_rows;
bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
bool using_ignore= 0, using_opt_replace= 0;
bool using_bin_log= mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@ -1941,6 +1985,13 @@ bool delayed_insert::handle_inserts(void)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
using_ignore=1;
}
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers ||
!table->triggers->has_delete_triggers()))
{
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
using_opt_replace= 1;
}
thd.clear_error(); // reset error for binlog
if (write_record(&thd, table, &info))
{
@ -1953,6 +2004,11 @@ bool delayed_insert::handle_inserts(void)
using_ignore=0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
if (using_opt_replace)
{
using_opt_replace= 0;
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
}
if (row->query && row->log_query && using_bin_log)
{
Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE);
@ -2198,6 +2254,12 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
@ -2207,6 +2269,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
check_that_all_fields_are_given_values(thd, table, table_list)) ||
table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd));
if (!res)
mark_fields_used_by_triggers_for_insert_stmt(thd, table,
info.handle_duplicates);
DBUG_RETURN(res);
}
@ -2372,6 +2438,7 @@ bool select_insert::send_eof()
error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/*
We must invalidate the table in the query cache before binlog writing
@ -2601,6 +2668,12 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
@ -2640,6 +2713,7 @@ bool select_create::send_eof()
else
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
VOID(pthread_mutex_lock(&LOCK_open));
mysql_unlock_tables(thd, lock);
/*
@ -2673,6 +2747,7 @@ void select_create::abort()
if (table)
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
enum db_type table_type=table->s->db_type;
if (!table->s->tmp_table)
{
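The INSERT, INSERT DELAYED, INSERT ... SELECT and CREATE ... SELECT paths above all follow the same pattern: mark the columns used by the table's INSERT (and, for ON DUPLICATE KEY UPDATE, UPDATE) triggers, and hand the engine HA_EXTRA_WRITE_CAN_REPLACE only when no ON DELETE trigger needs to see the row being replaced, withdrawing the hint once the statement finishes. A compact sketch of that gating, using invented, simplified types rather than the server's:

/* Illustrative sketch only. */
struct sketch_table
{
  bool has_delete_triggers;
  void extra_write_can_replace()    {}  /* engine may overwrite duplicates */
  void extra_write_cannot_replace() {}  /* back to plain insert behaviour  */
  void extra_retrieve_all_cols()    {}  /* full OLD row needed by triggers */
};

static void begin_replace_mode(sketch_table &t)
{
  /* Letting the engine overwrite rows is only safe when no ON DELETE
     trigger has to fire for the row that would silently disappear. */
  if (!t.has_delete_triggers)
    t.extra_write_can_replace();
  /* REPLACE rewrites every column, and a delete trigger may read any of
     them, so ask the engine for complete rows. */
  t.extra_retrieve_all_cols();
}

static void end_replace_mode(sketch_table &t)
{
  if (!t.has_delete_triggers)
    t.extra_write_cannot_replace();
}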

View file

@ -873,7 +873,7 @@ typedef struct st_lex : public Query_tables_list
*/
List<Name_resolution_context> context_stack;
SQL_LIST proc_list, auxilliary_table_list, save_list;
SQL_LIST proc_list, auxiliary_table_list, save_list;
create_field *last_field;
Item_sum *in_sum_func;
udf_func udf;

View file

@ -225,6 +225,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
List_iterator_fast<Item> it(fields_vars);
@ -357,6 +359,13 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (handle_duplicates == DUP_REPLACE)
{
if (!table->triggers ||
!table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (!thd->prelocked_mode)
table->file->start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
@ -381,6 +390,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
ha_enable_transaction(thd, TRUE);

View file

@ -1250,6 +1250,12 @@ pthread_handler_t handle_bootstrap(void *arg)
thd->version=refresh_version;
thd->security_ctx->priv_user=
thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME));
/*
Make the "client" handle multiple results. This is necessary
to enable stored procedures with SELECTs and Dynamic SQL
in init-file.
*/
thd->client_capabilities|= CLIENT_MULTI_RESULTS;
buff= (char*) thd->net.buff;
thd->init_for_queries();
@ -3083,8 +3089,7 @@ end_with_restore_list:
lex->key_list,
select_lex->order_list.elements,
(ORDER *) select_lex->order_list.first,
lex->duplicates, lex->ignore, &lex->alter_info,
1);
lex->ignore, &lex->alter_info, 1);
}
break;
}
@ -3457,7 +3462,7 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *aux_tables=
(TABLE_LIST *)thd->lex->auxilliary_table_list.first;
(TABLE_LIST *)thd->lex->auxiliary_table_list.first;
multi_delete *result;
if (!thd->locked_tables &&
@ -5773,7 +5778,7 @@ void mysql_init_multi_delete(LEX *lex)
mysql_init_select(lex);
lex->select_lex.select_limit= 0;
lex->unit.select_limit_cnt= HA_POS_ERROR;
lex->select_lex.table_list.save_and_clear(&lex->auxilliary_table_list);
lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
@ -7033,7 +7038,7 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, &alter_info, 1));
0, &alter_info, 1));
}
@ -7051,7 +7056,7 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info)
DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name,
&create_info, table_list,
fields, keys, 0, (ORDER*)0,
DUP_ERROR, 0, alter_info, 1));
0, alter_info, 1));
}
@ -7152,7 +7157,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
{
SELECT_LEX *select_lex= &thd->lex->select_lex;
TABLE_LIST *aux_tables=
(TABLE_LIST *)thd->lex->auxilliary_table_list.first;
(TABLE_LIST *)thd->lex->auxiliary_table_list.first;
TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
DBUG_ENTER("multi_delete_precheck");
@ -7206,7 +7211,7 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
lex->table_count= 0;
for (target_tbl= (TABLE_LIST *)lex->auxilliary_table_list.first;
for (target_tbl= (TABLE_LIST *)lex->auxiliary_table_list.first;
target_tbl; target_tbl= target_tbl->next_local)
{
lex->table_count++;

View file

@ -35,9 +35,7 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
static int copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
List<create_field> &create, bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,ha_rows *deleted);
static bool prepare_blob_field(THD *thd, create_field *sql_field);
@ -3141,8 +3139,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
List<create_field> &fields, List<Key> &keys,
uint order_num, ORDER *order,
enum enum_duplicates handle_duplicates, bool ignore,
uint order_num, ORDER *order, bool ignore,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
@ -3740,8 +3737,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
error=copy_data_between_tables(table,new_table,create_list,
handle_duplicates, ignore,
error=copy_data_between_tables(table, new_table, create_list, ignore,
order_num, order, &copied, &deleted);
}
thd->last_insert_id=next_insert_id; // Needed for correct log
@ -3964,7 +3960,6 @@ end_temporary:
static int
copy_data_between_tables(TABLE *from,TABLE *to,
List<create_field> &create,
enum enum_duplicates handle_duplicates,
bool ignore,
uint order_num, ORDER *order,
ha_rows *copied,
@ -4067,8 +4062,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
*/
from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (ignore ||
handle_duplicates == DUP_REPLACE)
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
thd->row_count= 0;
restore_record(to, s->default_values); // Create empty record
@ -4095,8 +4089,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
if ((error=to->file->write_row((byte*) to->record[0])))
{
if ((!ignore &&
handle_duplicates != DUP_REPLACE) ||
if (!ignore ||
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
@ -4174,7 +4167,7 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, lex->create_list,
lex->key_list, 0, (ORDER *) 0,
DUP_ERROR, 0, &lex->alter_info, do_send_ok));
0, &lex->alter_info, do_send_ok));
}

View file

@ -1024,8 +1024,15 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
}
/*
Let us bind Item_trigger_field objects representing access to fields
in old/new versions of row in trigger to Field objects in table being
Gather all Item_trigger_field objects representing access to fields
in old/new versions of row in trigger into lists containing all such
objects for the triggers with same action and timing.
*/
triggers->trigger_fields[lex.trg_chistics.event]
[lex.trg_chistics.action_time]=
(Item_trigger_field *)(lex.trg_table_fields.first);
/*
Also let us bind these objects to Field objects in table being
opened.
We ignore errors here, because if even something is wrong we still
@ -1535,6 +1542,39 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
}
/*
Mark fields of subject table which we read/set in its triggers as such.
SYNOPSIS
mark_fields_used()
thd Current thread context
event Type of event triggers for which we are going to inspect
DESCRIPTION
This method marks fields of subject table which are read/set in its
triggers as such (by setting Field::query_id equal to THD::query_id)
and thus informs handler that values for these fields should be
retrieved/stored during execution of statement.
*/
void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
{
for (trg_field= trigger_fields[event][action_time]; trg_field;
trg_field= trg_field->next_trg_field)
{
/* We cannot mark fields which are not present in the table. */
if (trg_field->field_idx != (uint)-1)
table->field[trg_field->field_idx]->query_id = thd->query_id;
}
}
}
/*
Trigger BUG#14090 compatibility hook
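mark_fields_used() above walks the per-event, per-action-time lists of Item_trigger_field objects gathered in check_n_load() and stamps the corresponding table fields with the current query id, which is how the handler learns that those columns must be retrieved and stored even though the statement text never names them. A minimal standalone model of that walk, with invented, simplified types:

/* Illustrative sketch only. */
#include <cstddef>

enum { SKETCH_ACTION_MAX= 2 };                      /* BEFORE, AFTER */

struct sketch_trigger_field
{
  std::size_t           field_idx;                  /* index into table, or -1 */
  sketch_trigger_field *next;
};

struct sketch_trigger_list
{
  /* One list head per action time for the event being executed. */
  sketch_trigger_field *fields[SKETCH_ACTION_MAX];

  void mark_fields_used(unsigned long query_id,
                        unsigned long *field_query_ids) const
  {
    for (int when= 0; when < SKETCH_ACTION_MAX; when++)
      for (sketch_trigger_field *f= fields[when]; f; f= f->next)
        if (f->field_idx != (std::size_t) -1)        /* field exists in table */
          field_query_ids[f->field_idx]= query_id;   /* column is in use      */
  }
};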

View file

@ -25,6 +25,11 @@ class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Heads of the lists linking items for all fields used in triggers
grouped by event and action_time.
*/
Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@ -82,6 +87,7 @@ public:
record1_field(0), table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
bzero((char *)trigger_fields, sizeof(trigger_fields));
bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
}
~Table_triggers_list();
@ -119,6 +125,8 @@ public:
void set_table(TABLE *new_table);
void mark_fields_used(THD *thd, trg_event_type event);
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
TABLE_LIST *table);

View file

@ -434,13 +434,15 @@ int mysql_update(THD *thd,
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
if (table->triggers)
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
/*
We can use compare_record() to optimize away updates if
the table handler is returning all columns
*/
can_compare_record= !(table->file->table_flags() &
HA_PARTIAL_COLUMN_READ);
while (!(error=info.read_record(&info)) && !thd->killed)
{
if (!(select && select->skip_record()))
@ -763,6 +765,9 @@ reopen_tables:
DBUG_RETURN(TRUE);
}
if (table->triggers)
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
/*
If table will be updated we should not downgrade lock for it and

View file

@ -4453,7 +4453,10 @@ simple_expr:
Lex->safe_to_cache_query=0;
}
| CURRENT_USER optional_braces
{ $$= create_func_current_user(); }
{
$$= new Item_func_current_user(Lex->current_context());
Lex->safe_to_cache_query= 0;
}
| DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')'
{ $$= new Item_date_add_interval($3,$5,$6,0); }
| DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')'
@ -4810,7 +4813,7 @@ simple_expr:
| UNIX_TIMESTAMP '(' expr ')'
{ $$= new Item_func_unix_timestamp($3); }
| USER '(' ')'
{ $$= new Item_func_user(FALSE); Lex->safe_to_cache_query=0; }
{ $$= new Item_func_user(); Lex->safe_to_cache_query=0; }
| UTC_DATE_SYM optional_braces
{ $$= new Item_func_curdate_utc(); Lex->safe_to_cache_query=0;}
| UTC_TIME_SYM optional_braces