mariadb/mysql-test/t/multi_update.test

#
# Test of update statement that uses many tables.
#
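# A multi-table UPDATE or DELETE names several tables in one statement, e.g.
#   UPDATE t1,t2 SET ... WHERE t1.id1 = t2.id2;
#   DELETE t1,t2 FROM t1,t2 WHERE ...;
#   DELETE FROM t1,t2 USING t1,t2 WHERE ...;
# Both DELETE forms are exercised below.
#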
# Requires grants, so won't work with embedded server test
source include/not_embedded.inc;
source include/have_log_bin.inc;
--disable_warnings
drop table if exists t1,t2,t3;
drop database if exists mysqltest;
drop view if exists v1;
--error 0,ER_NONEXISTING_GRANT,ER_NONEXISTING_TABLE_GRANT
revoke all privileges on mysqltest.t1 from mysqltest_1@localhost;
--error 0,ER_NONEXISTING_GRANT,ER_NONEXISTING_TABLE_GRANT
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
delete from mysql.user where user=_binary'mysqltest_1';
--enable_warnings
create table t1(id1 int not null auto_increment primary key, t char(12));
create table t2(id2 int not null, t char(12));
create table t3(id3 int not null, t char(12), index(id3));
disable_query_log;
let $1 = 100;
while ($1)
{
  let $2 = 5;
  eval insert into t1(t) values ('$1');
  while ($2)
  {
    eval insert into t2(id2,t) values ($1,'$2');
    let $3 = 10;
    while ($3)
    {
      eval insert into t3(id3,t) values ($1,'$2');
      dec $3;
    }
    dec $2;
  }
  dec $1;
}
enable_query_log;
select count(*) from t1 where id1 > 95;
select count(*) from t2 where id2 > 95;
select count(*) from t3 where id3 > 95;
update t1,t2,t3 set t1.t="aaa", t2.t="bbb", t3.t="cc" where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 90;
select count(*) from t1 where t = "aaa";
select count(*) from t1 where id1 > 90;
select count(*) from t2 where t = "bbb";
select count(*) from t2 where id2 > 90;
select count(*) from t3 where t = "cc";
select count(*) from t3 where id3 > 90;
delete t1.*, t2.*, t3.* from t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 95;
check table t1, t2, t3;
select count(*) from t1 where id1 > 95;
select count(*) from t2 where id2 > 95;
select count(*) from t3 where id3 > 95;
delete t1, t2, t3 from t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 5;
select count(*) from t1 where id1 > 5;
select count(*) from t2 where id2 > 5;
select count(*) from t3 where id3 > 5;
delete from t1, t2, t3 using t1,t2,t3 where t1.id1 = t2.id2 and t2.id2 = t3.id3 and t1.id1 > 0;
# These queries will force a scan of the table
select count(*) from t1 where id1;
select count(*) from t2 where id2;
select count(*) from t3 where id3;
drop table t1,t2,t3;
create table t1(id1 int not null primary key, t varchar(100)) pack_keys = 1;
create table t2(id2 int not null, t varchar(100), index(id2)) pack_keys = 1;
disable_query_log;
let $1 = 1000;
while ($1)
{
  let $2 = 5;
  eval insert into t1 values ($1,'aaaaaaaaaaaaaaaaaaaa');
  while ($2)
  {
    eval insert into t2(id2,t) values ($1,'bbbbbbbbbbbbbbbbb');
    dec $2;
  }
  dec $1;
}
enable_query_log;
delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
drop table t1,t2;
CREATE TABLE t1 (
  id int(11) NOT NULL default '0',
  name varchar(10) default NULL,
  PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
CREATE TABLE t2 (
  id int(11) NOT NULL default '0',
  name varchar(10) default NULL,
  PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t2 VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
CREATE TABLE t3 (
  id int(11) NOT NULL default '0',
  mydate datetime default NULL,
  PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t3 VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22 00:00:00'),(7,'2002-07-22 00:00:00');
delete t1,t2,t3 from t1,t2,t3 where to_days(now())-to_days(t3.mydate)>=30 and t3.id=t1.id and t3.id=t2.id;
select * from t3;
DROP TABLE t1,t2,t3;
CREATE TABLE IF NOT EXISTS `t1` (
  `id` int(11) NOT NULL auto_increment,
  `tst` text,
  `tst1` text,
  PRIMARY KEY (`id`)
) ENGINE=MyISAM;
CREATE TABLE IF NOT EXISTS `t2` (
  `ID` int(11) NOT NULL auto_increment,
  `ParId` int(11) default NULL,
  `tst` text,
  `tst1` text,
  PRIMARY KEY (`ID`),
  KEY `IX_ParId_t2` (`ParId`),
  FOREIGN KEY (`ParId`) REFERENCES `t1` (`id`)
) ENGINE=MyISAM;
INSERT INTO t1(tst,tst1) VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
INSERT INTO t2(ParId) VALUES(1), (2), (3);
select * from t2;
UPDATE t2, t1 SET t2.tst = t1.tst, t2.tst1 = t1.tst1 WHERE t2.ParId = t1.Id;
select * from t2;
drop table t1, t2;
create table t1 (n numeric(10));
create table t2 (n numeric(10));
insert into t2 values (1),(2),(4),(8),(16),(32);
select * from t2 left outer join t1 using (n);
delete t1,t2 from t2 left outer join t1 using (n);
select * from t2 left outer join t1 using (n);
drop table t1,t2 ;
#
# Test with locking
#
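# Under LOCK TABLES, every table a multi-table UPDATE/DELETE modifies must be
# locked for WRITE; a table locked only for READ may merely be read, otherwise
# the statement fails with ER_TABLE_NOT_LOCKED_FOR_WRITE.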
create table t1 (n int(10) not null primary key, d int(10));
create table t2 (n int(10) not null primary key, d int(10));
insert into t1 values(1,1);
insert into t2 values(1,10),(2,20);
LOCK TABLES t1 write, t2 read;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
DELETE t1.*, t2.* FROM t1,t2 where t1.n=t2.n;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
unlock tables;
LOCK TABLES t1 write, t2 write;
UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
select * from t1;
DELETE t1.*, t2.* FROM t1,t2 where t1.n=t2.n;
select * from t1;
select * from t2;
unlock tables;
drop table t1,t2;
#
# Test safe updates and timestamps
#
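# With sql_safe_updates=1, an UPDATE or DELETE that does not use a key or a
# LIMIT is rejected; t1 and t2 below have no indexes, so the join update
# must fail with ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE.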
set sql_safe_updates=1;
create table t1 (n int(10), d int(10));
create table t2 (n int(10), d int(10));
insert into t1 values(1,1);
insert into t2 values(1,10),(2,20);
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
set sql_safe_updates=0;
drop table t1,t2;
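# A TIMESTAMP column should only be set in rows the multi-update actually
# changes: t1 gets new values (and the new timestamp), while t2 is only read.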
set timestamp=1038401397;
create table t1 (n int(10) not null primary key, d int(10), t timestamp);
create table t2 (n int(10) not null primary key, d int(10), t timestamp);
insert into t1 values(1,1,NULL);
insert into t2 values(1,10,NULL),(2,20,NULL);
set timestamp=1038000000;
UPDATE t1,t2 SET t1.d=t2.d WHERE t1.n=t2.n;
select n,d,unix_timestamp(t) from t1;
select n,d,unix_timestamp(t) from t2;
--error ER_PARSE_ERROR
UPDATE t1,t2 SET 1=2 WHERE t1.n=t2.n;
drop table t1,t2;
set timestamp=0;
set sql_safe_updates=0;
create table t1 (n int(10) not null primary key, d int(10));
create table t2 (n int(10) not null primary key, d int(10));
insert into t1 values(1,1), (3,3);
insert into t2 values(1,10),(2,20);
UPDATE t2 left outer join t1 on t1.n=t2.n SET t1.d=t2.d;
select * from t1;
select * from t2;
drop table t1,t2;
create table t1 (n int(10), d int(10));
create table t2 (n int(10), d int(10));
insert into t1 values(1,1),(1,2);
insert into t2 values(1,10),(2,20);
UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
select * from t1;
select * from t2;
drop table t1,t2;
create table t1 (n int(10), d int(10));
create table t2 (n int(10), d int(10));
insert into t1 values(1,1),(3,2);
insert into t2 values(1,10),(1,20);
UPDATE t1,t2 SET t1.d=t2.d,t2.d=30 WHERE t1.n=t2.n;
select * from t1;
select * from t2;
UPDATE t1 a ,t2 b SET a.d=b.d,b.d=30 WHERE a.n=b.n;
select * from t1;
select * from t2;
DELETE a, b FROM t1 a,t2 b where a.n=b.n;
select * from t1;
select * from t2;
drop table t1,t2;
CREATE TABLE t1 ( broj int(4) unsigned NOT NULL default '0', naziv char(25) NOT NULL default 'NEPOZNAT', PRIMARY KEY (broj)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,'jedan'),(2,'dva'),(3,'tri'),(4,'xxxxxxxxxx'),(5,'a'),(10,''),(11,''),(12,''),(13,'');
CREATE TABLE t2 ( broj int(4) unsigned NOT NULL default '0', naziv char(25) NOT NULL default 'NEPOZNAT', PRIMARY KEY (broj)) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1,'jedan'),(2,'dva'),(3,'tri'),(4,'xxxxxxxxxx'),(5,'a');
CREATE TABLE t3 ( broj int(4) unsigned NOT NULL default '0', naziv char(25) NOT NULL default 'NEPOZNAT', PRIMARY KEY (broj)) ENGINE=MyISAM;
INSERT INTO t3 VALUES (1,'jedan'),(2,'dva');
update t1,t2 set t1.naziv="aaaa" where t1.broj=t2.broj;
update t1,t2,t3 set t1.naziv="bbbb", t2.naziv="aaaa" where t1.broj=t2.broj and t2.broj=t3.broj;
drop table t1,t2,t3;
#
# Test multi update with different join methods
#
CREATE TABLE t1 (a int not null primary key, b int not null, key (b));
CREATE TABLE t2 (a int not null primary key, b int not null, key (b));
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
# Full join, without key
update t1,t2 set t1.a=t1.a+100;
select * from t1;
# unique key
update t1,t2 set t1.a=t1.a+100 where t1.a=101;
select * from t1;
# ref key
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
# Range key (in t1)
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b+10 where t1.b between 3 and 5 and t2.a=t1.a-100;
select * from t1;
select * from t2;
# test for non-updating table which is also used in sub-select
update t1,t2 set t1.b=t2.b, t1.a=t2.a where t1.a=t2.a and not exists (select * from t2 where t2.a > 10);
drop table t1,t2;
CREATE TABLE t3 ( KEY1 varchar(50) NOT NULL default '', PARAM_CORR_DISTANCE_RUSH double default NULL, PARAM_CORR_DISTANCE_GEM double default NULL, PARAM_AVG_TARE double default NULL, PARAM_AVG_NB_DAYS double default NULL, PARAM_DEFAULT_PROP_GEM_SRVC varchar(50) default NULL, PARAM_DEFAULT_PROP_GEM_NO_ETIK varchar(50) default NULL, PARAM_SCENARIO_COSTS varchar(50) default NULL, PARAM_DEFAULT_WAGON_COST double default NULL, tmp int(11) default NULL, PRIMARY KEY (KEY1)) ENGINE=MyISAM;
INSERT INTO t3 VALUES ('A',1,1,22,3.2,'R','R','BASE2',0.24,NULL);
create table t1 (A varchar(1));
insert into t1 values ("A") ,("B"),("C"),("D");
create table t2(Z varchar(15));
insert into t2(Z) select concat(a.a,b.a,c.a,d.a) from t1 as a, t1 as b, t1 as c, t1 as d;
update t2,t3 set Z =param_scenario_costs;
drop table t1,t2,t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
insert into t1 values (1,1),(2,1),(3,1);
insert into t2 values (1,1), (3,1);
update t1 left join t2 on t1.a=t2.a set t1.b=2, t2.b=2 where t1.b=1 and t2.b=1 or t2.a is NULL;
select t1.a, t1.b,t2.a, t2.b from t1 left join t2 on t1.a=t2.a where t1.b=1 and t2.b=1 or t2.a is NULL;
drop table t1,t2;
#
# Test reuse of same table
#
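# The same table is opened twice (as t1 and as alias t2); the update on one
# instance must be based on the values read from the other, unmodified one.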
create table t1 (a int not null auto_increment primary key, b int not null);
insert into t1 (b) values (1),(2),(3),(4);
update t1, t1 as t2 set t1.b=t2.b+1 where t1.a=t2.a;
select * from t1;
drop table t1;
# Test multi-update and multi-delete with impossible where
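# A WHERE clause that is always false must make these statements no-ops that
# still succeed, matching zero rows in every table.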
create table t1(id1 smallint(5), field char(5));
create table t2(id2 smallint(5), field char(5));
insert into t1 values (1, 'a'), (2, 'aa');
insert into t2 values (1, 'b'), (2, 'bb');
select * from t1;
select * from t2;
update t2 inner join t1 on t1.id1=t2.id2
set t2.field=t1.field
where 0=1;
update t2, t1 set t2.field=t1.field
where t1.id1=t2.id2 and 0=1;
delete t1, t2 from t2 inner join t1 on t1.id1=t2.id2
where 0=1;
delete t1, t2 from t2,t1
where t1.id1=t2.id2 and 0=1;
drop table t1,t2;
#
# Test alias (this is not correct in 4.0)
#
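# A table can only be deleted from under the name or alias by which it appears
# in the FROM list; referring to the real name of an aliased table must fail
# with ER_UNKNOWN_TABLE.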
CREATE TABLE t1 ( a int );
CREATE TABLE t2 ( a int );
DELETE t1 FROM t1, t2 AS t3;
DELETE t4 FROM t1, t1 AS t4;
DELETE t3 FROM t1 AS t3, t1 AS t4;
--error ER_UNKNOWN_TABLE
DELETE t1 FROM t1 AS t3, t2 AS t4;
INSERT INTO t1 values (1),(2);
INSERT INTO t2 values (1),(2);
DELETE t1 FROM t1 AS t2, t2 AS t1 where t1.a=t2.a and t1.a=1;
SELECT * from t1;
SELECT * from t2;
DELETE t2 FROM t1 AS t2, t2 AS t1 where t1.a=t2.a and t1.a=2;
SELECT * from t1;
SELECT * from t2;
DROP TABLE t1,t2;
#
# Test update with const tables
#
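# t1 is resolved as a const table here (a single row fetched by primary key
# p_id = 2 during optimization); the update joined through it must still work.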
create table `t1` (`p_id` int(10) unsigned NOT NULL auto_increment, `p_code` varchar(20) NOT NULL default '', `p_active` tinyint(1) unsigned NOT NULL default '1', PRIMARY KEY (`p_id`) );
create table `t2` (`c2_id` int(10) unsigned NULL auto_increment, `c2_p_id` int(10) unsigned NOT NULL default '0', `c2_note` text NOT NULL, `c2_active` tinyint(1) unsigned NOT NULL default '1', PRIMARY KEY (`c2_id`), KEY `c2_p_id` (`c2_p_id`) );
insert into t1 values (0,'A01-Comp',1);
insert into t1 values (0,'B01-Comp',1);
insert into t2 values (0,1,'A Note',1);
update t1 left join t2 on p_id = c2_p_id set c2_note = 'asdf-1' where p_id = 2;
select * from t1;
select * from t2;
drop table t1, t2;
#
# privilege check for multiupdate with other tables
#
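# mysqltest_1 gets UPDATE only on t1 but SELECT on the whole database: a
# multi-table update needs UPDATE privilege only on the tables it modifies
# and SELECT on the tables it merely reads.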
connect (root,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK);
connection root;
--disable_warnings
create database mysqltest;
--enable_warnings
create table mysqltest.t1 (a int, b int, primary key (a));
create table mysqltest.t2 (a int, b int, primary key (a));
create table mysqltest.t3 (a int, b int, primary key (a));
grant select on mysqltest.* to mysqltest_1@localhost;
grant update on mysqltest.t1 to mysqltest_1@localhost;
connect (user1,localhost,mysqltest_1,,mysqltest,$MASTER_MYPORT,$MASTER_MYSOCK);
connection user1;
update t1, t2 set t1.b=1 where t1.a=t2.a;
update t1, t2 set t1.b=(select t3.b from t3 where t1.a=t3.a) where t1.a=t2.a;
connection root;
revoke all privileges on mysqltest.t1 from mysqltest_1@localhost;
revoke all privileges on mysqltest.* from mysqltest_1@localhost;
delete from mysql.user where user=_binary'mysqltest_1';
drop database mysqltest;
connection default;
disconnect user1;
disconnect root;
#
# multi delete wrong table check
#
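# Only tables named in the FROM/USING list can be deleted from; t3 appears
# only inside the subquery, so naming it as a delete target must fail.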
create table t1 (a int, primary key (a));
create table t2 (a int, primary key (a));
create table t3 (a int, primary key (a));
-- error ER_UNKNOWN_TABLE
delete t1,t3 from t1,t2 where t1.a=t2.a and t2.a=(select t3.a from t3 where t1.a=t3.a);
drop table t1, t2, t3;
#
# multi* unique updating table check
#
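# A table that is the target of the UPDATE/DELETE may not also be read by a
# subquery of the same statement; this must fail with ER_UPDATE_TABLE_USED.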
create table t1 (col1 int);
create table t2 (col1 int);
-- error ER_UPDATE_TABLE_USED
update t1,t2 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1;
-- error ER_UPDATE_TABLE_USED
delete t1 from t1,t2 where t1.col1 < (select max(col1) from t1) and t1.col1 = t2.col1;
drop table t1,t2;
# Test for Bug#5837 delete with outer join and const tables
--disable_warnings
create table t1 (
  aclid bigint not null primary key,
  status tinyint(1) not null
) engine = innodb;
create table t2 (
  refid bigint not null primary key,
  aclid bigint, index idx_acl(aclid)
) engine = innodb;
--enable_warnings
insert into t2 values(1,null);
delete t2, t1 from t2 left join t1 on (t2.aclid=t1.aclid) where t2.refid='1';
drop table t1, t2;
#
# Bug#19225 unchecked error leads to server crash
#
create table t1(a int);
create table t2(a int);
--error ER_UPDATE_TABLE_USED
delete from t1,t2 using t1,t2 where t1.a=(select a from t1);
drop table t1, t2;
# End of 4.1 tests
#
# Test for Bug#1980.
#
--disable_warnings
create table t1 ( c char(8) not null ) engine=innodb;
--enable_warnings
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F');
alter table t1 add b char(8) not null;
alter table t1 add a char(8) not null;
alter table t1 add primary key (a,b,c);
update t1 set a=c, b=c;
create table t2 like t1;
insert into t2 select * from t1;
delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
--disable_warnings
create table t1 ( c char(8) not null ) engine=innodb;
--enable_warnings
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F');
alter table t1 add b char(8) not null;
alter table t1 add a char(8) not null;
alter table t1 add primary key (a,b,c);
update t1 set a=c, b=c;
create table t2 like t1;
insert into t2 select * from t1;
delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
#
# Test alter table and a concurrent multi update
# (Before data-lock-aware metadata locks were introduced, this test case
# forced the update to reopen tables.)
#
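# Scenario: session "locker" holds a write lock on t1, session "changer"
# blocks on ALTER TABLE t1, and session "updater" blocks on a multi-update
# through view v1. After UNLOCK TABLES both must complete, and the update
# must see the table definition produced by the ALTER.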
create table t1 (a int, b int);
insert into t1 values (1, 2), (2, 3), (3, 4);
create table t2 (a int);
insert into t2 values (10), (20), (30);
create view v1 as select a as b, a/10 as a from t2;
connect (locker,localhost,root,,test);
connection locker;
lock table t1 write;
connect (changer,localhost,root,,test);
connection changer;
send alter table t1 add column c int default 100 after a;
connect (updater,localhost,root,,test);
connection updater;
# Wait till "alter table t1 ..." of session changer is in progress.
# = There is one session waiting.
let $wait_condition= select count(*)= 1 from information_schema.processlist
  where state= 'Waiting for table';
--source include/wait_condition.inc
send update t1, v1 set t1.b=t1.a+t1.b+v1.b where t1.a=v1.a;
connection locker;
# Wait till
# - "alter table t1 ..." of session changer and
# - "update t1, v1 ..." of session updater
# are in progress.
# = There are two sessions waiting.
let $wait_condition= select count(*)= 2 from information_schema.processlist
  where state= 'Waiting for table';
--source include/wait_condition.inc
unlock tables;
connection changer;
reap;
connection updater;
reap;
select * from t1;
select * from t2;
drop view v1;
drop table t1, t2;
connection default;
disconnect locker;
disconnect changer;
disconnect updater;
#
# Test multi-table updates and deletes, both with and without a primary key.
#
create table t1 (i1 int, i2 int, i3 int);
create table t2 (id int, c1 varchar(20), c2 varchar(20));
insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
select * from t1 order by i1;
select * from t2;
update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
select * from t1 order by i1;
select * from t2 order by id;
delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
select * from t1 order by i1;
select * from t2 order by id;
drop table t1, t2;
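# Same data and statements as above, but now both tables have
# auto_increment primary keys, so the rows to update/delete can be
# located through an index instead of a table scan.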
create table t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1));
create table t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id));
insert into t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
insert into t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
select * from t1 order by i1;
select * from t2 order by id;
update t1,t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id;
select * from t1 order by i1;
select * from t2 order by id;
delete t1.*,t2.* from t1,t2 where t1.i2=t2.id;
select * from t1 order by i1;
select * from t2 order by id;
drop table t1, t2;
#
# Bug#27716 multi-update did partially and has not binlogged
#
CREATE TABLE `t1` (
`a` int(11) NOT NULL auto_increment,
`b` int(11) default NULL,
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 ;
CREATE TABLE `t2` (
`a` int(11) NOT NULL auto_increment,
`b` int(11) default NULL,
PRIMARY KEY (`a`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 ;
# as the test checks that errored queries appear in the binlog
set @sav_binlog_format= @@session.binlog_format;
set @@session.binlog_format= mixed;
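# Note: with statement-based (or mixed) logging, a multi-update that
# fails part-way on a non-transactional table still has to be written
# to the binlog, since the rows already changed cannot be rolled back.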
# A. testing multi_update::send_error() effective update
insert into t1 values (1,1),(2,2);
insert into t2 values (1,1),(4,4);
reset master;
--error ER_DUP_ENTRY
UPDATE t2,t1 SET t2.a=t1.a+2;
# check
select * from t2 /* must be (3,1), (4,4) */;
show master status /* there must be the UPDATE query event */;
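# Roughly what happens above: the first t2 row (a=1) is updated to
# t1.a+2 = 3 before a later row update would produce a duplicate key,
# which aborts the statement. MyISAM cannot roll that change back,
# so the partially applied UPDATE must still be logged.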
# B. testing multi_update::send_error() ineffective update
# (per the policy described at mysql_update(), the statement still goes to the binlog)
delete from t1;
delete from t2;
insert into t1 values (1,2),(3,4),(4,4);
insert into t2 values (1,2),(3,4),(4,4);
reset master;
--error ER_DUP_ENTRY
UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a;
show master status /* there must be the UPDATE query event */;
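# As in case A, the duplicate-key error aborts the statement, yet the
# (ineffective) update is still written to the binlog per the policy
# noted above.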
# cleanup
drop table t1, t2;
set @@session.binlog_format= @sav_binlog_format;
#
# Bug#29136 erred multi-delete on trans table does not rollback
#
# prepare
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
CREATE TABLE t1 (a int, PRIMARY KEY (a));
CREATE TABLE t2 (a int, PRIMARY KEY (a));
CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM;
create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
reset master;
# exec cases B, A - see innodb.test
# B. send_eof() and send_error() afterward
--error ER_DUP_ENTRY
delete t3.* from t2,t3 where t2.a=t3.a;
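# Each row deleted from t3 fires trg_del_t3, which inserts 1 into t1.
# The second delete's trigger insert hits a duplicate key on t1's
# primary key and aborts the statement, but the first delete on the
# MyISAM table t3 cannot be rolled back; hence the counts below.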
# check
select count(*) from t1 /* must be 1 */;
select count(*) from t3 /* must be 1 */;
# cleanup
drop table t1, t2, t3;
#
# Add further tests from here
#
--echo #
--echo # Bug#49534: multitable IGNORE update with sql_safe_updates error
--echo # causes debug assertion
--echo #
CREATE TABLE t1( a INT, KEY( a ) );
INSERT INTO t1 VALUES (1), (2), (3);
SET SESSION sql_safe_updates = 1;
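# With sql_safe_updates set, updating t1 without a key-based WHERE
# condition must fail with ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE; the
# bug was that doing so together with IGNORE hit a debug assertion
# instead of simply returning the error.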
--echo # Must not cause failed assertion
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
UPDATE IGNORE t1, t1 t1a SET t1.a = 1 WHERE t1a.a = 1;
DROP TABLE t1;
--echo end of tests