Merge kboortz@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/Users/kent/mysql/bk/mysql-5.0
kent@mysql.com 2005-12-03 14:40:24 +01:00
commit 28fa86f45e
53 changed files with 1030 additions and 130 deletions

View file

@ -2152,6 +2152,8 @@ static void dump_table(char *table, char *db)
for (i = 0; i < mysql_num_fields(res); i++)
{
int is_blob;
ulong length= lengths[i];
if (!(field = mysql_fetch_field(res)))
{
my_snprintf(query, QUERY_LENGTH,
@ -2178,7 +2180,6 @@ static void dump_table(char *table, char *db)
field->type == MYSQL_TYPE_TINY_BLOB)) ? 1 : 0;
if (extended_insert)
{
ulong length = lengths[i];
if (i == 0)
dynstr_set(&extended_row,"(");
else
@ -2268,19 +2269,19 @@ static void dump_table(char *table, char *db)
{
print_xml_tag1(md_result_file, "\t\t", "field name=",
field->name, "");
print_quoted_xml(md_result_file, row[i], lengths[i]);
print_quoted_xml(md_result_file, row[i], length);
fputs("</field>\n", md_result_file);
}
else if (opt_hex_blob && is_blob)
else if (opt_hex_blob && is_blob && length)
{
/* sakaik got the idea to to provide blob's in hex notation. */
char *ptr= row[i], *end= ptr+ lengths[i];
char *ptr= row[i], *end= ptr + length;
fputs("0x", md_result_file);
for (; ptr < end ; ptr++)
fprintf(md_result_file, "%02X", *((uchar *)ptr));
}
else
unescape(md_result_file, row[i], lengths[i]);
unescape(md_result_file, row[i], length);
}
else
{
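
The mysqldump.c hunk above caches lengths[i] once per column and adds an empty-length guard to the --hex-blob branch, so a zero-length blob no longer produces a bare "0x" in the dump (the Bug #13318 test added later in this commit covers exactly that case). A minimal standalone sketch of the guarded hex path, with invented names, not mysqldump itself:

/* Standalone sketch (invented names, not mysqldump) of the guarded hex dump:
   an empty value falls through to the normal quoting path instead of
   producing a bare "0x", which is not a valid SQL literal. */
#include <cstdio>

static void dump_hex_or_quoted(FILE *out, const char *ptr, unsigned long length)
{
  if (length)                                   /* the new "&& length" guard */
  {
    fputs("0x", out);
    for (const char *end= ptr + length; ptr < end; ptr++)
      fprintf(out, "%02X", *(const unsigned char *) ptr);
  }
  else
    fputs("''", out);                           /* empty value: quote it instead */
}

int main()
{
  dump_hex_or_quoted(stdout, "abc", 3);  fputc('\n', stdout);   /* 0x616263 */
  dump_hex_or_quoted(stdout, "", 0);     fputc('\n', stdout);   /* '' */
  return 0;
}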

View file

@ -402,7 +402,7 @@ int chk_key(MI_CHECK *param, register MI_INFO *info)
full_text_keys++;
if (share->state.key_root[key] == HA_OFFSET_ERROR &&
(info->state->records == 0 || keyinfo->flag & HA_FULLTEXT))
continue;
goto do_stat;
if (!_mi_fetch_keypage(info,keyinfo,share->state.key_root[key],
DFLT_INIT_HITS,info->buff,0))
{
@ -498,6 +498,7 @@ int chk_key(MI_CHECK *param, register MI_INFO *info)
param->max_level);
all_keydata+=param->keydata; all_totaldata+=param->totaldata; key_totlength+=length;
do_stat:
if (param->testflag & T_STATISTICS)
update_key_parts(keyinfo, rec_per_key_part, param->unique_count,
param->stats_method == MI_STATS_METHOD_IGNORE_NULLS?

View file

@ -30,6 +30,15 @@ check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
drop table t1;
create table t1 (a mediumtext, fulltext key key1(a)) charset utf8 collate utf8_general_ci engine myisam;
insert into t1 values ('hello');
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Table is already up to date
drop table t1;
CREATE TABLE t1 (a int);
prepare stmt1 from "SELECT * FROM t1 PROCEDURE ANALYSE()";
execute stmt1;

View file

@ -1891,3 +1891,17 @@ t1 CREATE TABLE `t1` (
) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1
drop table t1;
set storage_engine=MyISAM;
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
key (a,b,c,d)) engine=bdb;
drop table t1;
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
e varchar(255) character set utf8,
key (a,b,c,d,e)) engine=bdb;
ERROR 42000: Specified key was too long; max key length is 3072 bytes
End of 5.0 tests

View file

@ -33,3 +33,12 @@ id value
select * from t1 where id <=> value or value<=>id;
id value
drop table t1,t2;
create table t1 (a bigint unsigned);
insert into t1 values (4828532208463511553);
select * from t1 where a = '4828532208463511553';
a
4828532208463511553
select * from t1 where a in ('4828532208463511553');
a
4828532208463511553
drop table t1;

View file

@ -2012,3 +2012,34 @@ explain select distinct f1, f2 from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
drop table t1;
create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
insert into t1 (c1,c2) values
(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
select distinct c1, c2 from t1 order by c2;
c1 c2
10 1
10 2
10 3
20 4
20 5
20 6
30 7
30 8
30 9
select c1,min(c2) as c2 from t1 group by c1 order by c2;
c1 c2
10 1
20 4
30 7
select c1,c2 from t1 group by c1,c2 order by c2;
c1 c2
10 1
10 2
10 3
20 4
20 5
20 6
30 7
30 8
30 9
drop table t1;

View file

@ -1048,3 +1048,11 @@ blob 65535 65535
text 65535 65535
text 65535 32767
drop table t1;
create table t1 (f1 int(11));
create view v1 as select * from t1;
drop table t1;
select table_type from information_schema.tables
where table_name="v1";
table_type
VIEW
drop view v1;

View file

@ -2772,3 +2772,17 @@ insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
drop table t1;
drop table t2;
commit;
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
key (a,b,c,d)) engine=innodb;
drop table t1;
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
e varchar(255) character set utf8,
key (a,b,c,d,e)) engine=innodb;
ERROR 42000: Specified key was too long; max key length is 3072 bytes
End of 5.0 tests

View file

@ -2550,3 +2550,72 @@ DELIMITER ;
DROP TRIGGER tr1;
DROP TABLE t1;
create table t1 (a binary(1), b blob);
insert into t1 values ('','');
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
DROP TABLE IF EXISTS `t1`;
CREATE TABLE `t1` (
`a` binary(1) default NULL,
`b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
INSERT INTO `t1` VALUES (0x00,'');
UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
DROP TABLE IF EXISTS `t1`;
CREATE TABLE `t1` (
`a` binary(1) default NULL,
`b` blob
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
INSERT INTO `t1` VALUES (0x00,'');
UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
drop table t1;

View file

@ -391,3 +391,25 @@ root@localhost
--------------------------------------------------------------------------------
this will be executed
this will be executed
mysqltest: At line 2: query 'create table t1 (a int primary key);
insert into t1 values (1);
select 'select-me';
insertz 'error query'' failed: 1064: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'insertz 'error query'' at line 1
drop table t1;
drop table t1;
create table t1 (a int primary key);
insert into t1 values (1);
select 'select-me';
insertz error query||||
select-me
select-me
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'insertz error query' at line 1
drop table t1;
create table t1 (a int primary key);
insert into t1 values (1);
select 'select-me';
insertz error query||||
select-me
select-me
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'insertz error query' at line 1
drop table t1;

View file

@ -337,7 +337,7 @@ set @precision=10000000000;
select rand(),
cast(rand(10)*@precision as unsigned integer) from t1;
rand() cast(rand(10)*@precision as unsigned integer)
- 6570515219
- 6570515220
- 1282061302
- 6698761160
- 9647622201
@ -348,23 +348,23 @@ prepare stmt from
set @var=1;
execute stmt using @var;
rand() cast(rand(10)*@precision as unsigned integer) cast(rand(?)*@precision as unsigned integer)
- 6570515219 -
- 6570515220 -
- 1282061302 -
- 6698761160 -
- 9647622201 -
set @var=2;
execute stmt using @var;
rand() cast(rand(10)*@precision as unsigned integer) cast(rand(?)*@precision as unsigned integer)
- 6570515219 6555866465
- 1282061302 1223466192
- 6698761160 6449731873
- 6570515220 6555866465
- 1282061302 1223466193
- 6698761160 6449731874
- 9647622201 8578261098
set @var=3;
execute stmt using @var;
rand() cast(rand(10)*@precision as unsigned integer) cast(rand(?)*@precision as unsigned integer)
- 6570515219 9057697559
- 6570515220 9057697560
- 1282061302 3730790581
- 6698761160 1480860534
- 6698761160 1480860535
- 9647622201 6211931236
drop table t1;
deallocate prepare stmt;

View file

@ -1022,6 +1022,31 @@ Variable_name Value
Qcache_hits 1
drop table t1;
create table t1 (a int);
flush status;
(select a from t1) union (select a from t1);
a
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
(select a from t1) union (select a from t1);
a
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
drop table t1;
create table t1 (a int);
insert into t1 values (1),(2);
CREATE PROCEDURE `p1`()
begin

View file

@ -4046,4 +4046,111 @@ boo
2
drop procedure bug14643_1|
drop procedure bug14643_2|
drop procedure if exists bug14304|
drop table if exists t3, t4|
create table t3(a int primary key auto_increment)|
create table t4(a int primary key auto_increment)|
create procedure bug14304()
begin
insert into t3 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 select null as a;
insert into t3 set a=null;
insert into t3 set a=null;
select * from t3;
end|
call bug14304()|
a
1
2
3
drop procedure bug14304|
drop table t3, t4|
drop procedure if exists bug14376|
create procedure bug14376()
begin
declare x int default x;
end|
call bug14376()|
ERROR 42S22: Unknown column 'x' in 'field list'
drop procedure bug14376|
create procedure bug14376()
begin
declare x int default 42;
begin
declare x int default x;
select x;
end;
end|
call bug14376()|
x
42
drop procedure bug14376|
create procedure bug14376(x int)
begin
declare x int default x;
select x;
end|
call bug14376(4711)|
x
4711
drop procedure bug14376|
drop procedure if exists p1|
Warnings:
Note 1305 PROCEDURE p1 does not exist
drop table if exists t1|
create table t1 (a varchar(255))|
insert into t1 (a) values ("a - table column")|
create procedure p1(a varchar(255))
begin
declare i varchar(255);
declare c cursor for select a from t1;
select a;
select a from t1 into i;
select i as 'Parameter takes precedence over table column'; open c;
fetch c into i;
close c;
select i as 'Parameter takes precedence over table column in cursors';
begin
declare a varchar(255) default 'a - local variable';
declare c1 cursor for select a from t1;
select a as 'A local variable takes precedence over parameter';
open c1;
fetch c1 into i;
close c1;
select i as 'A local variable takes precedence over parameter in cursors';
begin
declare a varchar(255) default 'a - local variable in a nested compound statement';
declare c2 cursor for select a from t1;
select a as 'A local variable in a nested compound statement takes precedence over a local variable in the outer statement';
select a from t1 into i;
select i as 'A local variable in a nested compound statement takes precedence over table column';
open c2;
fetch c2 into i;
close c2;
select i as 'A local variable in a nested compound statement takes precedence over table column in cursors';
end;
end;
end|
call p1("a - stored procedure parameter")|
a
a - stored procedure parameter
Parameter takes precedence over table column
a - stored procedure parameter
Parameter takes precedence over table column in cursors
a - stored procedure parameter
A local variable takes precedence over parameter
a - local variable
A local variable takes precedence over parameter in cursors
a - local variable
A local variable in a nested compound statement takes precedence over a local variable in the outer statement
a - local variable in a nested compound statement
A local variable in a nested compound statement takes precedence over table column
a - local variable in a nested compound statement
A local variable in a nested compound statement takes precedence over table column in cursors
a - local variable in a nested compound statement
drop table t1,t2;

View file

@ -345,3 +345,16 @@ f1
2000-01-01
2002-02-02
drop table t1;
create table t1 (f1 int);
create table t2 (f2 int);
insert into t1 values(1),(2);
insert into t2 values(1),(1);
update t1,t2 set f1=3,f2=3 where f1=f2 and f1=1;
affected rows: 3
info: Rows matched: 3 Changed: 3 Warnings: 0
update t2 set f2=1;
update t1 set f1=1 where f1=3;
update t2,t1 set f1=3,f2=3 where f1=f2 and f1=1;
affected rows: 3
info: Rows matched: 3 Changed: 3 Warnings: 0
drop table t1,t2;

View file

@ -2424,3 +2424,30 @@ f1 sum(f2)
NULL 12
drop view v1;
drop table t1;
drop procedure if exists p1;
create procedure p1 () deterministic
begin
create view v1 as select 1;
end;
//
call p1();
show create view v1;
View Create View
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `1`
drop view v1;
drop procedure p1;
CREATE VIEW v1 AS SELECT 42 AS Meaning;
DROP FUNCTION IF EXISTS f1;
CREATE FUNCTION f1() RETURNS INTEGER
BEGIN
DECLARE retn INTEGER;
SELECT Meaning FROM v1 INTO retn;
RETURN retn;
END
//
CREATE VIEW v2 AS SELECT f1();
select * from v2;
f1()
42
drop view v2,v1;
drop function f1;

View file

@ -132,4 +132,65 @@ unlock tables;
set query_cache_wlock_invalidate=default;
drop view v1;
drop table t1;
flush status;
create table t1 (a int, b int);
create algorithm=temptable view v1 as select * from t1;
select * from v1;
a b
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from v1;
a b
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
insert into t1 values (1,1);
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
select * from v1;
a b
1 1
select * from v1;
a b
1 1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 2
drop view v1;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 2
drop table t1;
set GLOBAL query_cache_size=default;

View file

@ -39,6 +39,20 @@ check table t1;
drop table t1;
# Bug #14902 ANALYZE TABLE fails to recognize up-to-date tables
# minimal test case to get an error.
# The problem is happening when analysing table with FT index that
# contains stopwords only. The first execution of analyze table should
# mark index statistics as up to date so that next execution of this
# statement will end up with Table is up to date status.
create table t1 (a mediumtext, fulltext key key1(a)) charset utf8 collate utf8_general_ci engine myisam;
insert into t1 values ('hello');
analyze table t1;
analyze table t1;
drop table t1;
#
# procedure in PS BUG#13673
#

View file

@ -974,3 +974,22 @@ drop table t1;
# End varchar test
eval set storage_engine=$default;
#
# Test that we can create a large key
#
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
key (a,b,c,d)) engine=bdb;
drop table t1;
--error ER_TOO_LONG_KEY
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
e varchar(255) character set utf8,
key (a,b,c,d,e)) engine=bdb;
--echo End of 5.0 tests

View file

@ -17,7 +17,7 @@ while ($1)
SET @rnd= RAND();
SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
SET @id_rev= @rnd_max - @id;
SET @grp= CAST(128.0 * @rnd AS UNSIGNED);
SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
dec $1;
}

View file

@ -34,4 +34,13 @@ select * from t1 where value <=> value;
select * from t1 where id <=> value or value<=>id;
drop table t1,t2;
#
# Bug #12612: quoted bigint unsigned value and the use of "in" in where clause
#
create table t1 (a bigint unsigned);
insert into t1 values (4828532208463511553);
select * from t1 where a = '4828532208463511553';
select * from t1 where a in ('4828532208463511553');
drop table t1;
# End of 4.1 tests

View file

@ -703,3 +703,15 @@ alter table t1 drop primary key, add primary key (f2, f1);
explain select distinct f1 a, f1 b from t1;
explain select distinct f1, f2 from t1;
drop table t1;
#
# Bug #14920 Ordering aggregated result sets with composite primary keys
# corrupts resultset
#
create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
insert into t1 (c1,c2) values
(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
select distinct c1, c2 from t1 order by c2;
select c1,min(c2) as c2 from t1 group by c1 order by c2;
select c1,c2 from t1 group by c1,c2 order by c2;
drop table t1;

View file

@ -738,3 +738,13 @@ create table t1(a blob, b text charset utf8, c text charset ucs2);
select data_type, character_octet_length, character_maximum_length
from information_schema.columns where table_name='t1';
drop table t1;
#
# Bug#14476 `information_schema`.`TABLES`.`TABLE_TYPE` with empty value
#
create table t1 (f1 int(11));
create view v1 as select * from t1;
drop table t1;
select table_type from information_schema.tables
where table_name="v1";
drop view v1;

View file

@ -1751,3 +1751,22 @@ insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
drop table t1;
drop table t2;
commit;
#
# Test that we can create a large (>1K) key
#
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
key (a,b,c,d)) engine=innodb;
drop table t1;
--error ER_TOO_LONG_KEY
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
d varchar(255) character set utf8,
e varchar(255) character set utf8,
key (a,b,c,d,e)) engine=innodb;
--echo End of 5.0 tests

View file

@ -605,6 +605,7 @@ select * from t2 order by a;
drop table t1, t2;
drop database db1;
#
# Bug #9558 mysqldump --no-data db t1 t2 format still dumps data
#
@ -644,7 +645,7 @@ select '------ Testing with illegal table names ------' as test_sequence ;
--error 6
--exec $MYSQL_DUMP --compact --skip-comments mysqldump_test_db "\\t1" 2>&1
--error 6
--exec $MYSQL_DUMP --compact --skip-comments mysqldump_test_db "\\\\t1" 2>&1
@ -685,6 +686,7 @@ drop table t1, t2, t3;
drop database mysqldump_test_db;
use test;
#
# Bug #9657 mysqldump xml ( -x ) does not format NULL fields correctly
#
@ -1023,3 +1025,14 @@ SET SQL_MODE = @old_sql_mode;
DROP TRIGGER tr1;
DROP TABLE t1;
#
# Bug #13318: Bad result with empty field and --hex-blob
#
create table t1 (a binary(1), b blob);
insert into t1 values ('','');
--exec $MYSQL_DUMP --skip-comments --skip-extended-insert --hex-blob test t1
--exec $MYSQL_DUMP --skip-comments --hex-blob test t1
drop table t1;
# End of 4.1 tests

View file

@ -945,3 +945,49 @@ select "this will not be executed";
--enable_parsing
select "this will be executed";
--enable_query_log
#
# Bug #11731 mysqltest in multi-statement queries ignores errors in
# non-1st queries
#
# Failing multi statement query
--exec echo "delimiter ||||;" > var/tmp/bug11731.sql
--exec echo "create table t1 (a int primary key);" >> var/tmp/bug11731.sql
--exec echo "insert into t1 values (1);" >> var/tmp/bug11731.sql
--exec echo "select 'select-me';" >> var/tmp/bug11731.sql
--exec echo "insertz 'error query'||||" >> var/tmp/bug11731.sql
--exec echo "delimiter ;||||" >> var/tmp/bug11731.sql
--error 1
--exec $MYSQL_TEST -x $MYSQL_TEST_DIR/var/tmp/bug11731.sql 2>&1
drop table t1;
--error 1
--exec $MYSQL_TEST --record -x $MYSQL_TEST_DIR/var/tmp/bug11731.sql -R $MYSQL_TEST_DIR/var/tmp/bug11731.out
# The .out file should be empty, cat will fail!
--error 1
--exec cat $MYSQL_TEST_DIR/var/tmp/bug11731.out
drop table t1;
# Using expected error
--exec echo "delimiter ||||;" > var/tmp/bug11731.sql
--exec echo "--error 1064" >> var/tmp/bug11731.sql
--exec echo "create table t1 (a int primary key);" >> var/tmp/bug11731.sql
--exec echo "insert into t1 values (1);" >> var/tmp/bug11731.sql
--exec echo "select 'select-me';" >> var/tmp/bug11731.sql
--exec echo "insertz "error query"||||" >> var/tmp/bug11731.sql
--exec echo "delimiter ;||||" >> var/tmp/bug11731.sql
# These two should work since the error is expected
--exec $MYSQL_TEST -x $MYSQL_TEST_DIR/var/tmp/bug11731.sql 2>&1
drop table t1;
--exec $MYSQL_TEST --record -x $MYSQL_TEST_DIR/var/tmp/bug11731.sql -R $MYSQL_TEST_DIR/var/tmp/bug11731.out
--exec cat $MYSQL_TEST_DIR/var/tmp/bug11731.out
drop table t1;

View file

@ -743,6 +743,19 @@ show status like "Qcache_hits";
drop table t1;
#
# BUG#14652: Queries with leading '(' characters.
#
create table t1 (a int);
flush status;
(select a from t1) union (select a from t1);
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
(select a from t1) union (select a from t1);
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop table t1;
# SP cursors and selects with query cache (BUG#9715)
#
create table t1 (a int);

View file

@ -4823,6 +4823,127 @@ call bug14643_2()|
drop procedure bug14643_1|
drop procedure bug14643_2|
#
# BUG#14304: auto_increment field incorrect set in SP
#
--disable_warnings
drop procedure if exists bug14304|
drop table if exists t3, t4|
--enable_warnings
create table t3(a int primary key auto_increment)|
create table t4(a int primary key auto_increment)|
create procedure bug14304()
begin
insert into t3 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 set a=null;
insert into t4 select null as a;
insert into t3 set a=null;
insert into t3 set a=null;
select * from t3;
end|
call bug14304()|
drop procedure bug14304|
drop table t3, t4|
#
# BUG#14376: MySQL crash on scoped variable (re)initialization
#
--disable_warnings
drop procedure if exists bug14376|
--enable_warnings
create procedure bug14376()
begin
declare x int default x;
end|
# Not the error we want, but that's what we got for now...
--error ER_BAD_FIELD_ERROR
call bug14376()|
drop procedure bug14376|
create procedure bug14376()
begin
declare x int default 42;
begin
declare x int default x;
select x;
end;
end|
call bug14376()|
drop procedure bug14376|
create procedure bug14376(x int)
begin
declare x int default x;
select x;
end|
call bug14376(4711)|
drop procedure bug14376|
#
# Bug#5967 "Stored procedure declared variable used instead of column"
# The bug should be fixed later.
# Test precedence of names of parameters, variable declarations,
# variable declarations in nested compound statements, table columns,
# table columns in cursor declarations.
# According to the standard, table columns take precedence over
# variable declarations. In MySQL 5.0 it's vice versa.
#
drop procedure if exists p1|
drop table if exists t1|
create table t1 (a varchar(255))|
insert into t1 (a) values ("a - table column")|
create procedure p1(a varchar(255))
begin
declare i varchar(255);
declare c cursor for select a from t1;
select a;
select a from t1 into i;
select i as 'Parameter takes precedence over table column'; open c;
fetch c into i;
close c;
select i as 'Parameter takes precedence over table column in cursors';
begin
declare a varchar(255) default 'a - local variable';
declare c1 cursor for select a from t1;
select a as 'A local variable takes precedence over parameter';
open c1;
fetch c1 into i;
close c1;
select i as 'A local variable takes precedence over parameter in cursors';
begin
declare a varchar(255) default 'a - local variable in a nested compound statement';
declare c2 cursor for select a from t1;
select a as 'A local variable in a nested compound statement takes precedence over a local variable in the outer statement';
select a from t1 into i;
select i as 'A local variable in a nested compound statement takes precedence over table column';
open c2;
fetch c2 into i;
close c2;
select i as 'A local variable in a nested compound statement takes precedence over table column in cursors';
end;
end;
end|
call p1("a - stored procedure parameter")|
#
# BUG#NNNN: New bug synopsis

View file

@ -270,4 +270,21 @@ insert into t1 values('2000-01-01'),('0000-00-00');
update t1 set f1='2002-02-02' where f1 is null;
select * from t1;
drop table t1;
#
# Bug#15028 Multitable update returns different numbers of matched rows
# depending on table order
create table t1 (f1 int);
create table t2 (f2 int);
insert into t1 values(1),(2);
insert into t2 values(1),(1);
--enable_info
update t1,t2 set f1=3,f2=3 where f1=f2 and f1=1;
--disable_info
update t2 set f2=1;
update t1 set f1=1 where f1=3;
--enable_info
update t2,t1 set f1=3,f2=3 where f1=f2 and f1=1;
--disable_info
drop table t1,t2;
# End of 4.1 tests

View file

@ -2280,3 +2280,43 @@ create view v1 as select * from t1;
select f1, sum(f2) from v1 group by f1;
drop view v1;
drop table t1;
#
# BUG#14885: incorrect SOURCE in view created in a procedure
# TODO: here SOURCE string must be shown when it will be possible
#
--disable_warnings
drop procedure if exists p1;
--enable_warnings
delimiter //;
create procedure p1 () deterministic
begin
create view v1 as select 1;
end;
//
delimiter ;//
call p1();
show create view v1;
drop view v1;
drop procedure p1;
#
# BUG#15096: using function with view for view creation
#
CREATE VIEW v1 AS SELECT 42 AS Meaning;
--disable_warnings
DROP FUNCTION IF EXISTS f1;
--enable_warnings
DELIMITER //;
CREATE FUNCTION f1() RETURNS INTEGER
BEGIN
DECLARE retn INTEGER;
SELECT Meaning FROM v1 INTO retn;
RETURN retn;
END
//
DELIMITER ;//
CREATE VIEW v2 AS SELECT f1();
select * from v2;
drop view v2,v1;
drop function f1;

View file

@ -96,4 +96,35 @@ unlock tables;
set query_cache_wlock_invalidate=default;
drop view v1;
drop table t1;
#
# BUG#15119: returning temptable view from the query cache.
#
flush status;
create table t1 (a int, b int);
create algorithm=temptable view v1 as select * from t1;
select * from v1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from v1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
insert into t1 values (1,1);
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from v1;
select * from v1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop view v1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop table t1;
# Reset default environment.
set GLOBAL query_cache_size=default;

View file

@ -886,13 +886,17 @@ Backup::checkNodeFail(Signal* signal,
pos= &ref->nodeId - signal->getDataPtr();
break;
}
case GSN_WAIT_GCP_REQ:
case GSN_DROP_TRIG_REQ:
case GSN_CREATE_TRIG_REQ:
case GSN_ALTER_TRIG_REQ:
case GSN_WAIT_GCP_REQ:
ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail);
return;
case GSN_UTIL_SEQUENCE_REQ:
case GSN_UTIL_LOCK_REQ:
case GSN_DROP_TRIG_REQ:
return;
default:
ndbrequire(false);
}
for(Uint32 i = 0; (i = mask.find(i+1)) != NdbNodeBitmask::NotFound; )
@ -1903,7 +1907,7 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
const Uint32 nodeId = refToNode(signal->senderBlockRef());
const Uint32 noOfBytes = conf->noOfBytes;
const Uint32 noOfRecords = conf->noOfRecords;
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
@ -1980,7 +1984,7 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* signal)
}
}
}
ndbrequire(false);
goto err;
done:
ptr.p->masterData.sendCounter--;
@ -1992,7 +1996,8 @@ done:
masterAbort(signal, ptr);
return;
}//if
err:
AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend();
ord->backupId = ptr.p->backupId;
ord->backupPtr = ptr.i;

View file

@ -3953,7 +3953,7 @@ longlong Field_float::val_int(void)
else
#endif
memcpy_fixed((byte*) &j,ptr,sizeof(j));
return ((longlong) j);
return (longlong) rint(j);
}
@ -4241,7 +4241,7 @@ longlong Field_double::val_int(void)
else
#endif
doubleget(j,ptr);
return ((longlong) j);
return (longlong) rint(j);
}
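
The field.cc hunk above (and the matching item.cc, item.h, item_func.*, item_sum.* hunks below) converts REAL values to integers with rint(), i.e. round to nearest, instead of truncating or adding a hand-rolled +/-0.5. That is presumably also why the expected cast(rand(10)*@precision ...) values earlier in this commit shift by one in places, and why one test lowers 128.0 to 127.0 for @grp. A tiny standalone contrast of the two conversions; 6570515219.5 is only an illustrative value:

/* Standalone contrast of truncation vs rint()-based rounding. */
#include <cmath>
#include <cstdio>

int main()
{
  double v= 6570515219.5;
  printf("truncation: %lld\n", (long long) v);         /* 6570515219 */
  printf("rint():     %lld\n", (long long) rint(v));   /* 6570515220 */
  return 0;
}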

View file

@ -2459,7 +2459,7 @@ longlong Item_param::val_int()
{
switch (state) {
case REAL_VALUE:
return (longlong) (value.real + (value.real > 0 ? 0.5 : -0.5));
return (longlong) rint(value.real);
case INT_VALUE:
return value.integer;
case DECIMAL_VALUE:
@ -5441,7 +5441,7 @@ void Item_cache_real::store(Item *item)
longlong Item_cache_real::val_int()
{
DBUG_ASSERT(fixed == 1);
return (longlong) (value+(value > 0 ? 0.5 : -0.5));
return (longlong) rint(value);
}

View file

@ -159,7 +159,7 @@ struct Hybrid_type_traits
{ val->real/= ulonglong2double(u); }
virtual longlong val_int(Hybrid_type *val, bool unsigned_flag) const
{ return (longlong) val->real; }
{ return (longlong) rint(val->real); }
virtual double val_real(Hybrid_type *val) const { return val->real; }
virtual my_decimal *val_decimal(Hybrid_type *val, my_decimal *buf) const;
virtual String *val_str(Hybrid_type *val, String *buf, uint8 decimals) const;
@ -1354,7 +1354,7 @@ public:
{
return LONGLONG_MAX;
}
return (longlong) (value+(value > 0 ? 0.5 : -0.5));
return (longlong) rint(value);
}
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);

View file

@ -25,6 +25,8 @@
#include <m_ctype.h>
#include "sql_select.h"
static bool convert_constant_item(THD *thd, Field *field, Item **item);
static Item_result item_store_type(Item_result a,Item_result b)
{
if (a == STRING_RESULT || b == STRING_RESULT)
@ -45,14 +47,37 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems)
type[0]= item_store_type(type[0], items[i]->result_type());
}
static void agg_cmp_type(Item_result *type, Item **items, uint nitems)
static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
{
uint i;
Field *field= NULL;
bool all_constant= TRUE;
/* If the first argument is a FIELD_ITEM, pull out the field. */
if (items[0]->type() == Item::FIELD_ITEM)
field=((Item_field *)items[0])->field;
/* But if it can't be compared as a longlong, we don't really care. */
if (field && !field->can_be_compared_as_longlong())
field= NULL;
type[0]= items[0]->result_type();
for (i=1 ; i < nitems ; i++)
for (i= 1; i < nitems; i++)
{
type[0]= item_cmp_type(type[0], items[i]->result_type());
if (field && !convert_constant_item(thd, field, &items[i]))
all_constant= FALSE;
}
/*
If we had a field that can be compared as a longlong, and all constant
items, then the aggregate result will be an INT_RESULT.
*/
if (field && all_constant)
type[0]= INT_RESULT;
}
static void my_coll_agg_error(DTCollation &c1, DTCollation &c2,
const char *fname)
{
@ -1051,32 +1076,11 @@ void Item_func_between::fix_length_and_dec()
*/
if (!args[0] || !args[1] || !args[2])
return;
agg_cmp_type(&cmp_type, args, 3);
agg_cmp_type(thd, &cmp_type, args, 3);
if (cmp_type == STRING_RESULT &&
agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV))
return;
/*
Make a special ease of compare with date/time and longlong fields.
They are compared as integers, so for const item this time-consuming
conversion can be done only once, not for every single comparison
*/
if (args[0]->type() == FIELD_ITEM)
{
Field *field=((Item_field*) args[0])->field;
if (!thd->is_context_analysis_only() &&
field->can_be_compared_as_longlong())
{
/*
The following can't be recoded with || as convert_constant_item
changes the argument
*/
if (convert_constant_item(thd, field,&args[1]))
cmp_type=INT_RESULT; // Works for all types.
if (convert_constant_item(thd, field,&args[2]))
cmp_type=INT_RESULT; // Works for all types.
}
}
}
@ -1722,6 +1726,7 @@ void Item_func_case::fix_length_and_dec()
{
Item **agg;
uint nagg;
THD *thd= current_thd;
if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1))))
return;
@ -1753,7 +1758,7 @@ void Item_func_case::fix_length_and_dec()
for (nagg= 0; nagg < ncases/2 ; nagg++)
agg[nagg+1]= args[nagg*2];
nagg++;
agg_cmp_type(&cmp_type, agg, nagg);
agg_cmp_type(thd, &cmp_type, agg, nagg);
if ((cmp_type == STRING_RESULT) &&
agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV))
return;
@ -2346,7 +2351,7 @@ void Item_func_in::fix_length_and_dec()
uint const_itm= 1;
THD *thd= current_thd;
agg_cmp_type(&cmp_type, args, arg_count);
agg_cmp_type(thd, &cmp_type, args, arg_count);
if (cmp_type == STRING_RESULT &&
agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV))
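
The rewritten agg_cmp_type() above pulls out the left-hand field, converts the remaining constant arguments with convert_constant_item(), and aggregates the comparison type to INT_RESULT when everything converts. That is what lets the Bug #12612 tests (bigint unsigned compared against the quoted literal '4828532208463511553', with = and IN) find the row: a comparison through a double would collapse nearby values. A small sketch of that precision point, using the test's value, not server code:

/* Why the comparison must be done as integers: the Bug #12612 test value
   does not fit in a double's 53-bit mantissa, so nearby bigints collide. */
#include <cstdio>

int main()
{
  unsigned long long stored= 4828532208463511553ULL;
  unsigned long long other=  4828532208463511552ULL;   /* differs by one */
  printf("equal as double:  %d\n", (double) stored == (double) other);   /* 1 */
  printf("equal as integer: %d\n", stored == other);                     /* 0 */
  return 0;
}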

View file

@ -735,7 +735,7 @@ longlong Item_func_numhybrid::val_int()
case INT_RESULT:
return int_op();
case REAL_RESULT:
return (longlong)real_op();
return (longlong) rint(real_op());
case STRING_RESULT:
{
int err_not_used;

View file

@ -199,7 +199,7 @@ public:
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *decimal_value);
longlong val_int()
{ DBUG_ASSERT(fixed == 1); return (longlong) val_real(); }
{ DBUG_ASSERT(fixed == 1); return (longlong) rint(val_real()); }
enum Item_result result_type () const { return REAL_RESULT; }
void fix_length_and_dec()
{ decimals= NOT_FIXED_DEC; max_length= float_length(decimals); }
@ -943,7 +943,7 @@ class Item_func_udf_float :public Item_udf_func
longlong val_int()
{
DBUG_ASSERT(fixed == 1);
return (longlong) Item_func_udf_float::val_real();
return (longlong) rint(Item_func_udf_float::val_real());
}
my_decimal *val_decimal(my_decimal *dec_buf)
{

View file

@ -452,7 +452,7 @@ longlong Item_sum_sum::val_int()
&result);
return result;
}
return (longlong) val_real();
return (longlong) rint(val_real());
}
@ -1285,7 +1285,7 @@ longlong Item_sum_hybrid::val_int()
return sum_int;
}
default:
return (longlong) Item_sum_hybrid::val_real();
return (longlong) rint(Item_sum_hybrid::val_real());
}
}
@ -2001,7 +2001,7 @@ double Item_avg_field::val_real()
longlong Item_avg_field::val_int()
{
return (longlong) val_real();
return (longlong) rint(val_real());
}

View file

@ -126,7 +126,7 @@ public:
longlong val_int()
{
DBUG_ASSERT(fixed == 1);
return (longlong) val_real(); /* Real as default */
return (longlong) rint(val_real()); /* Real as default */
}
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
@ -392,7 +392,7 @@ public:
bool add();
double val_real();
// In SPs we might force the "wrong" type with select into a declare variable
longlong val_int() { return (longlong)val_real(); }
longlong val_int() { return (longlong) rint(val_real()); }
my_decimal *val_decimal(my_decimal *);
String *val_str(String *str);
void reset_field();
@ -421,7 +421,7 @@ public:
enum Type type() const {return FIELD_VARIANCE_ITEM; }
double val_real();
longlong val_int()
{ /* can't be fix_fields()ed */ return (longlong) val_real(); }
{ /* can't be fix_fields()ed */ return (longlong) rint(val_real()); }
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
bool is_null() { (void) val_int(); return null_value; }
@ -699,7 +699,7 @@ class Item_sum_udf_float :public Item_udf_sum
longlong val_int()
{
DBUG_ASSERT(fixed == 1);
return (longlong) Item_sum_udf_float::val_real();
return (longlong) rint(Item_sum_udf_float::val_real());
}
double val_real();
String *val_str(String*str);

View file

@ -954,8 +954,12 @@ int sp_head::execute(THD *thd)
m_first_instance->m_first_free_instance= m_next_cached_sp;
DBUG_PRINT("info", ("first free for 0x%lx ++: 0x%lx->0x%lx, level: %lu, flags %x",
(ulong)m_first_instance, this, m_next_cached_sp,
m_next_cached_sp->m_recursion_level,
m_next_cached_sp->m_flags));
(m_next_cached_sp ?
m_next_cached_sp->m_recursion_level :
0),
(m_next_cached_sp ?
m_next_cached_sp->m_flags :
0)));
/*
Check that if there are not any instances after this one then
pointer to the last instance points on this instance or if there are
@ -1069,7 +1073,7 @@ int sp_head::execute(THD *thd)
}
/* we should cleanup free_list and memroot, used by instruction */
thd->free_items();
thd->cleanup_after_query();
free_root(&execute_mem_root, MYF(0));
/*

View file

@ -52,7 +52,7 @@ sp_cond_check(LEX_STRING *sqlstate)
sp_pcontext::sp_pcontext(sp_pcontext *prev)
: Sql_alloc(), m_psubsize(0), m_csubsize(0), m_hsubsize(0),
m_handlers(0), m_parent(prev)
m_handlers(0), m_parent(prev), m_pboundary(0)
{
VOID(my_init_dynamic_array(&m_pvar, sizeof(sp_pvar_t *), 16, 8));
VOID(my_init_dynamic_array(&m_cond, sizeof(sp_cond_type_t *), 16, 8));
@ -150,7 +150,7 @@ sp_pcontext::diff_cursors(sp_pcontext *ctx)
sp_pvar_t *
sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped)
{
uint i= m_pvar.elements;
uint i= m_pvar.elements - m_pboundary;
while (i--)
{

View file

@ -174,6 +174,16 @@ class sp_pcontext : public Sql_alloc
sp_pvar_t *
find_pvar(uint offset);
/*
Set the current scope boundary (for default values)
The argument is the number of variables to skip.
*/
inline void
declare_var_boundary(uint n)
{
m_pboundary= n;
}
//
// Labels
//
@ -282,6 +292,13 @@ private:
uint m_poffset; // Variable offset for this context
uint m_coffset; // Cursor offset for this context
/*
Boundary for finding variables in this context. This is the number
of variables currently "invisible" to default clauses.
This is normally 0, but will be larger during parsing of
DECLARE ... DEFAULT, to get the scope right for DEFAULT values.
*/
uint m_pboundary;
DYNAMIC_ARRAY m_pvar; // Parameters/variables
DYNAMIC_ARRAY m_cond; // Conditions
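
declare_var_boundary() above lets the parser temporarily hide the variables of the DECLARE currently being parsed, so a DEFAULT expression resolves names in the enclosing scope instead of to the variable itself (the Bug #14376 tests: declare x int default x is an unknown column at the outermost level, but picks up the outer x, or the parameter x, when one exists). A toy model of such a boundary-aware lookup, with invented structure, not sp_pcontext:

/* Toy model (invented structure) of a lookup that skips the last "boundary"
   declarations, so DEFAULT sees the enclosing scope and not the variable
   being declared. */
#include <cstdio>
#include <string>
#include <vector>

struct Scope
{
  std::vector<std::string> vars;   /* declarations, innermost last */
  unsigned boundary;               /* trailing vars invisible to DEFAULT */

  int find(const std::string &name) const
  {
    for (size_t i= vars.size() - boundary; i-- > 0; )
      if (vars[i] == name)
        return (int) i;
    return -1;                     /* unknown column, as in the first test */
  }
};

int main()
{
  Scope s;
  s.boundary= 0;
  s.vars.push_back("x");           /* outer: declare x int default 42 */
  s.vars.push_back("x");           /* inner: declare x int default x ... */
  s.boundary= 1;                   /* ... with the inner x still hidden */
  printf("DEFAULT x resolves to slot %d\n", s.find("x"));   /* slot 0, the outer x */
  return 0;
}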

View file

@ -977,21 +977,31 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err;
}
/*
Test if the query is a SELECT
(pre-space is removed in dispatch_command).
First '/' looks like comment before command it is not
frequently appeared in real lihe, consequently we can
check all such queries, too.
*/
if ((my_toupper(system_charset_info, sql[0]) != 'S' ||
my_toupper(system_charset_info, sql[1]) != 'E' ||
my_toupper(system_charset_info,sql[2]) !='L') &&
sql[0] != '/')
{
DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached"));
goto err;
uint i= 0;
/*
Skip '(' characters in queries like following:
(select a from t1) union (select a from t1);
*/
while (sql[i]=='(')
i++;
/*
Test if the query is a SELECT
(pre-space is removed in dispatch_command)
First '/' looks like comment before command it is not
frequently appeared in real lihe, consequently we can
check all such queries, too.
*/
if ((my_toupper(system_charset_info, sql[i]) != 'S' ||
my_toupper(system_charset_info, sql[i + 1]) != 'E' ||
my_toupper(system_charset_info, sql[i + 2]) != 'L') &&
sql[i] != '/')
{
DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached"));
goto err;
}
}
STRUCT_LOCK(&structure_guard_mutex);
@ -2184,7 +2194,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
tables_used;
tables_used= tables_used->next_global, n++, block_table++)
{
if (tables_used->derived)
if (tables_used->derived && !tables_used->view)
{
DBUG_PRINT("qcache", ("derived table skipped"));
n--;
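
The sql_cache.cc change above skips leading '(' characters before testing whether the statement is a SELECT, so parenthesized unions like those in the BUG#14652 test are both looked up in and stored into the query cache. A minimal sketch of the prefix test after the fix, written as a plain function rather than the server code:

/* Minimal sketch of the prefix check after the fix; not the server code. */
#include <cctype>
#include <cstdio>

static bool looks_like_select(const char *sql)
{
  unsigned i= 0;
  while (sql[i] == '(')                         /* skip '(' of parenthesized unions */
    i++;
  return (toupper((unsigned char) sql[i])     == 'S' &&
          toupper((unsigned char) sql[i + 1]) == 'E' &&
          toupper((unsigned char) sql[i + 2]) == 'L') ||
         sql[i] == '/';                         /* or a leading version comment */
}

int main()
{
  printf("%d\n", looks_like_select("(select a from t1) union (select a from t1)"));  /* 1 */
  printf("%d\n", looks_like_select("insert into t1 values (1)"));                    /* 0 */
  return 0;
}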

View file

@ -1815,6 +1815,7 @@ void TMP_TABLE_PARAM::init()
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
table_charset= 0;
precomputed_group_by= 0;
}
@ -1948,6 +1949,7 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup,
backup->last_insert_id= last_insert_id;
backup->next_insert_id= next_insert_id;
backup->insert_id_used= insert_id_used;
backup->clear_next_insert_id= clear_next_insert_id;
backup->limit_found_rows= limit_found_rows;
backup->examined_row_count= examined_row_count;
backup->sent_row_count= sent_row_count;
@ -1999,6 +2001,7 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup)
last_insert_id= backup->last_insert_id;
next_insert_id= backup->next_insert_id;
insert_id_used= backup->insert_id_used;
clear_next_insert_id= backup->clear_next_insert_id;
limit_found_rows= backup->limit_found_rows;
sent_row_count= backup->sent_row_count;
client_capabilities= backup->client_capabilities;

View file

@ -1090,7 +1090,7 @@ public:
ha_rows cuted_fields, sent_row_count, examined_row_count;
ulong client_capabilities;
uint in_sub_stmt;
bool enable_slow_log, insert_id_used;
bool enable_slow_log, insert_id_used, clear_next_insert_id;
my_bool no_send_ok;
SAVEPOINT *savepoints;
};
@ -1822,11 +1822,18 @@ public:
uint convert_blob_length;
CHARSET_INFO *table_charset;
bool schema_table;
/*
True if GROUP BY and its aggregate functions are already computed
by a table access method (e.g. by loose index scan). In this case
query execution should not perform aggregation and should treat
aggregate functions as normal functions.
*/
bool precomputed_group_by;
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
group_length(0), group_null_parts(0), convert_blob_length(0),
schema_table(0)
schema_table(0), precomputed_group_by(0)
{}
~TMP_TABLE_PARAM()
{

View file

@ -738,8 +738,8 @@ typedef struct st_lex
TABLE_LIST **query_tables_last;
/* store original leaf_tables for INSERT SELECT and PS/SP */
TABLE_LIST *leaf_tables_insert;
char *create_view_start;
char *create_view_select_start;
/* Position (first character index) of SELECT of CREATE VIEW statement */
uint create_view_select_start;
/*
The definer of the object being created (view, trigger, stored routine).

View file

@ -1007,6 +1007,20 @@ JOIN::optimize()
}
having= 0;
/*
The loose index scan access method guarantees that all grouping or
duplicate row elimination (for distinct) is already performed
during data retrieval, and that all MIN/MAX functions are already
computed for each group. Thus all MIN/MAX functions should be
treated as regular functions, and there is no need to perform
grouping in the main execution loop.
Notice that currently loose index scan is applicable only for
single table queries, thus it is sufficient to test only the first
join_tab element of the plan for its access method.
*/
if (join_tab->is_using_loose_index_scan())
tmp_table_param.precomputed_group_by= TRUE;
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
{
@ -1410,6 +1424,15 @@ JOIN::exec()
else
{
/* group data to new table */
/*
If the access method is loose index scan then all MIN/MAX
functions are precomputed, and should be treated as regular
functions. See extended comment in JOIN::exec.
*/
if (curr_join->join_tab->is_using_loose_index_scan())
curr_join->tmp_table_param.precomputed_group_by= TRUE;
if (!(curr_tmp_table=
exec_tmp_table2= create_tmp_table(thd,
&curr_join->tmp_table_param,
@ -8279,6 +8302,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
uint i,field_count,null_count,null_pack_length;
uint copy_func_count= param->func_count;
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
@ -8342,6 +8366,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
field_count=param->field_count+param->func_count+param->sum_func_count;
hidden_field_count=param->hidden_field_count;
/*
When loose index scan is employed as access method, it already
computes all groups and the result of all aggregate functions. We
make space for the items of the aggregate function in the list of
functions TMP_TABLE_PARAM::items_to_copy, so that the values of
these items are stored in the temporary table.
*/
if (param->precomputed_group_by)
copy_func_count+= param->sum_func_count;
init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);
if (!multi_alloc_root(&own_root,
@ -8349,7 +8383,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&reg_field, sizeof(Field*) * (field_count+1),
&blob_field, sizeof(uint)*(field_count+1),
&from_field, sizeof(Field*)*field_count,
&copy_func, sizeof(*copy_func)*(param->func_count+1),
&copy_func, sizeof(*copy_func)*(copy_func_count+1),
&param->keyinfo, sizeof(*param->keyinfo),
&key_part_info,
sizeof(*key_part_info)*(param->group_parts+1),
@ -9241,11 +9275,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
Next_select_func setup_end_select_func(JOIN *join)
{
TABLE *table= join->tmp_table;
TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param;
Next_select_func end_select;
/* Set up select_end */
if (table)
{
if (table->group && join->tmp_table_param.sum_func_count)
if (table->group && tmp_tbl->sum_func_count)
{
if (table->s->keys)
{
@ -9258,7 +9294,7 @@ Next_select_func setup_end_select_func(JOIN *join)
end_select=end_unique_update;
}
}
else if (join->sort_and_group)
else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
{
DBUG_PRINT("info",("Using end_write_group"));
end_select=end_write_group;
@ -9267,19 +9303,27 @@ Next_select_func setup_end_select_func(JOIN *join)
{
DBUG_PRINT("info",("Using end_write"));
end_select=end_write;
if (tmp_tbl->precomputed_group_by)
{
/*
A preceding call to create_tmp_table in the case when loose
index scan is used guarantees that
TMP_TABLE_PARAM::items_to_copy has enough space for the group
by functions. It is OK here to use memcpy since we copy
Item_sum pointers into an array of Item pointers.
*/
memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
join->sum_funcs,
sizeof(Item*)*tmp_tbl->sum_func_count);
tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
}
}
}
else
{
/* Test if data is accessed via QUICK_GROUP_MIN_MAX_SELECT. */
bool is_using_quick_group_min_max_select=
(join->join_tab->select && join->join_tab->select->quick &&
(join->join_tab->select->quick->get_type() ==
QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
if ((join->sort_and_group ||
(join->procedure && join->procedure->flags & PROC_GROUP)) &&
!is_using_quick_group_min_max_select)
!tmp_tbl->precomputed_group_by)
end_select= end_send_group;
else
end_select= end_send;
@ -10553,7 +10597,6 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
copy_fields(&join->tmp_table_param);
copy_funcs(join->tmp_table_param.items_to_copy);
#ifdef TO_BE_DELETED
if (!table->uniques) // If not unique handling
{
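
The sql_select.cc comments above describe the contract behind precomputed_group_by: with loose index scan (QUICK_GROUP_MIN_MAX) as the access method, every retrieved row is already one row per group with its MIN/MAX computed, so the write/send phase must copy those values through instead of aggregating again, and create_tmp_table() reserves extra items_to_copy slots for them. A toy model of that switch, with invented names, not the JOIN machinery:

/* Toy model (invented names) of the precomputed_group_by switch: rows coming
   from a loose index scan are already one per group, so they are written out
   as-is instead of being re-aggregated. */
#include <cstdio>
#include <vector>

struct GroupRow { int c1; int min_c2; };

static void write_result(const std::vector<GroupRow> &rows, bool precomputed_group_by)
{
  if (precomputed_group_by)
  {
    for (const GroupRow &r : rows)              /* end_write / end_send path */
      printf("%d %d\n", r.c1, r.min_c2);
  }
  else
  {
    /* end_write_group / end_send_group would re-aggregate here (omitted) */
    printf("(aggregating %zu rows)\n", rows.size());
  }
}

int main()
{
  /* groups as in the Bug #14920 test: c1 in {10,20,30}, min(c2) = 1,4,7 */
  write_result({{10, 1}, {20, 4}, {30, 7}}, true);
  return 0;
}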

View file

@ -140,6 +140,12 @@ typedef struct st_join_table {
nested_join_map embedding_map;
void cleanup();
inline bool is_using_loose_index_scan()
{
return (select && select->quick &&
(select->quick->get_type() ==
QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
}
} JOIN_TAB;
enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool

View file

@ -2315,6 +2315,12 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
there was errors during opening tables
*/
const char *error= thd->net.last_error;
if (tables->view)
table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
else if (tables->schema_table)
table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs);
else
table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs);
table->field[20]->store(error, strlen(error), cs);
thd->clear_error();
}

View file

@ -1290,22 +1290,23 @@ bool multi_update::send_data(List<Item> &not_used_values)
int error;
TABLE *tmp_table= tmp_tables[offset];
fill_record(thd, tmp_table->field+1, *values_for_table[offset], 1);
found++;
/* Store pointer to row */
memcpy((char*) tmp_table->field[0]->ptr,
(char*) table->file->ref, table->file->ref_length);
/* Write row, ignoring duplicated updates to a row */
if ((error= tmp_table->file->write_row(tmp_table->record[0])) &&
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
if (error= tmp_table->file->write_row(tmp_table->record[0]))
{
if (create_myisam_from_heap(thd, tmp_table, tmp_table_param + offset,
error, 1))
if (error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE &&
create_myisam_from_heap(thd, tmp_table,
tmp_table_param + offset, error, 1))
{
do_update=0;
DBUG_RETURN(1); // Not a table_is_full error
}
}
else
found++;
}
}
DBUG_RETURN(0);

View file

@ -350,15 +350,6 @@ bool mysql_create_view(THD *thd,
*/
for (tbl= lex->query_tables; tbl; tbl= tbl->next_global)
{
/* is this table temporary and is not view? */
if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view &&
!tbl->schema_table)
{
my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias);
res= TRUE;
goto err;
}
/* is this table view and the same view which we creates now? */
if (tbl->view &&
strcmp(tbl->view_db.str, view->db) == 0 &&
@ -370,11 +361,29 @@ bool mysql_create_view(THD *thd,
}
/*
Copy the privileges of the underlying VIEWs which were filled by
fill_effective_table_privileges
(they were not copied at derived tables processing)
tbl->table can be NULL when tbl is a placeholder for a view
that is indirectly referenced via a stored function from the
view being created. We don't check these indirectly
referenced views in CREATE VIEW so they don't have table
object.
*/
tbl->table->grant.privilege= tbl->grant.privilege;
if (tbl->table)
{
/* is this table temporary and is not view? */
if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view &&
!tbl->schema_table)
{
my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias);
res= TRUE;
goto err;
}
/*
Copy the privileges of the underlying VIEWs which were filled by
fill_effective_table_privileges
(they were not copied at derived tables processing)
*/
tbl->table->grant.privilege= tbl->grant.privilege;
}
}
/* prepare select to resolve all fields */
@ -641,10 +650,9 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
/* fill structure */
view->query.str= (char*)str.ptr();
view->query.length= str.length()-1; // we do not need last \0
view->source.str= thd->lex->create_view_select_start;
view->source.str= thd->query + thd->lex->create_view_select_start;
view->source.length= (thd->query_length -
(thd->lex->create_view_select_start -
thd->lex->create_view_start));
thd->lex->create_view_select_start);
view->file_version= 1;
view->calc_md5(md5);
view->md5.str= md5;

View file

@ -1592,7 +1592,12 @@ sp_decls:
sp_decl:
DECLARE_SYM sp_decl_idents type
{ Lex->sphead->reset_lex(YYTHD); }
{
LEX *lex= Lex;
lex->sphead->reset_lex(YYTHD);
lex->spcont->declare_var_boundary($2);
}
sp_opt_default
{
LEX *lex= Lex;
@ -1623,6 +1628,7 @@ sp_decl:
lex->sphead->add_instr(in);
ctx->set_default(off, it);
}
ctx->declare_var_boundary(0);
lex->sphead->restore_lex(YYTHD);
$$.vars= $2;
$$.conds= $$.hndlrs= $$.curs= 0;
@ -3374,7 +3380,6 @@ alter:
THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->sql_command= SQLCOM_CREATE_VIEW;
lex->create_view_start= thd->query;
lex->create_view_mode= VIEW_ALTER;
/* first table in list is target VIEW name */
lex->select_lex.add_table_to_list(thd, $6, NULL, 0);
@ -8978,7 +8983,6 @@ view_tail:
THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->sql_command= SQLCOM_CREATE_VIEW;
lex->create_view_start= thd->query;
/* first table in list is target VIEW name */
if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))
YYABORT;
@ -9009,11 +9013,21 @@ view_list:
view_select:
SELECT_SYM remember_name select_init2
{
Lex->create_view_select_start= $2;
THD *thd=YYTHD;
LEX *lex= thd->lex;
char *stmt_beg= (lex->sphead ?
(char *)lex->sphead->m_tmp_query :
thd->query);
lex->create_view_select_start= $2 - stmt_beg;
}
| '(' remember_name select_paren ')' union_opt
{
Lex->create_view_select_start= $2;
THD *thd=YYTHD;
LEX *lex= thd->lex;
char *stmt_beg= (lex->sphead ?
(char *)lex->sphead->m_tmp_query :
thd->query);
lex->create_view_select_start= $2 - stmt_beg;
}
;
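
Bug#14885 in the hunks above: while CREATE VIEW is parsed inside a stored routine, the text being parsed is the routine body (sphead->m_tmp_query) rather than thd->query, so the raw create_view_start/create_view_select_start pointers could point into the wrong buffer and the view's SOURCE came out wrong. The grammar now stores the SELECT position as an offset from whichever buffer is actually being parsed, and mysql_register_view() rebuilds the source string from thd->query plus that offset. A small sketch of why an offset travels between buffers while a pointer does not, with made-up strings:

/* Sketch of pointer vs offset: the same statement text can live in different
   buffers, and only an offset can be re-applied to either of them. */
#include <cstdio>
#include <cstring>

int main()
{
  const char *parse_buf= "create view v1 as select 1";   /* buffer seen by the parser */
  const char *exec_buf=  "create view v1 as select 1";   /* buffer used when writing the .frm */

  /* offset of the SELECT, computed against the buffer actually being parsed */
  size_t select_start= strstr(parse_buf, "select") - parse_buf;

  /* a raw pointer into parse_buf is meaningless for exec_buf; the offset is not */
  printf("view source: \"%s\"\n", exec_buf + select_start);   /* select 1 */
  return 0;
}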

View file

@ -50,11 +50,7 @@
#define MAX_SYS_VAR_LENGTH 32
#define MAX_KEY 64 /* Max used keys */
#define MAX_REF_PARTS 16 /* Max parts used as ref */
#if SIZEOF_CHARP > 4
#define MAX_KEY_LENGTH 3072 /* max possible key, if 64 bits */
#else
#define MAX_KEY_LENGTH 1024 /* max possible key, if 32 bits */
#endif
#define MAX_KEY_LENGTH 3072 /* max possible key */
#if SIZEOF_OFF_T > 4
#define MAX_REFLENGTH 8 /* Max length for record ref */
#else