mariadb/mysql-test/t/sum_distinct-big.test
Igor Babaev 323fdd7ac6 Fixed bug mdev-4311 (bug #68749).
This bug was introduced by the patch for WL#3220.
If the memory allocated for the tree that stores the unique elements
to be counted is not big enough to hold all of them, then an external
file is used to store the elements.
The unique elements are guaranteed not to be nulls, so when
reading them from the file we don't have to care about the null
flags of the read values. However, the null flag should be cleared
at the very beginning of the process. If it is not, and the last value
written into the record buffer for the field whose distinct values
need to be counted happens to be null, then all values read from the
file are considered to be nulls and are not counted.
The fix does not remove a possible null flag from the read values.
Rather, it just counts the values in the same way it was done
before WL#3220.
2013-03-27 19:17:32 -07:00
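A minimal sketch of the failure scenario this patch and the test below
cover (the table name, the sample rows, and the 256 KiB setting are
illustrative only, not part of the committed test):

# Enough distinct ids that the Unique tree no longer fits in memory,
# plus a NULL stored last into the field's record buffer.
CREATE TABLE t (id INTEGER) ENGINE=InnoDB;
INSERT INTO t VALUES (1), (2), (3);
# ... insert many more distinct values here ...
INSERT INTO t VALUES (NULL);
# Shrink tmp_table_size so the unique elements spill to a file.
SET @@tmp_table_size=1024*256;
# On an affected build the values read back from the file are treated
# as NULL and skipped, so the count comes out far too small.
SELECT COUNT(DISTINCT id) FROM t;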


#
# Various tests for SUM(DISTINCT ...)
#
--source include/big_test.inc
--source include/have_innodb.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
--enable_warnings
set @save_tmp_table_size=@@tmp_table_size;
set @save_max_heap_table_size=@@max_heap_table_size;
set @save_storage_engine=@@storage_engine;
#
# Test the case when the distinct values don't fit in memory and
# filesort is used (see uniques.cc:merge_walk)
#
set storage_engine=MYISAM;
CREATE TABLE t1 (id INTEGER);
CREATE TABLE t2 (id INTEGER);
INSERT INTO t1 (id) VALUES (1), (1), (1), (1);
INSERT INTO t1 (id) SELECT id FROM t1; /* 8 */
INSERT INTO t1 (id) SELECT id FROM t1; /* 16 */
INSERT INTO t1 (id) SELECT id FROM t1; /* 32 */
INSERT INTO t1 (id) SELECT id FROM t1; /* 64 */
INSERT INTO t1 (id) SELECT id FROM t1; /* 128 */
INSERT INTO t1 SELECT id+1 FROM t1;
INSERT INTO t1 SELECT id+2 FROM t1;
INSERT INTO t1 SELECT id+4 FROM t1;
INSERT INTO t1 SELECT id+8 FROM t1;
INSERT INTO t1 SELECT id+16 FROM t1;
INSERT INTO t1 SELECT id+32 FROM t1;
INSERT INTO t1 SELECT id+64 FROM t1;
INSERT INTO t1 SELECT id+128 FROM t1;
INSERT INTO t1 SELECT id+256 FROM t1;
INSERT INTO t1 SELECT id+512 FROM t1;
# Just test that AVG(DISTINCT) is there
SELECT AVG(DISTINCT id) FROM t1 GROUP BY id % 13;
SELECT SUM(DISTINCT id)/COUNT(DISTINCT id) FROM t1 GROUP BY id % 13;
INSERT INTO t1 SELECT id+1024 FROM t1;
INSERT INTO t1 SELECT id+2048 FROM t1;
INSERT INTO t1 SELECT id+4096 FROM t1;
INSERT INTO t1 SELECT id+8192 FROM t1;
INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
# SELECT '++++++++++++++++++++++++++++++++++++++++++++++++++';
SELECT SUM(DISTINCT id) sm FROM t1;
SELECT SUM(DISTINCT id) sm FROM t2;
SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
# This limit for max_heap_table_size is set to force testing the case when
# all distinct sum values cannot fit in memory and must be stored in a
# temporary table.
SET max_heap_table_size=16384;
# Check that max_heap_table_size was actually set (the hard limit for the
# minimum max_heap_table_size is set in mysqld.cc):
SHOW VARIABLES LIKE 'max_heap_table_size';
SELECT SUM(DISTINCT id) sm FROM t1;
SELECT SUM(DISTINCT id) sm FROM t2;
SELECT SUM(DISTINCT id) sm FROM t1 GROUP BY id % 13;
--echo #
--echo # Bug mdev-4063: SUM(DISTINCT...) with small max_heap_table_size
--echo # (bug #56927)
--echo #
SET max_heap_table_size=default;
INSERT INTO t1 SELECT id+16384 FROM t1;
DELETE FROM t2;
INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
SELECT SUM(DISTINCT id) sm FROM t2;
SET max_heap_table_size=16384;
SELECT SUM(DISTINCT id) sm FROM t2;
DROP TABLE t1;
DROP TABLE t2;
SET @@tmp_table_size=@save_tmp_table_size;
SET @@max_heap_table_size=@save_max_heap_table_size;
--echo #
--echo # Bug mdev-4311: COUNT(DISTINCT...) requiring a file for Unique
--echo # (bug #68749)
--echo #
set storage_engine=INNODB;
CREATE TABLE t1 (id INTEGER) ENGINE=InnoDB;
CREATE TABLE t2 (id INTEGER) ENGINE=InnoDB;
INSERT INTO t1 (id) VALUES (1), (1), (1), (1);
INSERT INTO t1 (id) SELECT id FROM t1;
INSERT INTO t1 (id) SELECT id FROM t1;
INSERT INTO t1 (id) SELECT id FROM t1;
INSERT INTO t1 (id) SELECT id FROM t1;
INSERT INTO t1 (id) SELECT id FROM t1;
INSERT INTO t1 SELECT id+1 FROM t1;
INSERT INTO t1 SELECT id+2 FROM t1;
INSERT INTO t1 SELECT id+4 FROM t1;
INSERT INTO t1 SELECT id+8 FROM t1;
INSERT INTO t1 SELECT id+16 FROM t1;
INSERT INTO t1 SELECT id+32 FROM t1;
INSERT INTO t1 SELECT id+64 FROM t1;
INSERT INTO t1 SELECT id+128 FROM t1;
INSERT INTO t1 SELECT id+256 FROM t1;
INSERT INTO t1 SELECT id+512 FROM t1;
INSERT INTO t1 SELECT id+1024 FROM t1;
INSERT INTO t1 SELECT id+2048 FROM t1;
INSERT INTO t1 SELECT id+4096 FROM t1;
INSERT INTO t1 SELECT id+8192 FROM t1;
INSERT INTO t2 SELECT id FROM t1 ORDER BY id*rand();
INSERT INTO t2 VALUES (NULL);
--echo # With default tmp_table_size / max_heap_table_size
SELECT SQL_NO_CACHE count(DISTINCT id) sm FROM t2;
set @@tmp_table_size=1024*256;
--echo # With reduced tmp_table_size
SELECT SQL_NO_CACHE count(DISTINCT id) sm FROM t2;
set @@tmp_table_size=@save_tmp_table_size;
SET @@max_heap_table_size=1024*256;
--echo # With reduced max_heap_table_size
SELECT SQL_NO_CACHE count(DISTINCT id) sm FROM t2;
SET @@max_heap_table_size=@save_max_heap_table_size;
--echo # Back to default tmp_table_size / max_heap_table_size
SELECT SQL_NO_CACHE count(DISTINCT id) sm FROM t2;
DROP TABLE t1,t2;
set storage_engine=@save_storage_engine;