mirror of https://github.com/MariaDB/server.git
synced 2025-01-29 02:05:57 +01:00
Better comments and add a test case.
This commit is contained in:
parent 8ff66501ca
commit e5802c38f9
4 changed files with 106 additions and 12 deletions
4  mysql-test/suite/innodb/r/innodb-stats-sample.result  Normal file
@@ -0,0 +1,4 @@
Variable_name	Value
innodb_stats_sample_pages	1
Variable_name	Value
innodb_stats_traditional	OFF
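(This .result file is the expected output that mysql-test-run diffs the test against; the test below disables result logging except around the two SHOW VARIABLES statements, so only these four lines are recorded.)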
78  mysql-test/suite/innodb/t/innodb-stats-sample.test  Normal file
@@ -0,0 +1,78 @@
--source include/have_innodb.inc
#
# Test that mysqld does not crash when running ANALYZE TABLE with
# different values of the parameter innodb_stats_sample_pages.
#

# We care only that the following SQL commands do not produce errors
# and do not crash the server.
-- disable_query_log
-- disable_result_log
-- enable_warnings

let $sample_pages=`select @@innodb_stats_sample_pages`;
let $traditional=`select @@innodb_stats_traditional`;
SET GLOBAL innodb_stats_sample_pages=0;
# Use the new method to calculate statistics.
SET GLOBAL innodb_stats_traditional=0;

# Check that the value has been adjusted to 1.
-- enable_result_log
SHOW VARIABLES LIKE 'innodb_stats_sample_pages';
SHOW VARIABLES LIKE 'innodb_stats_traditional';
-- disable_result_log

CREATE TABLE innodb_analyze (
  a INT,
  b INT,
  c CHAR(50),
  KEY(a),
  KEY(b,a)
) ENGINE=InnoDB;

# Test with an empty table.
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=2;
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=1;
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=8000;
ANALYZE TABLE innodb_analyze;

delimiter //;
create procedure innodb_insert_proc (repeat_count int)
begin
  declare current_num int;
  set current_num = 0;
  while current_num < repeat_count do
    insert into innodb_analyze values(current_num, current_num*100, substring(MD5(RAND()), -44));
    set current_num = current_num + 1;
  end while;
end//
delimiter ;//
commit;

set autocommit=0;
call innodb_insert_proc(7000);
commit;
set autocommit=1;

SET GLOBAL innodb_stats_sample_pages=1;
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=8;
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=16;
ANALYZE TABLE innodb_analyze;

SET GLOBAL innodb_stats_sample_pages=8000;
ANALYZE TABLE innodb_analyze;

DROP PROCEDURE innodb_insert_proc;
DROP TABLE innodb_analyze;
EVAL SET GLOBAL innodb_stats_sample_pages=$sample_pages;
EVAL SET GLOBAL innodb_stats_traditional=$traditional;
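Assuming a built tree, the test would typically be run through the driver from the mysql-test directory, e.g. ./mysql-test-run.pl innodb.innodb-stats-sample; mysql-test-run then compares the SHOW VARIABLES output against the .result file above.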
@@ -3777,18 +3777,24 @@ btr_estimate_number_of_different_key_vals(
 			n_sample_pages = srv_stats_sample_pages;
 		}
 	} else {
-		/* New logaritmic number of pages that are estimated. We
-		first pick minimun from srv_stats_sample_pages and number of
-		pages on index. Then we pick maximum from previous number of
-		pages and log2(number of index pages) * srv_stats_sample_pages. */
+		/* New logarithmic number of pages that are estimated.
+		The estimate should be between 1 and index->stat_index_size:
+		we take log2(index->stat_index_size) * srv_stats_sample_pages,
+		but never less than srv_stats_sample_pages clamped to the
+		index size, and cap the result at index->stat_index_size. */
 		if (index->stat_index_size > 0) {
-			n_sample_pages = ut_max(ut_min(srv_stats_sample_pages, index->stat_index_size),
-				log2(index->stat_index_size)*srv_stats_sample_pages);
+			n_sample_pages = ut_min(
+				index->stat_index_size,
+				ut_max(ut_min(srv_stats_sample_pages,
+					      index->stat_index_size),
+				       log2(index->stat_index_size)
+				       * srv_stats_sample_pages));
 		} else {
 			n_sample_pages = 1;
 		}
 	}

+	/* Sanity check */
+	ut_ad(n_sample_pages > 0
+	      && n_sample_pages <= (index->stat_index_size < 1
+				    ? 1 : index->stat_index_size));

 	/* We sample some pages in the index to get an estimate */

 	for (i = 0; i < n_sample_pages; i++) {
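To make the clamping behaviour concrete, here is a minimal standalone C sketch of the same computation. The names sample_pages, dmin and dmax are illustrative stand-ins (dmin/dmax approximate InnoDB's ut_min/ut_max); this is not server code. Compile with -lm for log2():

#include <math.h>
#include <stdio.h>

/* Illustrative stand-ins for InnoDB's ut_min()/ut_max(). */
static double dmin(double a, double b) { return a < b ? a : b; }
static double dmax(double a, double b) { return a > b ? a : b; }

/* Same clamping as the patched code: for stat_index_size > 0 the
   result always lies in [1, stat_index_size]. */
static unsigned long long
sample_pages(unsigned long long srv_stats_sample_pages,
             unsigned long long stat_index_size)
{
    if (stat_index_size == 0) {
        return 1;
    }
    return (unsigned long long) dmin(
        (double) stat_index_size,
        dmax(dmin((double) srv_stats_sample_pages,
                  (double) stat_index_size),
             log2((double) stat_index_size)
             * (double) srv_stats_sample_pages));
}

int main(void)
{
    unsigned long long sizes[] = { 0, 1, 2, 100, 100000 };
    unsigned long long srvs[]  = { 1, 8, 8000 };

    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        for (size_t j = 0; j < sizeof(srvs) / sizeof(srvs[0]); j++) {
            printf("index_size=%-6llu sample_pages=%-4llu -> %llu\n",
                   sizes[i], srvs[j], sample_pages(srvs[j], sizes[i]));
        }
    }
    return 0;
}

For a 100-page index the default of 8 sample pages grows to 53 (log2(100) * 8, truncated), while an oversized setting such as 8000 is capped at the 100-page index size; a zero- or one-page index always yields 1.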
@@ -3957,18 +3957,24 @@ btr_estimate_number_of_different_key_vals(
 			n_sample_pages = srv_stats_sample_pages;
 		}
 	} else {
-		/* New logaritmic number of pages that are estimated. We
-		first pick minimun from srv_stats_sample_pages and number of
-		pages on index. Then we pick maximum from previous number of
-		pages and log2(number of index pages) * srv_stats_sample_pages. */
+		/* New logarithmic number of pages that are estimated.
+		The estimate should be between 1 and index->stat_index_size:
+		we take log2(index->stat_index_size) * srv_stats_sample_pages,
+		but never less than srv_stats_sample_pages clamped to the
+		index size, and cap the result at index->stat_index_size. */
 		if (index->stat_index_size > 0) {
-			n_sample_pages = ut_max(ut_min(srv_stats_sample_pages, index->stat_index_size),
-				log2(index->stat_index_size)*srv_stats_sample_pages);
+			n_sample_pages = ut_min(
+				index->stat_index_size,
+				ut_max(ut_min(srv_stats_sample_pages,
+					      index->stat_index_size),
+				       log2(index->stat_index_size)
+				       * srv_stats_sample_pages));
 		} else {
 			n_sample_pages = 1;
 		}
 	}

+	/* Sanity check */
+	ut_ad(n_sample_pages > 0
+	      && n_sample_pages <= (index->stat_index_size < 1
+				    ? 1 : index->stat_index_size));

 	/* We sample some pages in the index to get an estimate */

 	for (i = 0; i < n_sample_pages; i++) {
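The ut_ad() sanity check states the invariant the clamp is meant to guarantee: 0 < n_sample_pages <= max(1, index->stat_index_size). A brute-force sweep over small inputs (same illustrative dmin/dmax stand-ins as above, assumed ut_min/ut_max semantics) can confirm the formula never violates it:

#include <assert.h>
#include <math.h>
#include <stdio.h>

static double dmin(double a, double b) { return a < b ? a : b; }
static double dmax(double a, double b) { return a > b ? a : b; }

int main(void)
{
    /* Sweep small ranges of both inputs and check the bound that
       the ut_ad() above expresses. */
    for (unsigned long long size = 0; size <= 2048; size++) {
        for (unsigned long long srv = 1; srv <= 64; srv++) {
            unsigned long long n;

            if (size > 0) {
                n = (unsigned long long) dmin(
                    (double) size,
                    dmax(dmin((double) srv, (double) size),
                         log2((double) size) * (double) srv));
            } else {
                n = 1;
            }
            assert(n > 0);
            assert(n <= (size < 1 ? 1 : size));
        }
    }
    printf("invariant holds for all tested inputs\n");
    return 0;
}

Note that a one-page index gives n_sample_pages == stat_index_size == 1, so the upper bound has to be non-strict (<=) for the assertion to hold.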