mirror of
https://github.com/MariaDB/server.git
synced 2026-04-21 15:55:53 +02:00
MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale
- Change the comments in class ha_handler_stats to say the members are in ticks, not milliseconds.
- In sql_explain.cc, adjust the scale to print milliseconds.
This commit is contained in:
parent
2d5cba22a9
commit
36ab6cc80c
5 changed files with 142 additions and 3 deletions
73
mysql-test/main/analyze_engine_stats2.test
Normal file
73
mysql-test/main/analyze_engine_stats2.test
Normal file
|
|
@@ -0,0 +1,73 @@
|
|||
#
|
||||
# r_engine_stats tests that require slow query log.
|
||||
#
|
||||
--source include/analyze-format.inc
|
||||
--source include/have_sequence.inc
|
||||
--source include/have_innodb.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale
|
||||
--echo #
|
||||
|
||||
# Each row is 1K.
|
||||
create table t1 (
|
||||
a varchar(255),
|
||||
b varchar(255),
|
||||
c varchar(255),
|
||||
d varchar(255),
|
||||
primary key(a,b,c,d)
|
||||
) engine=innodb;
|
||||
|
||||
# The data size is 160K * 1K = 160M
|
||||
# 16M / (page_size=16K) = 1K pages.
|
||||
insert into t1 select
|
||||
repeat(uuid(), 7),
|
||||
repeat(uuid(), 7),
|
||||
repeat(uuid(), 7),
|
||||
repeat(uuid(), 7)
|
||||
from seq_1_to_16384;
|
||||
|
||||
source include/restart_mysqld.inc;
|
||||
set log_slow_verbosity='engine';
|
||||
set long_query_time=0.0;
|
||||
|
||||
let $analyze_output= `analyze format=json
|
||||
select * from t1 force index (PRIMARY) order by a desc, b desc, c desc, d desc`;
|
||||
evalp set @js='$analyze_output';
|
||||
|
||||
# Print it out for user-friendlines
|
||||
--replace_regex /("(r_[a-z_]*_time_ms|pages[^"]*)": )[^, \n]*/\1"REPLACED"/
|
||||
select @js;
|
||||
|
||||
set @pages_read_time_ms=
|
||||
(select json_value(@js,'$.query_block.table.r_engine_stats.pages_read_time_ms'));
|
||||
|
||||
let ANALYZE_PAGES=`select @pages_read_time_ms`;
|
||||
let SLOW_LOG_FILE= `select @@slow_query_log_file`;
|
||||
|
||||
perl;
|
||||
my $slow_log_file= $ENV{'SLOW_LOG_FILE'} or die "SLOW_LOG_FILE not set";
|
||||
my $analyze_pages=$ENV{'ANALYZE_PAGES'};
|
||||
open(FILE, $slow_log_file) or die "Failed to open $slow_log_file";
|
||||
# We didn't run any queries touching a storage engine after the query of
|
||||
# interest, so we will be fine here if we just get the last occurrence of
|
||||
# Pages_read_time: NNNN in the file
|
||||
while(<FILE>) {
|
||||
$slow_log_pages=$1 if (/Pages_read_time: ([0-9.]+)/);
|
||||
}
|
||||
close(FILE);
|
||||
|
||||
if ( $slow_log_pages > $analyze_pages * 0.95 &&
|
||||
$slow_log_pages < $analyze_pages * 1.05) {
|
||||
print "\n\n OK: pages_read_time is same in slow log and ANALYZE\n\n";
|
||||
} else {
|
||||
print "\n\n FAIL: $slow_log_pages not equal to $analyze_pages\n";
|
||||
}
|
||||
|
||||
EOF
|
||||
|
||||
|
||||
set long_query_time=default;
|
||||
drop table t1;
|
||||
|
||||
|
||||
Loading…
Add table
Add a link
Reference in a new issue