Merge 10.6 into 10.9

This commit is contained in:
Marko Mäkelä 2023-07-10 11:22:21 +03:00
commit 7cde5c539b
39 changed files with 698 additions and 65 deletions

View file

@ -0,0 +1,85 @@
if ($log_expected_matches) {
--echo [log_grep.inc] file: $log_file pattern: $grep_pattern expected_matches: $log_expected_matches
}
if (!$log_expected_matches) {
--echo [log_grep.inc] file: $log_file pattern: $grep_pattern
}
--let LOG_GREP_PERL_RESULT=$MYSQL_TMP_DIR/log_grep_perl_result.test
perl;
open my $command_file, ">", "$ENV{'LOG_GREP_PERL_RESULT'}" or die "Cannot create file";
$log_file= $ENV{'log_file'};
$log_file_full_path= $ENV{'log_file_full_path'};
$log_slow_rate_test= $ENV{'log_slow_rate_test'};
open(FILE, "$log_file_full_path")
or die("Cannot open file $log_file_full_path: $!\n");
if ($log_slow_rate_test) {
$one= 0;
$two= 0;
$three= 0;
while(<FILE>) {
$one++ if(/'connection_one'/);
$two++ if(/'connection_two'/);
$three++ if(/'connection_three'/);
}
$sum= $one + $two + $three;
$zero= 0;
if ($one == 0) {
$zero++;
}
if ($two == 0) {
$zero++;
}
if ($three == 0) {
$zero++;
}
print "[log_grep.inc] sum: $sum\n";
print "[log_grep.inc] zero: $zero\n";
}
else {
$grep_pattern= $ENV{'grep_pattern'};
$lines= 0;
while(<FILE>) {
$lines++ if (/$grep_pattern/);
}
$log_expected_matches= $ENV{'log_expected_matches'};
if ($log_expected_matches) {
if ($log_expected_matches != $lines) {
print "[log_grep.inc] ERROR: expected matches: $log_expected_matches, actual matches: $lines\n";
print "[log_grep.inc] log file at $log_file_full_path\n";
close(FILE);
open(FILE, "$log_file_full_path")
or die("Cannot open file $log_file_full_path: $!\n");
while (<FILE>) {
print ;
}
print $command_file "--let \$log_grep_failed= 1;\n";
} else {
print "[log_grep.inc] found expected match count: $log_expected_matches\n";
}
} else {
print "[log_grep.inc] lines: $lines\n";
}
}
close(FILE);
close($command_file);
EOF
--source $LOG_GREP_PERL_RESULT
--remove_file $LOG_GREP_PERL_RESULT
if ($log_grep_failed)
{
SHOW SESSION STATUS LIKE 'Slow_queries';
SHOW GLOBAL VARIABLES LIKE 'log%';
SHOW GLOBAL VARIABLES LIKE 'long_query_time';
SHOW GLOBAL VARIABLES LIKE 'min_examined_row_limit';
SHOW GLOBAL VARIABLES LIKE 'query_cache%';
SHOW GLOBAL VARIABLES LIKE 'slow_query%';
SHOW SESSION VARIABLES LIKE 'log%';
SHOW SESSION VARIABLES LIKE 'long_query_time';
SHOW SESSION VARIABLES LIKE 'min_examined_row_limit';
SHOW SESSION VARIABLES LIKE 'query_cache%';
SHOW SESSION VARIABLES LIKE 'slow_query%';
--die Testcase failed!
}

View file

@ -0,0 +1,6 @@
--remove_files_wildcard $MYSQLTEST_VARDIR/tmp $log_slow_prefix-*.slog
--disable_query_log
EVAL SET GLOBAL log_output= $log_output_old;
EVAL SET GLOBAL slow_query_log_file= "$slow_query_log_file_old";
EVAL SET GLOBAL slow_query_log= $slow_query_log_old;
--enable_query_log

View file

@ -0,0 +1,25 @@
# Common extensions to the slow query log
--let grep_pattern = ^# Thread_id: .+ Schema: .+ QC_hit: (Yes|No)\$
--let log_expected_matches = $log_slow_verbosity_expected_matches
--source include/log_grep.inc
--let grep_pattern = ^# Query_time: \d+\.\d+ Lock_time: \d+\.\d+ Rows_sent: \d+ Rows_examined: \d+\$
--source include/log_grep.inc
--let grep_pattern = ^# Rows_affected: \d+ Bytes_sent: \d+\$
--source include/log_grep.inc
# Query plan
--let log_expected_matches = $log_slow_verbosity_queryplan_expected_matches
--let grep_pattern = ^# Full_scan: (Yes|No) Full_join: (Yes|No) Tmp_table: (Yes|No) Tmp_table_on_disk: (Yes|No)\$
--source include/log_grep.inc
--let grep_pattern = ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)\$
--source include/log_grep.inc
# Temp tables
--let log_expected_matches = $log_slow_verbosity_tmptable_expected_matches
--source include/log_grep.inc
--let grep_pattern = ^# Tmp_tables: \d+ Tmp_disk_tables: \d+\$
--source include/log_grep.inc
# InnoDB/Engines
--let log_expected_matches = $log_slow_innodb_expected_matches
--let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+\n# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+\$

View file

@ -0,0 +1,8 @@
--disable_query_log
--let slow_query_log_old= `select @@slow_query_log`
--let slow_query_log_file_old= `select @@slow_query_log_file`
--let log_output_old= `select @@log_output`
SET GLOBAL slow_query_log=0;
SET GLOBAL log_output=FILE;
--enable_query_log

View file

@ -0,0 +1,6 @@
--disable_query_log
--let log_file_full_path=$MYSQLTEST_VARDIR/tmp/$log_file.slog
--echo [slow_log_start.inc] $log_file
EVAL SET GLOBAL slow_query_log_file="$log_file_full_path";
SET GLOBAL slow_query_log=1;
--enable_query_log

View file

@ -0,0 +1,4 @@
--disable_query_log
SET GLOBAL slow_query_log=0;
--echo [log_slow_stop.inc] $log_file
--enable_query_log

View file

@ -1 +1 @@
--slow-query-log --long-query-time=0.00001 --log-slow-verbosity=query_plan,explain
--slow-query-log --long-query-time=0.00001 --log-slow-verbosity=query_plan,explain,innodb

View file

@ -0,0 +1,78 @@
CREATE TABLE t1(a INT primary key, b int) ENGINE=InnoDB;
INSERT INTO t1 select seq, seq from seq_1_to_1000;
SET SESSION min_examined_row_limit=0;
SET SESSION long_query_time=0;
SET SESSION log_slow_verbosity='innodb,query_plan';
[slow_log_start.inc] log_slow_innodb-verbosity_1
SELECT sum(a+b) FROM t1;
sum(a+b)
1001000
UPDATE t1 set b=b+1 where a=1 or a=999;
[log_slow_stop.inc] log_slow_innodb-verbosity_1
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Thread_id: .+ Schema: .+ QC_hit: (Yes|No)$ expected_matches: 3
[log_grep.inc] found expected match count: 3
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Query_time: \d+\.\d+ Lock_time: \d+\.\d+ Rows_sent: \d+ Rows_examined: \d+$ expected_matches: 3
[log_grep.inc] found expected match count: 3
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Rows_affected: \d+ Bytes_sent: \d+$ expected_matches: 3
[log_grep.inc] found expected match count: 3
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Full_scan: (Yes|No) Full_join: (Yes|No) Tmp_table: (Yes|No) Tmp_table_on_disk: (Yes|No)$
[log_grep.inc] lines: 1
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
[log_grep.inc] lines: 0
SET SESSION log_slow_verbosity='innodb,query_plan';
[slow_log_start.inc] log_slow_innodb-verbosity_2
SELECT 1;
1
1
[log_slow_stop.inc] log_slow_innodb-verbosity_2
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Thread_id: .+ Schema: .+ QC_hit: (Yes|No)$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Query_time: \d+\.\d+ Lock_time: \d+\.\d+ Rows_sent: \d+ Rows_examined: \d+$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Rows_affected: \d+ Bytes_sent: \d+$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Full_scan: (Yes|No) Full_join: (Yes|No) Tmp_table: (Yes|No) Tmp_table_on_disk: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
[log_grep.inc] lines: 0
SET SESSION log_slow_verbosity='query_plan';
[log_slow_stop.inc] log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000)
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Thread_id: .+ Schema: .+ QC_hit: (Yes|No)$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Query_time: \d+\.\d+ Lock_time: \d+\.\d+ Rows_sent: \d+ Rows_examined: \d+$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Rows_affected: \d+ Bytes_sent: \d+$ expected_matches: 2
[log_grep.inc] found expected match count: 2
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Full_scan: (Yes|No) Full_join: (Yes|No) Tmp_table: (Yes|No) Tmp_table_on_disk: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Filesort: (Yes|No) Filesort_on_disk: (Yes|No) Merge_passes: \d+\ Priority_queue: (Yes|No)$
[log_grep.inc] lines: 0
[log_grep.inc] file: log_slow_innodb-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000) pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
[log_grep.inc] lines: 0
DROP TABLE t1;

View file

@ -0,0 +1,75 @@
#
# Test the extended slow query log output format for various log_slow_verbosity values.
# Test that InnoDB stats for the extended slow query log are collected.
#
# This test file is based on tests from Percona server
#
--source include/have_innodb.inc
--source include/have_sequence.inc
--source include/log_slow_prepare.inc
# Cannot be used with view protocol as number of temporary tables changes
--source include/no_view_protocol.inc
--let $log_slow_prefix=log_slow_innodb
# Force cold buffer pool
#--let $restart_parameters=--innodb_buffer_pool_load_at_startup=OFF
#--source include/restart_mysqld.inc
CREATE TABLE t1(a INT primary key, b int) ENGINE=InnoDB;
INSERT INTO t1 select seq, seq from seq_1_to_1000;
SET SESSION min_examined_row_limit=0;
SET SESSION long_query_time=0;
#
# Test all enabled options with InnoDB-involving query
#
SET SESSION log_slow_verbosity='innodb,query_plan';
--let log_file=$log_slow_prefix-verbosity_1
--source include/log_slow_start.inc
SELECT sum(a+b) FROM t1;
UPDATE t1 set b=b+1 where a=1 or a=999;
--source include/log_slow_stop.inc
--let log_slow_verbosity_expected_matches= 3
--let log_slow_verbosity_queryplan_matches= 1
--let log_slow_verbosity_innodb_expected_matches= 2
--let log_slow_verbosity_tmptable_expected_matches= 0
--source include/log_slow_grep.inc
#
# Test for "No InnoDB statistics available" in output when InnoDB stats are requested
# but the query does not involve InnoDB tables
#
SET SESSION log_slow_verbosity='innodb,query_plan';
--let log_file=$log_slow_prefix-verbosity_2
--source include/log_slow_start.inc
SELECT 1;
--source include/log_slow_stop.inc
--let log_slow_verbosity_expected_matches= 2
--let log_slow_verbosity_queryplan_matches= 1
--let log_slow_verbosity_innodb_expected_matches= 0
--source include/log_slow_grep.inc
#
# Test 'query_plan'
#
SET SESSION log_slow_verbosity='query_plan';
let log_file=$log_slow_prefix-verbosity_3
--source include/log_slow_start.inc
INSERT INTO t1 VALUE(1000);
--source include/log_slow_stop.inc
--let log_slow_verbosity_innodb_expected_matches= 1
--source include/log_slow_grep.inc
DROP TABLE t1;
--source include/log_slow_cleanup.inc

View file

@ -532,7 +532,7 @@ The following specify which files/extra groups are read (specified before remain
(Defaults to on; use --skip-log-slow-slave-statements to disable.)
--log-slow-verbosity=name
Verbosity level for the slow log. Any combination of:
innodb, query_plan, explain
innodb, query_plan, explain, engine, full
--log-tc=name Path to transaction coordinator log (used for
transactions that affect more than one storage engine,
when binary log is disabled).

View file

@ -1,5 +1,8 @@
--source include/not_embedded.inc
--source include/have_sequence.inc
# View protocol changes some plans
--source include/no_view_protocol.inc
SELECT table_name, column_name FROM information_schema.columns where table_name="OPTIMIZER_TRACE";
set optimizer_trace="enabled=on";
show variables like 'optimizer_trace';

View file

@ -67,10 +67,10 @@ set session log_slow_verbosity='explain';
select @@session.log_slow_verbosity;
@@session.log_slow_verbosity
explain
set session log_slow_verbosity='innodb,query_plan,explain';
set session log_slow_verbosity='innodb,query_plan,explain,engine,full';
select @@session.log_slow_verbosity;
@@session.log_slow_verbosity
innodb,query_plan,explain
innodb,query_plan,explain,engine,full
set session log_slow_verbosity='';
select @@session.log_slow_verbosity;
@@session.log_slow_verbosity
@ -81,6 +81,6 @@ set session log_slow_verbosity=1e1;
ERROR 42000: Incorrect argument type to variable 'log_slow_verbosity'
set session log_slow_verbosity="foo";
ERROR 42000: Variable 'log_slow_verbosity' can't be set to the value of 'foo'
set session log_slow_verbosity=8;
ERROR 42000: Variable 'log_slow_verbosity' can't be set to the value of '8'
set session log_slow_verbosity=32;
ERROR 42000: Variable 'log_slow_verbosity' can't be set to the value of '32'
SET @@global.log_slow_verbosity = @start_global_value;

View file

@ -1719,7 +1719,7 @@ VARIABLE_COMMENT Verbosity level for the slow log
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST innodb,query_plan,explain
ENUM_VALUE_LIST innodb,query_plan,explain,engine,full
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME LOG_WARNINGS

View file

@ -1869,7 +1869,7 @@ VARIABLE_COMMENT Verbosity level for the slow log
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST innodb,query_plan,explain
ENUM_VALUE_LIST innodb,query_plan,explain,engine,full
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME LOG_WARNINGS

View file

@ -47,7 +47,7 @@ set session log_slow_verbosity='innodb,query_plan';
select @@session.log_slow_verbosity;
set session log_slow_verbosity='explain';
select @@session.log_slow_verbosity;
set session log_slow_verbosity='innodb,query_plan,explain';
set session log_slow_verbosity='innodb,query_plan,explain,engine,full';
select @@session.log_slow_verbosity;
set session log_slow_verbosity='';
select @@session.log_slow_verbosity;
@ -62,6 +62,6 @@ set session log_slow_verbosity=1e1;
--error ER_WRONG_VALUE_FOR_VAR
set session log_slow_verbosity="foo";
--error ER_WRONG_VALUE_FOR_VAR
set session log_slow_verbosity=8;
set session log_slow_verbosity=32;
SET @@global.log_slow_verbosity = @start_global_value;

59
sql/ha_handler_stats.h Normal file
View file

@ -0,0 +1,59 @@
#ifndef HA_HANDLER_STATS_INCLUDED
#define HA_HANDLER_STATS_INCLUDED
/*
Copyright (c) 2023, MariaDB Foundation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
*/
/* Definitions for parameters to do with handler-routines */
class ha_handler_stats
{
public:
ulonglong pages_accessed; /* Pages accessed from page cache */
ulonglong pages_updated; /* Pages changed in page cache */
ulonglong pages_read_count; /* Pages read from disk */
ulonglong pages_read_time; /* Time reading pages, in microsec. */
ulonglong undo_records_read;
ulonglong engine_time; /* Time spent in engine in microsec */
uint active; /* <> 0 if status has to be updated */
#define first_stat pages_accessed
#define last_stat engine_time
inline void reset()
{
bzero((void*) this, sizeof(*this));
}
inline void add(ha_handler_stats *stats)
{
ulonglong *to= &first_stat;
ulonglong *from= &stats->first_stat;
do
{
(*to)+= *from++;
} while (to++ != &last_stat);
}
inline bool has_stats()
{
ulonglong *to= &first_stat;
do
{
if (*to)
return 1;
} while (to++ != &last_stat);
return 0;
}
};
#endif /* HA_HANDLER_STATS_INCLUDED */

View file

@ -590,6 +590,7 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
file->handler_stats= handler_stats;
} while (*(++file_array));
m_handler_status= handler_initialized;
DBUG_RETURN(0);
@ -4048,6 +4049,13 @@ int ha_partition::discover_check_version()
return m_file[0]->discover_check_version();
}
static int set_part_handler_stats(handler *h, void *stats)
{
h->handler_stats= (ha_handler_stats*) stats;
return 0;
}
/**
Clone the open and locked partitioning handler.
@ -4095,6 +4103,9 @@ handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root)
HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_NO_PSI_CALL))
goto err;
if (handler_stats)
new_handler->loop_partitions(set_part_handler_stats, handler_stats);
DBUG_RETURN((handler*) new_handler);
err:
@ -4103,6 +4114,16 @@ err:
}
/*
Update all sub partitions to point to handler stats
*/
void ha_partition::handler_stats_updated()
{
loop_partitions(set_part_handler_stats, handler_stats);
}
/*
Close handler object

View file

@ -1649,5 +1649,6 @@ public:
bool can_convert_nocopy(const Field &field,
const Column_definition &new_field) const override;
void handler_stats_updated() override;
};
#endif /* HA_PARTITION_INCLUDED */

View file

@ -3224,6 +3224,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
if (new_handler->ha_open(table, name, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED, mem_root))
goto err;
new_handler->handler_stats= handler_stats;
return new_handler;

View file

@ -35,6 +35,7 @@
#include "sql_array.h" /* Dynamic_array<> */
#include "mdl.h"
#include "vers_string.h"
#include "ha_handler_stats.h"
#include "sql_analyze_stmt.h" // for Exec_time_tracker
@ -3175,13 +3176,23 @@ protected:
ha_rows estimation_rows_to_insert;
handler *lookup_handler;
/* Statistics for the query. Updated if handler_stats.in_use is set */
ha_handler_stats active_handler_stats;
void set_handler_stats();
public:
handlerton *ht; /* storage engine of this handler */
uchar *ref; /* Pointer to current row */
uchar *dup_ref; /* Pointer to duplicate row */
uchar *lookup_buffer;
/* General statistics for the table like number of row, file sizes etc */
ha_statistics stats;
/*
Collect query stats here if pointer is != NULL.
This is a pointer because if we do a clone of the handler, we want to
use the original handler for collecting statistics.
*/
ha_handler_stats *handler_stats;
/** MultiRangeRead-related members: */
range_seq_t mrr_iter; /* Iterator to traverse the range sequence */
@ -3375,8 +3386,8 @@ public:
:table_share(share_arg), table(0),
estimation_rows_to_insert(0),
lookup_handler(this),
ht(ht_arg), ref(0), lookup_buffer(NULL), end_range(NULL),
implicit_emptied(0),
ht(ht_arg), ref(0), lookup_buffer(NULL), handler_stats(NULL),
end_range(NULL), implicit_emptied(0),
mark_trx_read_write_done(0),
check_table_binlog_row_based_done(0),
check_table_binlog_row_based_result(0),
@ -4808,6 +4819,22 @@ public:
{
check_table_binlog_row_based_done= 0;
}
virtual void handler_stats_updated() {}
inline void ha_handler_stats_reset()
{
handler_stats= &active_handler_stats;
active_handler_stats.reset();
active_handler_stats.active= 1;
handler_stats_updated();
}
inline void ha_handler_stats_disable()
{
handler_stats= 0;
active_handler_stats.active= 0;
handler_stats_updated();
}
private:
/* Cache result to avoid extra calls */
inline void mark_trx_read_write()
@ -5163,6 +5190,7 @@ public:
}
bool log_not_redoable_operation(const char *operation);
protected:
Handler_share *get_ha_share_ptr();
void set_ha_share_ptr(Handler_share *arg_ha_share);

View file

@ -3329,6 +3329,10 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
char buff[80], *end;
char query_time_buff[22+7], lock_time_buff[22+7];
size_t buff_len;
ulonglong log_slow_verbosity= thd->variables.log_slow_verbosity;
if (log_slow_verbosity & LOG_SLOW_VERBOSITY_FULL)
log_slow_verbosity= ~(ulonglong) 0;
end= buff;
if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
@ -3355,7 +3359,6 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
my_b_write(&log_file, (uchar*) "\n", 1))
goto err;
/* For slow query log */
sprintf(query_time_buff, "%.6f", ulonglong2double(query_utime)/1000000.0);
sprintf(lock_time_buff, "%.6f", ulonglong2double(lock_utime)/1000000.0);
if (my_b_printf(&log_file,
@ -3371,8 +3374,33 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
(ulong) (thd->status_var.bytes_sent - thd->bytes_sent_old)))
goto err;
if ((thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_QUERY_PLAN)
&& thd->tmp_tables_used &&
if (unlikely(log_slow_verbosity &
LOG_SLOW_VERBOSITY_ENGINE) &&
thd->handler_stats.has_stats())
{
ha_handler_stats *stats= &thd->handler_stats;
double tracker_frequency= timer_tracker_frequency();
sprintf(query_time_buff, "%.4f",
1000.0 * ulonglong2double(stats->pages_read_time)/
tracker_frequency);
sprintf(lock_time_buff, "%.4f",
1000.0 * ulonglong2double(stats->engine_time)/
tracker_frequency);
if (my_b_printf(&log_file,
"# Pages_accessed: %lu Pages_read: %lu "
"Pages_updated: %lu Old_rows_read: %lu\n"
"# Pages_read_time: %s Engine_time: %s\n",
(ulong) stats->pages_accessed,
(ulong) stats->pages_read_count,
(ulong) stats->pages_updated,
(ulong) stats->undo_records_read,
query_time_buff, lock_time_buff))
goto err;
}
if ((log_slow_verbosity & LOG_SLOW_VERBOSITY_QUERY_PLAN) &&
thd->tmp_tables_used &&
my_b_printf(&log_file,
"# Tmp_tables: %lu Tmp_disk_tables: %lu "
"Tmp_table_sizes: %s\n",
@ -3386,7 +3414,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
ErrConvDQName(thd->spcont->m_sp).ptr()))
goto err;
if ((thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_QUERY_PLAN) &&
if ((log_slow_verbosity & LOG_SLOW_VERBOSITY_QUERY_PLAN) &&
(thd->query_plan_flags &
(QPLAN_FULL_SCAN | QPLAN_FULL_JOIN | QPLAN_TMP_TABLE |
QPLAN_TMP_DISK | QPLAN_FILESORT | QPLAN_FILESORT_DISK |
@ -3408,8 +3436,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
"Yes" : "No")
))
goto err;
if (thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_EXPLAIN &&
thd->lex->explain)
if (log_slow_verbosity & LOG_SLOW_VERBOSITY_EXPLAIN && thd->lex->explain)
{
StringBuffer<128> buf;
DBUG_ASSERT(!thd->free_list);

View file

@ -19,9 +19,15 @@
#define LOG_SLOW_INCLUDED
#define LOG_SLOW_VERBOSITY_INIT 0
#define LOG_SLOW_VERBOSITY_INNODB (1U << 0)
#define LOG_SLOW_VERBOSITY_INNODB (1U << 0) /* Old option */
#define LOG_SLOW_VERBOSITY_QUERY_PLAN (1U << 1)
#define LOG_SLOW_VERBOSITY_EXPLAIN (1U << 2)
#define LOG_SLOW_VERBOSITY_STORAGE_ENGINE (1U << 3) /* Replaces InnoDB */
#define LOG_SLOW_VERBOSITY_FULL (1U << 4)
#define LOG_SLOW_VERBOSITY_ENGINE (LOG_SLOW_VERBOSITY_FULL | \
LOG_SLOW_VERBOSITY_INNODB | \
LOG_SLOW_VERBOSITY_STORAGE_ENGINE)
#define QPLAN_INIT QPLAN_QC_NO

View file

@ -35,9 +35,20 @@ log, should the query be slow.
2. Timing data. Measuring the time it took to run parts of query has noticeable
overhead. Because of that, we measure the time only when running "ANALYZE
$stmt").
*/
/* fake microseconds as cycles if cycles isn't available */
static inline double timer_tracker_frequency()
{
#if (MY_TIMER_ROUTINE_CYCLES)
return static_cast<double>(sys_timer_info.cycles.frequency);
#else
return static_cast<double>(sys_timer_info.microseconds.frequency);
#endif
}
class Gap_time_tracker;
void attach_gap_time_tracker(THD *thd, Gap_time_tracker *gap_tracker, ulonglong timeval);
void process_gap_time_tracker(THD *thd, ulonglong timeval);
@ -52,12 +63,19 @@ protected:
ulonglong cycles;
ulonglong last_start;
ulonglong measure() const
{
#if (MY_TIMER_ROUTINE_CYCLES)
return my_timer_cycles();
#else
return my_timer_microseconds();
#endif
}
void cycles_stop_tracking(THD *thd)
{
ulonglong end= my_timer_cycles();
ulonglong end= measure();
cycles += end - last_start;
if (unlikely(end < last_start))
cycles += ULONGLONG_MAX;
process_gap_time_tracker(thd, end);
if (my_gap_tracker)
@ -80,7 +98,7 @@ public:
// interface for collecting time
void start_tracking(THD *thd)
{
last_start= my_timer_cycles();
last_start= measure();
process_gap_time_tracker(thd, last_start);
}
@ -92,11 +110,21 @@ public:
// interface for getting the time
ulonglong get_loops() const { return count; }
double get_time_ms() const
inline double cycles_to_ms(ulonglong cycles_arg) const
{
// convert 'cycles' to milliseconds.
return 1000.0 * static_cast<double>(cycles) /
static_cast<double>(sys_timer_info.cycles.frequency);
return 1000.0 * static_cast<double>(cycles_arg) /
timer_tracker_frequency();
}
double get_time_ms() const
{
return cycles_to_ms(cycles);
}
ulonglong get_cycles() const
{
return cycles;
}
bool has_timed_statistics() const { return cycles > 0; }
@ -122,13 +150,11 @@ public:
double get_time_ms() const
{
// convert 'cycles' to milliseconds.
return 1000.0 * static_cast<double>(cycles) /
static_cast<double>(sys_timer_info.cycles.frequency);
return 1000.0 * static_cast<double>(cycles) / timer_tracker_frequency();
}
};
/*
A class for counting certain actions (in all queries), and optionally
collecting the timings (in ANALYZE queries).

View file

@ -969,11 +969,12 @@ int close_thread_tables(THD *thd)
void close_thread_table(THD *thd, TABLE **table_ptr)
{
TABLE *table= *table_ptr;
handler *file= table->file;
DBUG_ENTER("close_thread_table");
DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str,
table->s->table_name.str, table));
DBUG_ASSERT(!table->file->keyread_enabled());
DBUG_ASSERT(table->file->inited == handler::NONE);
DBUG_ASSERT(!file->keyread_enabled());
DBUG_ASSERT(file->inited == handler::NONE);
/*
The metadata lock must be released after giving back
@ -983,13 +984,20 @@ void close_thread_table(THD *thd, TABLE **table_ptr)
table->s->db.str,
table->s->table_name.str,
MDL_SHARED));
table->vcol_cleanup_expr(thd);
table->mdl_ticket= NULL;
table->file->update_global_table_stats();
table->file->update_global_index_stats();
file->update_global_table_stats();
file->update_global_index_stats();
if (unlikely(thd->variables.log_slow_verbosity &
LOG_SLOW_VERBOSITY_ENGINE) &&
likely(file->handler_stats))
{
Exec_time_tracker *tracker;
if ((tracker= file->get_time_tracker()))
file->handler_stats->engine_time+= tracker->get_cycles();
thd->handler_stats.add(file->handler_stats);
}
/*
This look is needed to allow THD::notify_shared_lock() to
traverse the thd->open_tables list without having to worry that
@ -1003,17 +1011,17 @@ void close_thread_table(THD *thd, TABLE **table_ptr)
if (! table->needs_reopen())
{
/* Avoid having MERGE tables with attached children in table cache. */
table->file->extra(HA_EXTRA_DETACH_CHILDREN);
file->extra(HA_EXTRA_DETACH_CHILDREN);
/* Free memory and reset for next loop. */
free_field_buffers_larger_than(table, MAX_TDC_BLOB_SIZE);
table->file->ha_reset();
file->ha_reset();
}
/*
Do this *before* entering the TABLE_SHARE::tdc.LOCK_table_share
critical section.
*/
MYSQL_UNBIND_TABLE(table->file);
MYSQL_UNBIND_TABLE(file);
tc_release_table(table);
DBUG_VOID_RETURN;

View file

@ -5831,6 +5831,7 @@ void THD::store_slow_query_state(Sub_statement_state *backup)
backup->tmp_tables_disk_used= tmp_tables_disk_used;
backup->tmp_tables_size= tmp_tables_size;
backup->tmp_tables_used= tmp_tables_used;
backup->handler_stats= handler_stats;
}
/* Reset variables related to slow query log */
@ -5846,6 +5847,8 @@ void THD::reset_slow_query_state()
tmp_tables_disk_used= 0;
tmp_tables_size= 0;
tmp_tables_used= 0;
if ((variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_ENGINE))
handler_stats.reset();
}
/*
@ -5864,6 +5867,8 @@ void THD::add_slow_query_state(Sub_statement_state *backup)
tmp_tables_disk_used+= backup->tmp_tables_disk_used;
tmp_tables_size+= backup->tmp_tables_size;
tmp_tables_used+= backup->tmp_tables_used;
if ((variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_ENGINE))
handler_stats.add(&backup->handler_stats);
}

View file

@ -51,6 +51,7 @@
#include "backup.h"
#include "xa.h"
#include "ddl_log.h" /* DDL_LOG_STATE */
#include "ha_handler_stats.h" // ha_handler_stats */
extern "C"
void set_thd_stage_info(void *thd,
@ -1880,6 +1881,7 @@ public:
ulonglong cuted_fields, sent_row_count, examined_row_count;
ulonglong affected_rows;
ulonglong bytes_sent_old;
ha_handler_stats handler_stats;
ulong tmp_tables_used;
ulong tmp_tables_disk_used;
ulong query_plan_fsort_passes;
@ -2717,6 +2719,7 @@ public:
struct system_status_var status_var; // Per thread statistic vars
struct system_status_var org_status_var; // For user statistics
struct system_status_var *initial_status_var; /* used by show status */
ha_handler_stats handler_stats; // Handler statistics
THR_LOCK_INFO lock_info; // Locking info of this thread
/**
Protects THD data accessed from other threads:

View file

@ -56,7 +56,7 @@
invoked on a running DELETE statement.
*/
Explain_delete* Delete_plan::save_explain_delete_data(MEM_ROOT *mem_root, THD *thd)
Explain_delete* Delete_plan::save_explain_delete_data(THD *thd, MEM_ROOT *mem_root)
{
Explain_query *query= thd->lex->explain;
Explain_delete *explain=
@ -73,7 +73,7 @@ Explain_delete* Delete_plan::save_explain_delete_data(MEM_ROOT *mem_root, THD *t
else
{
explain->deleting_all_rows= false;
if (Update_plan::save_explain_data_intern(mem_root, explain,
if (Update_plan::save_explain_data_intern(thd, mem_root, explain,
thd->lex->analyze_stmt))
return 0;
}
@ -84,21 +84,22 @@ Explain_delete* Delete_plan::save_explain_delete_data(MEM_ROOT *mem_root, THD *t
Explain_update*
Update_plan::save_explain_update_data(MEM_ROOT *mem_root, THD *thd)
Update_plan::save_explain_update_data(THD *thd, MEM_ROOT *mem_root)
{
Explain_query *query= thd->lex->explain;
Explain_update* explain=
new (mem_root) Explain_update(mem_root, thd->lex->analyze_stmt);
if (!explain)
return 0;
if (save_explain_data_intern(mem_root, explain, thd->lex->analyze_stmt))
if (save_explain_data_intern(thd, mem_root, explain, thd->lex->analyze_stmt))
return 0;
query->add_upd_del_plan(explain);
return explain;
}
bool Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
bool Update_plan::save_explain_data_intern(THD *thd,
MEM_ROOT *mem_root,
Explain_update *explain,
bool is_analyze)
{
@ -120,7 +121,9 @@ bool Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
return 0;
}
if (is_analyze)
if (is_analyze ||
(thd->variables.log_slow_verbosity &
LOG_SLOW_VERBOSITY_ENGINE))
table->file->set_time_tracker(&explain->table_tracker);
select_lex->set_explain_type(TRUE);
@ -474,7 +477,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
query_type= THD::STMT_QUERY_TYPE;
error= -1;
deleted= maybe_deleted;
if (!query_plan.save_explain_delete_data(thd->mem_root, thd))
if (!query_plan.save_explain_delete_data(thd, thd->mem_root))
error= 1;
goto cleanup;
}
@ -602,7 +605,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (thd->lex->describe)
goto produce_explain_and_leave;
if (!(explain= query_plan.save_explain_delete_data(thd->mem_root, thd)))
if (!(explain= query_plan.save_explain_delete_data(thd, thd->mem_root)))
goto got_error;
ANALYZE_START_TRACKING(thd, &explain->command_tracker);
@ -990,7 +993,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
if (!(query_plan.save_explain_delete_data(thd->mem_root, thd)))
if (!(query_plan.save_explain_delete_data(thd, thd->mem_root)))
goto got_error;
send_nothing_and_leave:

View file

@ -3009,9 +3009,9 @@ public:
void set_impossible_where() { impossible_where= true; }
void set_no_partitions() { no_partitions= true; }
Explain_update* save_explain_update_data(MEM_ROOT *mem_root, THD *thd);
Explain_update* save_explain_update_data(THD *thd, MEM_ROOT *mem_root);
protected:
bool save_explain_data_intern(MEM_ROOT *mem_root, Explain_update *eu, bool is_analyze);
bool save_explain_data_intern(THD *thd, MEM_ROOT *mem_root, Explain_update *eu, bool is_analyze);
public:
virtual ~Update_plan() = default;
@ -3046,7 +3046,7 @@ public:
deleting_all_rows= false;
}
Explain_delete* save_explain_delete_data(MEM_ROOT *mem_root, THD *thd);
Explain_delete* save_explain_delete_data(THD *thd, MEM_ROOT *mem_root);
};
enum account_lock_type

View file

@ -27782,12 +27782,16 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
jbuf_unpack_tracker= &eta->jbuf_unpack_tracker;
  /* Enable the table access time tracker for "ANALYZE stmt" or when engine
     statistics were requested for the slow query log */
if (thd->lex->analyze_stmt)
if (unlikely(thd->lex->analyze_stmt ||
thd->variables.log_slow_verbosity &
LOG_SLOW_VERBOSITY_ENGINE))
{
table->file->set_time_tracker(&eta->op_tracker);
eta->op_tracker.set_gap_tracker(&eta->extra_time_tracker);
eta->jbuf_unpack_tracker.set_gap_tracker(&eta->jbuf_extra_time_tracker);
if (likely(thd->lex->analyze_stmt))
{
eta->op_tracker.set_gap_tracker(&eta->extra_time_tracker);
eta->jbuf_unpack_tracker.set_gap_tracker(&eta->jbuf_extra_time_tracker);
}
}
/* No need to save id and select_type here, they are kept in Explain_select */

View file

@ -701,7 +701,7 @@ int mysql_update(THD *thd,
*/
if (thd->lex->describe)
goto produce_explain_and_leave;
if (!(explain= query_plan.save_explain_update_data(query_plan.mem_root, thd)))
if (!(explain= query_plan.save_explain_update_data(thd, query_plan.mem_root)))
goto err;
ANALYZE_START_TRACKING(thd, &explain->command_tracker);
@ -1387,7 +1387,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd)))
if (unlikely(!query_plan.save_explain_update_data(thd, query_plan.mem_root)))
goto err;
emit_explain_and_leave:

View file

@ -6477,8 +6477,9 @@ static Sys_var_ulong Sys_log_slow_rate_limit(
SESSION_VAR(log_slow_rate_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, UINT_MAX), DEFAULT(1), BLOCK_SIZE(1));
static const char *log_slow_verbosity_names[]= { "innodb", "query_plan",
"explain", 0 };
static const char *log_slow_verbosity_names[]=
{ "innodb", "query_plan", "explain", "engine", "full", 0};
static Sys_var_set Sys_log_slow_verbosity(
"log_slow_verbosity",
"Verbosity level for the slow log",

View file

@ -5695,6 +5695,12 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
(*f_ptr)->cond_selectivity= 1.0;
}
/* enable and clear or disable engine query statistics */
if ((thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_ENGINE))
file->ha_handler_stats_reset();
else
file->ha_handler_stats_disable();
notnull_cond= 0;
DBUG_ASSERT(!file->keyread_enabled());

View file

@ -36,6 +36,7 @@ Created 11/5/1995 Heikki Tuuri
#include "mach0data.h"
#include "buf0buf.h"
#include "buf0checksum.h"
#include "mariadb_stats.h"
#include <string.h>
#ifdef UNIV_INNOCHECKSUM
@ -2166,6 +2167,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
ut_ad(zip_size);
ut_ad(ut_is_2pow(zip_size));
++buf_pool.stat.n_page_gets;
mariadb_increment_pages_accessed();
buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain);
@ -2261,6 +2263,7 @@ must_read_page:
switch (dberr_t err= buf_read_page(page_id, zip_size)) {
case DB_SUCCESS:
case DB_SUCCESS_LOCKED_REC:
mariadb_increment_pages_read();
goto lookup;
default:
ib::error() << "Reading compressed page " << page_id
@ -2438,6 +2441,7 @@ buf_page_get_low(
|| ibuf_page_low(page_id, zip_size, FALSE, NULL));
++buf_pool.stat.n_page_gets;
mariadb_increment_pages_accessed();
auto& chain= buf_pool.page_hash.cell_get(page_id.fold());
page_hash_latch& hash_lock = buf_pool.page_hash.lock_get(chain);
@ -2509,6 +2513,7 @@ loop:
switch (dberr_t local_err = buf_read_page(page_id, zip_size)) {
case DB_SUCCESS:
case DB_SUCCESS_LOCKED_REC:
mariadb_increment_pages_read();
buf_read_ahead_random(page_id, zip_size, ibuf_inside(mtr));
break;
default:
@ -3069,7 +3074,6 @@ bool buf_page_optimistic_get(ulint rw_latch, buf_block_t *block,
ut_ad(~buf_page_t::LRU_MASK & state);
ut_ad(block->page.frame);
++buf_pool.stat.n_page_gets;
return true;
}
@ -3107,6 +3111,7 @@ buf_block_t *buf_page_try_get(const page_id_t page_id, mtr_t *mtr)
ut_ad(block->page.id() == page_id);
++buf_pool.stat.n_page_gets;
mariadb_increment_pages_accessed();
return block;
}

View file

@ -42,6 +42,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
#include "log.h"
#include "mariadb_stats.h"
/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
@ -295,9 +296,12 @@ buf_read_page_low(
}
ut_ad(bpage->in_file());
ulonglong mariadb_timer= 0;
if (sync) {
thd_wait_begin(nullptr, THD_WAIT_DISKIO);
if (mariadb_stats_active())
mariadb_timer= mariadb_measure();
}
DBUG_LOG("ib_buf",
@ -322,6 +326,8 @@ buf_read_page_low(
if (fio.err == DB_FAIL) {
fio.err = DB_PAGE_CORRUPTED;
}
if (mariadb_timer)
mariadb_increment_pages_read_time(mariadb_timer);
}
return fio.err;

View file

@ -112,6 +112,9 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "fil0pagecompress.h"
#include "ut0mem.h"
#include "row0ext.h"
#include "mariadb_stats.h"
thread_local ha_handler_stats mariadb_dummy_stats;
thread_local ha_handler_stats *mariadb_stats= &mariadb_dummy_stats;
#include "lz4.h"
#include "lzo/lzo1x.h"
@ -7799,6 +7802,7 @@ ha_innobase::write_row(
#endif
int error_result = 0;
bool auto_inc_used = false;
mariadb_set_stats set_stats_temporary(handler_stats);
DBUG_ENTER("ha_innobase::write_row");
@ -8559,6 +8563,7 @@ ha_innobase::update_row(
dberr_t error;
trx_t* trx = thd_to_trx(m_user_thd);
mariadb_set_stats set_stats_temporary(handler_stats);
DBUG_ENTER("ha_innobase::update_row");
@ -8738,6 +8743,7 @@ ha_innobase::delete_row(
{
dberr_t error;
trx_t* trx = thd_to_trx(m_user_thd);
mariadb_set_stats set_stats_temporary(handler_stats);
DBUG_ENTER("ha_innobase::delete_row");
@ -8981,6 +8987,7 @@ ha_innobase::index_read(
enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */
{
DBUG_ENTER("index_read");
mariadb_set_stats set_stats_temporary(handler_stats);
DEBUG_SYNC_C("ha_innobase_index_read_begin");
ut_a(m_prebuilt->trx == thd_to_trx(m_user_thd));
@ -9307,6 +9314,7 @@ ha_innobase::general_fetch(
{
DBUG_ENTER("general_fetch");
mariadb_set_stats set_stats_temporary(handler_stats);
const trx_t* trx = m_prebuilt->trx;
ut_ad(trx == thd_to_trx(m_user_thd));
@ -9514,7 +9522,6 @@ ha_innobase::rnd_next(
in MySQL format */
{
int error;
DBUG_ENTER("rnd_next");
if (m_start_of_scan) {
@ -9779,6 +9786,7 @@ ha_innobase::ft_read(
uchar* buf) /*!< in/out: buf contain result row */
{
row_prebuilt_t* ft_prebuilt;
mariadb_set_stats set_stats_temporary(handler_stats);
ft_prebuilt = reinterpret_cast<NEW_FT_INFO*>(ft_handler)->ft_prebuilt;
@ -13820,6 +13828,7 @@ static dberr_t innobase_rename_table(trx_t *trx, const char *from,
@retval 0 on success */
int ha_innobase::truncate()
{
mariadb_set_stats set_stats_temporary(handler_stats);
DBUG_ENTER("ha_innobase::truncate");
update_thd();
@ -14385,7 +14394,7 @@ ha_innobase::estimate_rows_upper_bound()
const dict_index_t* index;
ulonglong estimate;
ulonglong local_data_file_length;
mariadb_set_stats set_stats_temporary(handler_stats);
DBUG_ENTER("estimate_rows_upper_bound");
/* We do not know if MySQL can call this function before calling
@ -16623,6 +16632,7 @@ ha_innobase::get_auto_increment(
trx_t* trx;
dberr_t error;
ulonglong autoinc = 0;
mariadb_set_stats set_stats_temporary(handler_stats);
/* Prepare m_prebuilt->trx in the table handle */
update_thd(ha_thd());

View file

@ -0,0 +1,119 @@
/*****************************************************************************
Copyright (c) 2023, MariaDB Foundation
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
#ifndef mariadb_stats_h
#define mariadb_stats_h
/* Include file to handle mariadbd handler specific stats */
#include "ha_handler_stats.h"
#include "my_rdtsc.h"
/* Threads that are not collecting statistics point to this structure */
extern thread_local ha_handler_stats mariadb_dummy_stats;
/* Points to either THD->handler_stats or mariadb_dummy_stats */
extern thread_local ha_handler_stats *mariadb_stats;
/*
  Returns true if MariaDB wants engine statistics to be collected
*/
inline bool mariadb_stats_active()
{
  /* Collection is on whenever the current thread's stats object is active */
  return static_cast<bool>(mariadb_stats->active);
}
inline bool mariadb_stats_active(ha_handler_stats *stats)
{
  /* Same check as the parameterless overload, for an explicit stats object */
  return static_cast<bool>(stats->active);
}
/* The following functions increment different engine status */
inline void mariadb_increment_pages_accessed()
{
  /* One more page was looked up in the buffer pool */
  ++mariadb_stats->pages_accessed;
}
inline void mariadb_increment_pages_updated(ulonglong count)
{
  /* 'count' pages were modified (see mtr_t::commit() in mtr0mtr.cc) */
  mariadb_stats->pages_updated= mariadb_stats->pages_updated + count;
}
inline void mariadb_increment_pages_read()
{
  /* One more page had to be fetched from disk */
  ++mariadb_stats->pages_read_count;
}
inline void mariadb_increment_undo_records_read()
{
  /* One more undo log record was read (see trx_undo_prev_version_build()) */
  ++mariadb_stats->undo_records_read;
}
/*
  The following must be identical to the code of measure() in
  sql_analyze_stmt.h. Call this only if mariadb_stats_active() is true.
*/
inline ulonglong mariadb_measure()
{
#if (MY_TIMER_ROUTINE_CYCLES)
  /* Cheap CPU cycle counter when available on this platform */
  return my_timer_cycles();
#else
  /* Fallback: microsecond-resolution timer */
  return my_timer_microseconds();
#endif
}
/*
  Call this only if start_time != 0.
  See buf0rea.cc for an example of how to use it efficiently.
*/
inline void mariadb_increment_pages_read_time(ulonglong start_time)
{
  /*
    Accumulate the elapsed time since start_time into the page-read time
    counter. start_time must have been taken with mariadb_measure() while
    statistics were active.
  */
  ha_handler_stats *cur_stats= mariadb_stats;
  const ulonglong now= mariadb_measure();
  /* Check that we only call this if active, see example! */
  DBUG_ASSERT(start_time);
  DBUG_ASSERT(mariadb_stats_active(cur_stats));
  cur_stats->pages_read_time+= now - start_time;
}
/*
  Helper class to set mariadb_stats temporarily for one handler call
*/
/*
  Scope guard that redirects the thread-local mariadb_stats pointer to the
  given stats object (or to the inactive mariadb_dummy_stats when the
  argument is nullptr) for the lifetime of this object.

  NOTE(review): the destructor resets the pointer to mariadb_dummy_stats
  rather than restoring the previous value, so these guards presumably must
  not be nested — confirm with callers (see ha_innobase methods).
*/
class mariadb_set_stats
{
public:
  uint flag;  /* Not referenced in this header — TODO confirm it is needed */
  mariadb_set_stats(ha_handler_stats *stats)
  {
    /* Fall back to the dummy (inactive) stats when no object was supplied */
    mariadb_stats= stats ? stats : &mariadb_dummy_stats;
  }
  ~mariadb_set_stats()
  {
    /* Deactivate collection when leaving the handler call */
    mariadb_stats= &mariadb_dummy_stats;
  }
};
#endif /* mariadb_stats_h */

View file

@ -39,6 +39,7 @@ Created 9/17/2000 Heikki Tuuri
struct row_prebuilt_t;
class ha_innobase;
class ha_handler_stats;
/*******************************************************************//**
Frees the blob heap in prebuilt when no longer needed. */

View file

@ -34,6 +34,7 @@ Created 11/26/1995 Heikki Tuuri
#endif
#include "srv0start.h"
#include "log.h"
#include "mariadb_stats.h"
void mtr_memo_slot_t::release() const
{
@ -327,10 +328,10 @@ void mtr_t::commit()
ut_ad(!srv_read_only_mode);
std::pair<lsn_t,page_flush_ahead> lsns{do_write()};
process_freed_pages();
size_t modified= 0;
if (m_made_dirty)
{
size_t modified= 0;
auto it= m_memo.rbegin();
mysql_mutex_lock(&buf_pool.flush_list_mutex);
@ -386,8 +387,6 @@ void mtr_t::commit()
else
log_sys.latch.rd_unlock();
size_t modified= 0;
for (auto it= m_memo.rbegin(); it != m_memo.rend(); )
{
const mtr_memo_slot_t &slot= *it++;
@ -446,6 +445,8 @@ void mtr_t::commit()
m_memo.clear();
}
mariadb_increment_pages_updated(modified);
if (UNIV_UNLIKELY(lsns.second != PAGE_FLUSH_NO))
buf_flush_ahead(m_commit_lsn, lsns.second == PAGE_FLUSH_SYNC);
}

View file

@ -39,6 +39,7 @@ Created 3/26/1996 Heikki Tuuri
#include "row0row.h"
#include "row0mysql.h"
#include "row0ins.h"
#include "mariadb_stats.h"
/** The search tuple corresponding to TRX_UNDO_INSERT_METADATA. */
const dtuple_t trx_undo_metadata = {
@ -2182,6 +2183,7 @@ trx_undo_prev_version_build(
return DB_SUCCESS;
}
mariadb_increment_undo_records_read();
rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
ut_ad(!index->table->skip_alter_undo);