/* This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include "mariadb.h"
#include "sql_array.h"
#include "sql_string.h"
#include "sql_class.h"
#include "sql_show.h"
#include "field.h"
#include "sql_i_s.h"
#include "opt_trace.h"
#include "sql_parse.h"
#include "set_var.h"
#include "my_json_writer.h"
#include "sp_head.h"
#include "rowid_filter.h"
const Lex_ident_i_s_table I_S_table_name= "OPTIMIZER_TRACE"_Lex_ident_i_s_table;
/**
  Whether a list of tables contains information_schema.OPTIMIZER_TRACE.
  @param tbl list of tables
  TODO: can we do better than this here?
  @note this does not catch that a stored routine or view accesses
  the OPTIMIZER_TRACE table. So using a stored routine or view to read
  OPTIMIZER_TRACE will overwrite OPTIMIZER_TRACE as it runs and provide
  uninteresting info.
*/
bool list_has_optimizer_trace_table(const TABLE_LIST *tbl)
{
for (; tbl; tbl= tbl->next_global)
{
if (tbl->schema_table &&
I_S_table_name.streq(tbl->schema_table->table_name))
return true;
}
return false;
}
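/*
  Illustration (hypothetical statements, not from this file): the check
  above matches
    SELECT * FROM information_schema.OPTIMIZER_TRACE;
  so that reading the trace does not itself get traced, while a plain
    SELECT * FROM t1;
  does not match and stays traceable.
*/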
/*
  Returns true if the query has a SET command that switches optimizer_trace
  on/off. If so, don't trace the query (uninteresting).
*/
bool sets_var_optimizer_trace(enum enum_sql_command sql_command,
List<set_var_base> *set_vars)
{
if (sql_command == SQLCOM_SET_OPTION)
{
List_iterator_fast<set_var_base> it(*set_vars);
const set_var_base *var;
while ((var= it++))
if (var->is_var_optimizer_trace()) return true;
}
return false;
}
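/*
  Illustration (hypothetical statement): for
    SET optimizer_trace='enabled=on';
  the loop above finds a set_var with is_var_optimizer_trace() == true and
  the function returns true, so the SET statement itself is not traced.
*/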
namespace Show {
ST_FIELD_INFO optimizer_trace_info[]=
{
Column("QUERY", Longtext(65535), NOT_NULL),
Column("TRACE", Longtext(65535), NOT_NULL),
Column("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", SLong(20), NOT_NULL),
Column("INSUFFICIENT_PRIVILEGES", STiny(1), NOT_NULL),
CEnd()
};
} // namespace Show
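/*
  These columns back INFORMATION_SCHEMA.OPTIMIZER_TRACE. A typical way to
  read a trace (illustrative session; table t1 is hypothetical):
    SET optimizer_trace='enabled=on';
    SELECT * FROM t1;
    SELECT QUERY, TRACE, MISSING_BYTES_BEYOND_MAX_MEM_SIZE,
           INSUFFICIENT_PRIVILEGES
    FROM information_schema.OPTIMIZER_TRACE;
*/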
/*
  TODO: one-line needs to be implemented separately
*/
const char *Opt_trace_context::flag_names[]= {"enabled", "default",
NullS};
/*
  Returns whether a particular SQL command will be traced
*/
inline bool sql_command_can_be_traced(enum enum_sql_command sql_command)
{
/*
  For the first iteration we only allow basic DML statements
  (SELECT, single/multi-table UPDATE and DELETE, INSERT ... SELECT).
  TODO: change to allow other commands.
*/
return sql_command == SQLCOM_SELECT ||
sql_command == SQLCOM_UPDATE ||
sql_command == SQLCOM_DELETE ||
sql_command == SQLCOM_DELETE_MULTI ||
sql_command == SQLCOM_UPDATE_MULTI ||
sql_command == SQLCOM_INSERT_SELECT;
}
void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
Json_writer_object *writer)
{
DBUG_ASSERT(thd->trace_started());
StringBuffer<1024> str(system_charset_info);
ulonglong save_option_bits= thd->variables.option_bits;
thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
select_lex->print(thd, &str,
enum_query_type(QT_TO_SYSTEM_CHARSET |
QT_SHOW_SELECT_NUMBER |
QT_ITEM_IDENT_SKIP_DB_NAMES |
QT_VIEW_INTERNAL));
thd->variables.option_bits= save_option_bits;
/*
  The output is not very pretty: lots of back-ticks. It is the same as the
  output of EXPLAIN EXTENDED; let's try to improve it here.
*/
writer->add("expanded_query", str.c_ptr_safe(), str.length());
}
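/*
  Sketch of the member this writes (value is illustrative):
    "expanded_query": "select `t1`.`a` AS `a` from `t1`"
  The back-ticks come from identifier quoting, and QT_SHOW_SELECT_NUMBER
  adds select numbers to the printed query.
*/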
void opt_trace_disable_if_no_security_context_access(THD *thd)
{
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) || // (1)
thd->system_thread) // (2)
{
/*
(1) We know that the routine's execution starts with "enabled=off".
If it stays so until the routine ends, we needn't do security checks on
the routine.
If it does not stay so, it means the definer sets it to "on" somewhere
in the routine's body. Then it is his conscious decision to generate
traces, thus it is still correct to skip the security check.
(2) Threads of the Events Scheduler have an unusual security context
(thd->m_main_security_ctx.priv_user==NULL, see comment in
Security_context::change_security_context()).
*/
return;
}
Opt_trace_context *const trace= &thd->opt_trace;
if (unlikely(!thd->trace_started()))
{
/*
@@optimizer_trace has "enabled=on" but trace is not started.
Either Opt_trace_start ctor was not called for our statement (3), or it
was called but at that time, the variable had "enabled=off" (4).
There are no known cases of (3).
(4) suggests that the user managed to change the variable during
execution of the statement, and this statement is using
view/routine (note that we have not been able to provoke this, maybe
this is impossible). If it happens it is suspicious.
We disable I_S output. And we cannot do otherwise: we have no place to
store a possible "missing privilege" information (no Opt_trace_stmt, as
is_started() is false), so cannot do security checks, so cannot safely
do tracing, so have to disable I_S output. And even then, we don't know
when to re-enable I_S output, as we have no place to store the
information "re-enable tracing at the end of this statement", and we
don't even have a notion of statement here (statements in the optimizer
trace world mean an Opt_trace_stmt object, and there is none here). So
we must disable for the session's life.
COM_FIELD_LIST opens views, thus used to be a case of (3). To avoid
disabling I_S output for the session's life when this command is issued
(like in: "SET OPTIMIZER_TRACE='ENABLED=ON';USE somedb;" in the 'mysql'
command-line client), we have decided to create an Opt_trace_start for
this command. The command itself is not traced though
(SQLCOM_SHOW_FIELDS does not have CF_OPTIMIZER_TRACE).
*/
return;
}
/*
Note that thd->main_security_ctx.master_access is probably invariant
across the life of THD: GRANT/REVOKE don't affect global privileges of an
existing connection, per the manual.
*/
if (!(thd->main_security_ctx.check_access(GLOBAL_ACLS & ~GRANT_ACL)) &&
(0 != strcmp(thd->main_security_ctx.priv_user,
thd->security_context()->priv_user) ||
!Lex_ident_host(Lex_cstring_strlen(thd->main_security_ctx.priv_host)).
streq(Lex_cstring_strlen(thd->security_context()->priv_host))))
trace->missing_privilege();
}
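/*
  Illustrative scenario (hypothetical names): a SUID routine with
  DEFINER=admin@localhost is executed by joe@localhost, who lacks global
  privileges. priv_user/priv_host of the two security contexts differ, so
  missing_privilege() is called and Opt_trace_stmt::fill_info() will later
  report INSUFFICIENT_PRIVILEGES=1 with an empty TRACE column.
*/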
void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
{
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
bool full_access;
Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
const bool rc= check_show_routine_access(thd, sp, &full_access) || !full_access;
thd->set_security_context(backup_thd_sctx);
if (rc)
trace->missing_privilege();
}
/**
  If tracing is on, checks additional privileges on a list of tables/views,
  to make sure that the user has the right to do SHOW CREATE TABLE/VIEW and
  "SELECT *". For that:
  - this function checks table-level SELECT, which is sufficient for
  SHOW CREATE TABLE and "SELECT *" on a base table
  - for a view which has not yet been identified as such,
  opt_trace_disable_if_no_view_access() will later be called to check SHOW
  VIEW; otherwise we check SHOW VIEW here; SHOW VIEW + SELECT is sufficient
  for SHOW CREATE VIEW.
  If a privilege is missing, notifies the trace system.
  @param thd
  @param tbl list of tables to check
*/
void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
{
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
const TABLE_LIST *const first_not_own_table= thd->lex->first_not_own_table();
for (TABLE_LIST *t= tbl; t != NULL && t != first_not_own_table;
t= t->next_global)
{
/*
Anonymous derived tables (as in
"SELECT ... FROM (SELECT ...)") and table functions
don't have their grant.privilege set.
*/
if (!t->is_anonymous_derived_table() &&
!t->table_function)
{
const GRANT_INFO backup_grant_info= t->grant;
Security_context *const backup_table_sctx= t->security_ctx;
t->security_ctx= NULL;
/*
(1) check_table_access() fills t->grant.privilege.
(2) Because SELECT privileges can be column-based,
check_table_access() will return 'false' as long as there is SELECT
privilege on one column. But we want a table-level privilege.
*/
bool rc =
check_table_access(thd, SELECT_ACL, t, false, 1, true) || // (1)
((t->grant.privilege & SELECT_ACL) == NO_ACL); // (2)
if (t->is_view())
{
/*
It's a view which has already been opened: we are executing a
prepared statement. The view has been unfolded in the global list of
tables. So underlying tables will be automatically checked in the
present function, but we need an explicit check of SHOW VIEW:
*/
rc |= check_table_access(thd, SHOW_VIEW_ACL, t, false, 1, true);
}
t->security_ctx= backup_table_sctx;
t->grant= backup_grant_info;
if (rc)
{
trace->missing_privilege();
break;
}
}
}
thd->set_security_context(backup_thd_sctx);
return;
}
void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
TABLE_LIST *underlying_tables)
{
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
Security_context *const backup_table_sctx= view->security_ctx;
Security_context *const backup_thd_sctx= thd->security_context();
const GRANT_INFO backup_grant_info= view->grant;
view->security_ctx= NULL; // no SUID context for view
// no SUID context for THD
thd->set_security_context(&thd->main_security_ctx);
const int rc= check_table_access(thd, SHOW_VIEW_ACL, view, false, 1, true);
view->security_ctx= backup_table_sctx;
thd->set_security_context(backup_thd_sctx);
view->grant= backup_grant_info;
if (rc)
{
trace->missing_privilege();
return;
}
/*
We needn't check SELECT privilege on this view. Some
opt_trace_disable_if_no_tables_access() call has or will check it.
Now we check underlying tables/views of our view:
*/
opt_trace_disable_if_no_tables_access(thd, underlying_tables);
return;
}
/**
@class Opt_trace_stmt
The trace of one statement.
*/
Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg)
{
ctx= ctx_arg;
current_json= new Json_writer();
missing_priv= false;
I_S_disabled= 0;
}
Opt_trace_stmt::~Opt_trace_stmt()
{
delete current_json;
}
size_t Opt_trace_stmt::get_length()
{
return current_json->output.length();
}
size_t Opt_trace_stmt::get_truncated_bytes()
{
return current_json->get_truncated_bytes();
}
void Opt_trace_stmt::set_query(const char *query_ptr, size_t length,
const CHARSET_INFO *charset)
{
query.append(query_ptr, length, charset);
}
void Opt_trace_context::missing_privilege()
{
if (current_trace)
current_trace->missing_privilege();
}
void Opt_trace_context::set_allowed_mem_size(size_t mem_size)
{
current_trace->set_allowed_mem_size(mem_size);
}
/*
  TODO: in the future, when we save multiple traces, this function should
  return max_mem_size minus the memory occupied by the saved traces
*/
size_t Opt_trace_context::remaining_mem_size()
{
return max_mem_size;
}
/*
Disable tracing for children if the current trace is already present.
Currently only one trace is stored and there is no mechanism
to restore traces, so disabling tracing for children is the best option.
*/
bool Opt_trace_context::disable_tracing_if_required()
{
if (current_trace)
{
current_trace->disable_tracing_for_children();
return true;
}
return false;
}
bool Opt_trace_context::enable_tracing_if_required()
{
if (current_trace)
{
current_trace->enable_tracing_for_children();
return true;
}
return false;
}
bool Opt_trace_context::is_enabled()
{
if (current_trace)
return current_trace->is_enabled();
return false;
}
Opt_trace_context::Opt_trace_context() : traces(PSI_INSTRUMENT_MEM)
{
current_trace= NULL;
max_mem_size= 0;
}
Opt_trace_context::~Opt_trace_context()
{
delete_traces();
}
void Opt_trace_context::set_query(const char *query, size_t length, const CHARSET_INFO *charset)
{
current_trace->set_query(query, length, charset);
}
void Opt_trace_context::start(THD *thd, TABLE_LIST *tbl,
enum enum_sql_command sql_command,
const char *query,
size_t query_length,
const CHARSET_INFO *query_charset,
ulong max_mem_size_arg)
{
/*
  We don't want multiple traces open at the same time, so as soon as a new
  trace is created we forcefully end the previous one if it has not ended
  by itself. This mostly happens with stored functions or procedures.
  TODO: handle multiple traces
*/
DBUG_ASSERT(!current_trace);
current_trace= new Opt_trace_stmt(this);
max_mem_size= max_mem_size_arg;
set_allowed_mem_size(remaining_mem_size());
}
void Opt_trace_context::end()
{
if (current_trace)
traces.push(current_trace);
if (!traces.elements())
return;
if (traces.elements() > 1)
{
Opt_trace_stmt *prev= traces.at(0);
delete prev;
traces.del(0);
}
current_trace= NULL;
}
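/*
  Net effect of end() under the current single-trace policy (schematic):
    start(...); end();   // traces = [trace1]
    start(...); end();   // trace1 is deleted, traces = [trace2]
  i.e. at most one finished trace is kept at any time.
*/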
void Opt_trace_start::init(THD *thd,
TABLE_LIST *tbl,
enum enum_sql_command sql_command,
List<set_var_base> *set_vars,
const char *query,
size_t query_length,
const CHARSET_INFO *query_charset)
{
/*
  If optimizer trace is enabled and the statement we have is traceable,
  then we start the context.
*/
const ulonglong var= thd->variables.optimizer_trace;
traceable= FALSE;
if (unlikely(var & Opt_trace_context::FLAG_ENABLED) &&
sql_command_can_be_traced(sql_command) &&
!list_has_optimizer_trace_table(tbl) &&
!sets_var_optimizer_trace(sql_command, set_vars) &&
!thd->system_thread &&
!ctx->disable_tracing_if_required())
{
ctx->start(thd, tbl, sql_command, query, query_length, query_charset,
thd->variables.optimizer_trace_max_mem_size);
ctx->set_query(query, query_length, query_charset);
traceable= TRUE;
opt_trace_disable_if_no_tables_access(thd, tbl);
Json_writer *w= ctx->get_current_json();
w->start_object();
w->add_member("steps").start_array();
}
}
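/*
  When tracing starts, the JSON document is opened schematically as:
    {
      "steps": [
        <trace objects written by the optimizer>
  The matching end_array()/end_object() calls are emitted by
  ~Opt_trace_start() below.
*/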
Opt_trace_start::~Opt_trace_start()
{
if (traceable)
{
Json_writer *w= ctx->get_current_json();
w->end_array();
w->end_object();
ctx->end();
traceable= FALSE;
}
else
{
ctx->enable_tracing_if_required();
}
}
void Opt_trace_stmt::fill_info(Opt_trace_info* info)
{
if (unlikely(info->missing_priv= get_missing_priv()))
{
info->trace_ptr= info->query_ptr= "";
info->trace_length= info->query_length= 0;
info->query_charset= &my_charset_bin;
info->missing_bytes= 0;
}
else
{
info->trace_ptr= current_json->output.get_string()->ptr();
info->trace_length= get_length();
info->query_ptr= query.ptr();
info->query_length= query.length();
info->query_charset= query.charset();
info->missing_bytes= get_truncated_bytes();
info->missing_priv= get_missing_priv();
}
}
void Opt_trace_stmt::missing_privilege()
{
missing_priv= true;
}
void Opt_trace_stmt::disable_tracing_for_children()
{
++I_S_disabled;
}
void Opt_trace_stmt::enable_tracing_for_children()
{
if (I_S_disabled)
--I_S_disabled;
}
void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)
{
current_json->set_size_limit(mem_size);
}
void get_table_name_for_trace(const JOIN_TAB *tab, String *out)
{
char table_name_buffer[64];
DBUG_ASSERT(tab != NULL);
DBUG_ASSERT(tab->join->thd->trace_started());
if (tab->table && tab->table->derived_select_number)
{
/* Derived table name generation */
size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
"<derived%u>",
tab->table->derived_select_number);
out->copy(table_name_buffer, len, &my_charset_bin);
}
else if (tab->bush_children)
{
JOIN_TAB *ctab= tab->bush_children->start;
size_t len= my_snprintf(table_name_buffer,
sizeof(table_name_buffer)-1,
"<subquery%d>",
ctab->emb_sj_nest->sj_subq_pred->get_identifier());
out->copy(table_name_buffer, len, &my_charset_bin);
}
else
{
TABLE_LIST *real_table= tab->table->pos_in_table_list;
out->set(real_table->alias.str, real_table->alias.length, &my_charset_bin);
}
}
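/*
  Examples of produced names (numbers are illustrative):
    "<derived2>"   - materialized derived table of select #2
    "<subquery2>"  - semi-join materialization nest
    "t1"           - regular table, printed via its alias
*/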
/*
Prefer this when you are iterating over JOIN_TABs
*/
void Json_writer::add_table_name(const JOIN_TAB *tab)
{
String sbuf;
get_table_name_for_trace(tab, &sbuf);
add_str(sbuf.ptr(), sbuf.length());
}
void Json_writer::add_table_name(const TABLE *table)
{
add_str(table->pos_in_table_list->alias.str);
}
void trace_condition(THD * thd, const char *name, const char *transform_type,
Item *item, const char *table_name)
{
DBUG_ASSERT(thd->trace_started());
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cond(thd, transform_type);
trace_cond.add("condition", name);
if (table_name)
trace_cond.add("attached_to", table_name);
trace_cond.add("resulting_condition", item);
}
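/*
  Sketch of the emitted JSON (key and values depend on the caller):
    {
      "<transform_type>": {
        "condition": "WHERE",
        "attached_to": "t1",
        "resulting_condition": <printout of item>
      }
    }
*/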
void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
{
DBUG_ASSERT(thd->trace_started());
Json_writer_object table_records(thd);
table_records.add_table_name(tab);
Json_writer_object table_rec(thd, "table_scan");
table_rec.
add("rows", tab->found_records).
add("read_cost", tab->read_time).
add("read_and_compare_cost", tab->cached_scan_and_compare_time);
}
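/*
  Sketch of the emitted JSON (numbers are illustrative):
    {
      "table": "t1",
      "table_scan": {
        "rows": 1000,
        "read_cost": 0.25,
        "read_and_compare_cost": 0.35
      }
    }
*/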
/*
  @brief
  Add the tables inside a partial join to the optimizer trace
  @param jsobj        JSON object to write to
  @param join         join handler
  @param idx          length of the partial QEP in 'join->positions'
  @param join_tables  map of all non-const tables of the join
  @note
  This function is used during best_access_path() to print the tables
  inside the partial join that were considered when doing the cost-based
  analysis of the various join orders.
*/
void trace_plan_prefix(Json_writer_object *jsobj, JOIN *join, uint idx,
table_map join_tables)
{
DBUG_ASSERT(join->thd->trace_started());
String prefix_str;
prefix_str.length(0);
for (uint i= join->const_tables; i < idx; i++)
{
TABLE_LIST *const tr= join->positions[i].table->tab_list;
if (!(tr->map & join_tables))
{
String str;
get_table_name_for_trace(join->positions[i].table, &str);
if (prefix_str.length() != 0)
prefix_str.append(',');
prefix_str.append(str);
}
}
jsobj->add("plan_prefix", prefix_str.ptr(), prefix_str.length());
}
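/*
  Sketch of the emitted member (table names are illustrative): with t1 and
  t2 already placed in the partial plan,
    "plan_prefix": "t1,t2"
*/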
/*
  Print the join order of all the tables for the top-level select.
  For example, for
    select * from ot1
    where ot1.a IN (select it1.a from it1, it2 where it1.b=it2.a);
  this function would print
    ot1, <subquery2>    ----> for select #1
*/
void print_final_join_order(JOIN *join)
{
DBUG_ASSERT(join->thd->trace_started());
Json_writer_object join_order(join->thd);
Json_writer_array best_order(join->thd, "best_join_order");
JOIN_TAB *j;
uint i;
for (j= join->join_tab,i=0 ; i < join->top_join_tab_count;
i++, j++)
best_order.add_table_name(j);
best_order.end();
/* Write information about the resulting join */
  join_order.
    add("rows", join->join_record_count).
    add("cost", join->best_read);
}
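
/*
  Write the access method chosen for a table to the optimizer trace:
  the join type, the estimated number of rows read and output, the cost,
  whether join buffering is used and, if a rowid filter is attached,
  the name of the index it is built over.
*/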
void print_best_access_for_table(THD *thd, POSITION *pos)
{
  DBUG_ASSERT(thd->trace_started());
  Json_writer_object obj(thd, "chosen_access_method");
  obj.
    add("type", pos->type == JT_ALL ? "scan" : join_type_str[pos->type]).
add("rows_read", pos->records_read).
add("rows_out", pos->records_out).
add("cost", pos->read_time).
add("uses_join_buffering", pos->use_join_buffer);
if (pos->range_rowid_filter_info)
{
uint key_no= pos->range_rowid_filter_info->get_key_no();
obj.add("rowid_filter_index",
pos->table->table->key_info[key_no].name);
}
}

/*
  Introduce an enum_query_type flags parameter; maybe also allow
  EXPLAIN to use this function.
*/
void Json_writer::add_str(Item *item)
{
  if (item)
  {
    THD *thd= current_thd;
    StringBuffer<256> str(system_charset_info);
    ulonglong save_option_bits= thd->variables.option_bits;
    thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
    item->print(&str,
                enum_query_type(QT_TO_SYSTEM_CHARSET | QT_SHOW_SELECT_NUMBER
                                | QT_ITEM_IDENT_SKIP_DB_NAMES));
    thd->variables.option_bits= save_option_bits;
    add_str(str.c_ptr_safe());
  }
  else
    add_null();
}
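
/*
  Delete all optimizer traces stored in this context and empty the list.
*/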
void Opt_trace_context::delete_traces()
{
  while (traces.elements())
  {
    Opt_trace_stmt *prev= traces.at(0);
    delete prev;
    traces.del(0);
  }
}
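
/*
  Fill a row of the INFORMATION_SCHEMA.OPTIMIZER_TRACE table from the
  top trace of the current connection, as read by e.g.
    SELECT * FROM information_schema.OPTIMIZER_TRACE;
*/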
int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *)
{
  TABLE *table= tables->table;
  Opt_trace_info info;

  /*
    Get the values of trace, query, missing_bytes and missing_priv.

    @todo: Need an iterator here to walk over all the traces
  */
  Opt_trace_context *ctx= &thd->opt_trace;
  if (!thd->opt_trace.empty())
  {
    Opt_trace_stmt *stmt= ctx->get_top_trace();
    stmt->fill_info(&info);

    table->field[0]->store(info.query_ptr, static_cast<uint>(info.query_length),
                           info.query_charset);
    table->field[1]->store(info.trace_ptr, static_cast<uint>(info.trace_length),
                           system_charset_info);
    table->field[2]->store(info.missing_bytes, true);
    table->field[3]->store(info.missing_priv, true);
    // Store the record in the I_S table
    if (schema_table_store_record(thd, table))
      return 1;
  }
  return 0;
}