/* Copyright (C) 2009 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

/**
  @file

  @brief
  Functions to update persistent statistical tables and to read from them

  @defgroup Query_Optimizer Query Optimizer
  @{
*/

#include <my_global.h>
#include "sql_base.h"
#include "key.h"
#include "sql_statistics.h"
#include "opt_range.h"
#include "my_atomic.h"
#include "sql_show.h"
#include "sql_partition.h"

/*
  The system variable 'use_stat_tables' can take one of the
  following values:
  "never", "complementary", "preferably".
  If the value of the variable 'use_stat_tables' is set to
  "never" then any statistical data from the persistent statistical tables
  is ignored by the optimizer.
  If the value of the variable 'use_stat_tables' is set to
  "complementary" then a particular statistical characteristic is used
  by the optimizer only if the database engine does not provide similar
  statistics. For example, 'nulls_ratio' for table columns is currently
  not provided by any engine. So the optimizer uses this statistical data
  from the statistical tables. At the same time it does not use
  'avg_frequency' for any index prefix from the statistical tables since
  a similar statistical characteristic 'records_per_key' can be
  requested from the database engine.
  If the value of the variable 'use_stat_tables' is set to
  "preferably" the optimizer uses a particular statistical characteristic
  from the engine only if it cannot be found in the statistical tables.
  If an ANALYZE command is executed then it results in collecting
  statistical data for the tables specified by the command and storing
  the collected statistics in the persistent statistical tables only
  when the value of the variable 'use_stat_tables' is not
  equal to "never".
*/
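
/*
  Illustrative sketch, not part of the original code and compiled out via a
  hypothetical guard macro: assuming the mode values NEVER, COMPLEMENTARY and
  PREFERABLY and the helper get_use_stat_tables_mode() declared in
  sql_statistics.h, the rule above reduces to the two questions a reader of
  the statistics may ask.
*/
#ifdef STATISTICS_USAGE_EXAMPLES
static bool example_stat_tables_are_consulted(THD *thd)
{
  /* Persistent statistics are looked at for any mode other than "never" */
  return get_use_stat_tables_mode(thd) != NEVER;
}

static bool example_stat_tables_are_preferred(THD *thd)
{
  /* Only in the "preferably" mode do they take precedence over engine stats */
  return get_use_stat_tables_mode(thd) == PREFERABLY;
}
#endif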

/* Currently there are only 3 persistent statistical tables */
static const uint STATISTICS_TABLES= 3;

/*
  The names of the statistical tables in this array must correspond to the
  definitions of the tables in the file ../scripts/mysql_system_tables.sql
*/
static const LEX_STRING stat_table_name[STATISTICS_TABLES]=
{
  { C_STRING_WITH_LEN("table_stats") },
  { C_STRING_WITH_LEN("column_stats") },
  { C_STRING_WITH_LEN("index_stats") }
};

/* Name of the database to which the statistical tables belong */
static const LEX_STRING stat_tables_db_name= { C_STRING_WITH_LEN("mysql") };


/**
  @details
  The function builds a list of TABLE_LIST elements for the system statistical
  tables using the array of TABLE_LIST structures passed as a parameter.
  The lock type of each element is set to TL_READ if for_write = FALSE,
  otherwise it is set to TL_WRITE.
*/

static
inline void init_table_list_for_stat_tables(TABLE_LIST *tables, bool for_write)
{
  uint i;

  memset((char *) &tables[0], 0, sizeof(TABLE_LIST) * STATISTICS_TABLES);

  for (i= 0; i < STATISTICS_TABLES; i++)
  {
    tables[i].db= stat_tables_db_name.str;
    tables[i].db_length= stat_tables_db_name.length;
    tables[i].alias= tables[i].table_name= stat_table_name[i].str;
    tables[i].table_name_length= stat_table_name[i].length;
    tables[i].lock_type= for_write ? TL_WRITE : TL_READ;
    if (i < STATISTICS_TABLES - 1)
      tables[i].next_global= tables[i].next_local=
        tables[i].next_name_resolution_table= &tables[i+1];
    if (i != 0)
      tables[i].prev_global= &tables[i-1].next_global;
  }
}


/**
  @details
  The function builds a TABLE_LIST containing only one element 'tbl' for
  the statistical table called 'stat_tab_name'.
  The lock type of the element is set to TL_READ if for_write = FALSE,
  otherwise it is set to TL_WRITE.
*/

static
inline void init_table_list_for_single_stat_table(TABLE_LIST *tbl,
                                                  const LEX_STRING *stat_tab_name,
                                                  bool for_write)
{
  memset((char *) tbl, 0, sizeof(TABLE_LIST));

  tbl->db= stat_tables_db_name.str;
  tbl->db_length= stat_tables_db_name.length;
  tbl->alias= tbl->table_name= stat_tab_name->str;
  tbl->table_name_length= stat_tab_name->length;
  tbl->lock_type= for_write ? TL_WRITE : TL_READ;
}
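
/*
  Illustrative sketch, not part of the original code and compiled out via a
  hypothetical guard macro: preparing a TABLE_LIST for reading only the
  column_stats table, assuming COLUMN_STAT from sql_statistics.h is a valid
  index into stat_table_name[].
*/
#ifdef STATISTICS_USAGE_EXAMPLES
static void example_list_for_column_stats(TABLE_LIST *tbl)
{
  init_table_list_for_single_stat_table(tbl, &stat_table_name[COLUMN_STAT],
                                        FALSE /* for_write */);
}
#endif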


static Table_check_intact_log_error stat_table_intact;

static const
TABLE_FIELD_TYPE table_stat_fields[TABLE_STAT_N_FIELDS] =
{
  {
    { C_STRING_WITH_LEN("db_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("table_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("cardinality") },
    { C_STRING_WITH_LEN("bigint(21)") },
    { NULL, 0 }
  },
};
static const uint table_stat_pk_col[]= {0,1};
static const TABLE_FIELD_DEF
table_stat_def= {TABLE_STAT_N_FIELDS, table_stat_fields, 2, table_stat_pk_col };

static const
TABLE_FIELD_TYPE column_stat_fields[COLUMN_STAT_N_FIELDS] =
{
  {
    { C_STRING_WITH_LEN("db_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("table_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("column_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("min_value") },
    { C_STRING_WITH_LEN("varbinary(255)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("max_value") },
    { C_STRING_WITH_LEN("varbinary(255)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("nulls_ratio") },
    { C_STRING_WITH_LEN("decimal(12,4)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("avg_length") },
    { C_STRING_WITH_LEN("decimal(12,4)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("avg_frequency") },
    { C_STRING_WITH_LEN("decimal(12,4)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("hist_size") },
    { C_STRING_WITH_LEN("tinyint(3)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("hist_type") },
    { C_STRING_WITH_LEN("enum('SINGLE_PREC_HB','DOUBLE_PREC_HB')") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("histogram") },
    { C_STRING_WITH_LEN("varbinary(255)") },
    { NULL, 0 }
  }
};
static const uint column_stat_pk_col[]= {0,1,2};
static const TABLE_FIELD_DEF
column_stat_def= {COLUMN_STAT_N_FIELDS, column_stat_fields, 3, column_stat_pk_col};

static const
TABLE_FIELD_TYPE index_stat_fields[INDEX_STAT_N_FIELDS] =
{
  {
    { C_STRING_WITH_LEN("db_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("table_name") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("index") },
    { C_STRING_WITH_LEN("varchar(64)") },
    { C_STRING_WITH_LEN("utf8") }
  },
  {
    { C_STRING_WITH_LEN("prefix_arity") },
    { C_STRING_WITH_LEN("int(11)") },
    { NULL, 0 }
  },
  {
    { C_STRING_WITH_LEN("avg_frequency") },
    { C_STRING_WITH_LEN("decimal(12,4)") },
    { NULL, 0 }
  }
};
static const uint index_stat_pk_col[]= {0,1,2,3};
static const TABLE_FIELD_DEF
index_stat_def= {INDEX_STAT_N_FIELDS, index_stat_fields, 4, index_stat_pk_col};


/**
  @brief
  Open all statistical tables and lock them
*/

static int open_stat_tables(THD *thd, TABLE_LIST *tables,
                            Open_tables_backup *backup, bool for_write)
{
  int rc;

  Dummy_error_handler deh; // suppress errors
  thd->push_internal_handler(&deh);
  init_table_list_for_stat_tables(tables, for_write);
  init_mdl_requests(tables);
  thd->in_sub_stmt|= SUB_STMT_STAT_TABLES;
  rc= open_system_tables_for_read(thd, tables, backup);
  thd->in_sub_stmt&= ~SUB_STMT_STAT_TABLES;
  thd->pop_internal_handler();

  /* If the number of tables changes, we should revise the check below. */
  compile_time_assert(STATISTICS_TABLES == 3);

  if (!rc &&
      (stat_table_intact.check(tables[TABLE_STAT].table, &table_stat_def) ||
       stat_table_intact.check(tables[COLUMN_STAT].table, &column_stat_def) ||
       stat_table_intact.check(tables[INDEX_STAT].table, &index_stat_def)))
  {
    close_system_tables(thd, backup);
    rc= 1;
  }

  return rc;
}
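
/*
  Illustrative sketch, not part of the original code and compiled out via a
  hypothetical guard macro: the open/close round trip around the three
  statistical tables when they are only read.
*/
#ifdef STATISTICS_USAGE_EXAMPLES
static void example_stat_tables_round_trip(THD *thd)
{
  TABLE_LIST tables[STATISTICS_TABLES];
  Open_tables_backup backup;
  if (!open_stat_tables(thd, tables, &backup, FALSE /* for_write */))
  {
    /* ... read from tables[TABLE_STAT].table and the other two here ... */
    close_system_tables(thd, &backup);
  }
}
#endif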


/**
  @brief
  Open a statistical table and lock it
*/

static
inline int open_single_stat_table(THD *thd, TABLE_LIST *table,
                                  const LEX_STRING *stat_tab_name,
                                  Open_tables_backup *backup,
                                  bool for_write)
{
  init_table_list_for_single_stat_table(table, stat_tab_name, for_write);
  init_mdl_requests(table);
  return open_system_tables_for_read(thd, table, backup);
}


/*
  The class Column_statistics_collected is a helper class used to collect
  statistics on a table column. The class is derived directly from
  the class Column_statistics, and, in addition to the fields of the
  latter, it contains fields to accumulate the results of aggregation
  for the number of nulls in the column and for the size of the column
  values. There is also a container for distinct column values used
  to calculate the average number of records per distinct column value.
*/

class Column_statistics_collected :public Column_statistics
{

private:
  Field *column;  /* The column to collect statistics on */
  ha_rows nulls;  /* To accumulate the number of nulls in the column */
  ulonglong column_total_length; /* To accumulate the size of column values */
  Count_distinct_field *count_distinct; /* The container for distinct
                                           column values */

  bool is_single_pk_col; /* TRUE <-> the only column of the primary key */

public:

  inline void init(THD *thd, Field * table_field);
  inline bool add(ha_rows rowno);
  inline void finish(ha_rows rows);
  inline void cleanup();
};


/**
  Stat_table is the base class for the classes Table_stat, Column_stat and
  Index_stat. The methods of these classes allow us to read statistical
  data from statistical tables, write collected statistical data into
  statistical tables and update statistical data in these tables
  as well as update access fields belonging to the primary key and
  delete records by prefixes of the primary key.
  Objects of the classes Table_stat, Column_stat and Index_stat are used
  for reading/writing statistics from/into persistent tables table_stats,
  column_stats and index_stats correspondingly. These tables are stored in
  the system database 'mysql'.

  Statistics are always read and written for a given database table t. When
  an object of any of these classes is created a pointer to the TABLE
  structure for this database table is passed as a parameter to the constructor
  of the object. The other parameter is a pointer to the TABLE structure for
  the corresponding statistical table st. So construction of an object to
  read/write statistical data on table t from/into statistical table st
  requires both tables t and st to be opened.
  In some cases the TABLE structure for table t may be undefined. Then
  the objects of the classes Table_stat, Column_stat and Index_stat are
  created by alternative constructors that require only the name
  of the table t and the name of the database it belongs to. Currently the
  alternative constructors are used only in the cases when some records
  belonging to the table are to be deleted, or its keys are to be updated.

  Reading/writing statistical data from/into a statistical table is always
  performed by a key. At the moment there is only one key defined for each
  statistical table and this key is primary.
  The primary key for the table table_stats is built as (db_name, table_name).
  The primary key for the table column_stats is built as (db_name, table_name,
  column_name).
  The primary key for the table index_stats is built as (db_name, table_name,
  index_name, prefix_arity).

  Reading statistical data from a statistical table is performed by the
  following pattern. First a table dependent method sets the values of
  the fields that comprise the lookup key. Then an implementation of the
  method get_stat_values() declared in Stat_table as a pure virtual method
  finds the row from the statistical table by the set key. If the row is
  found the values of statistical fields are read from this row and are
  distributed in the internal structures.

  Let's assume the statistical data is read for table t from database db.

  When statistical data is searched in the table table_stats first
  Table_stat::set_key_fields() should set the fields of db_name and
  table_name. Then get_stat_values looks for a row by the set key value,
  and, if the row is found, reads the value from the column
  table_stats.cardinality into the field read_stat.cardinality of the TABLE
  structure for table t and sets the value of read_stat.cardinality_is_null
  from this structure to FALSE. If the value of the 'cardinality' column
  in the row is null or if no row is found read_stat.cardinality_is_null
  is set to TRUE.

  When statistical data is searched in the table column_stats first
  Column_stat::set_key_fields() should set the fields of db_name, table_name
  and column_name with column_name taken out of the only parameter f of the
  Field* type passed to this method. After this get_stat_values looks
  for a row by the set key value. If the row is found the values of statistical
  data columns min_value, max_value, nulls_ratio, avg_length, avg_frequency,
  hist_size, hist_type, histogram are read into internal structures. Values
  of nulls_ratio, avg_length, avg_frequency, hist_size, hist_type, histogram
  are read into the corresponding fields of the read_stat structure from
  the Field object f, while values from min_value and max_value are copied
  into the min_value and max_value record buffers attached to the TABLE
  structure for table t.
  If the value of a statistical column in the found row is null, then the
  corresponding flag in the f->read_stat.column_stat_nulls bitmap is set off.
  Otherwise the flag is set on. If no row is found for the column all the
  flags in f->column_stat_nulls are set off.

  When statistical data is searched in the table index_stats first
  Index_stat::set_key_fields() has to be called to set the fields of db_name,
  table_name, index_name and prefix_arity. The value of index_name is extracted
  from the first parameter key_info of the KEY* type passed to the method.
  This parameter specifies the index of interest idx. The second parameter
  passed to the method specifies the arity k of the index prefix for which
  statistical data is to be read. E.g. if the index idx consists of 3
  components (p1,p2,p3) the table index_stats usually will contain 3 rows for
  this index: the first - for the prefix (p1), the second - for the prefix
  (p1,p2), and the third - for the prefix (p1,p2,p3). After the key fields
  have been set a call of get_stat_values looks for a row by the set key value.
  If the row is found and the value of the avg_frequency column is not null
  then this value is assigned to key_info->read_stat.avg_frequency[k].
  Otherwise 0 is assigned to this element.

  The method Stat_table::update_stat is used to write statistical data
  collected in the internal structures into a statistical table st.
  It is assumed that before any invocation of this method a call of the
  function st.set_key_fields has set the values of the primary key fields
  that serve to locate the row from the statistical table st where the
  collected statistical data from internal structures are to be written
  to. The statistical data is written from the counterparts of the
  statistical fields of internal structures into which it would be read
  by the functions get_stat_values. The counterpart fields are used
  only when statistics is collected.
  When updating/inserting a row from the statistical table st the method
  Stat_table::update_stat calls the implementation of the pure virtual
  method store_field_values to transfer statistical data from the fields
  of internal structures to the fields of the record buffer used for updates
  of the statistical table st.
*/

class Stat_table
{

private:

  /* Handler used for the retrieval of the statistical table stat_table */
  handler *stat_file;

  uint stat_key_length; /* Length of the key to access stat_table */
  uchar *record[2];     /* Record buffers used to access/update stat_table */
  uint stat_key_idx;    /* The number of the key to access stat_table */

  /* This is a helper function used only by the Stat_table constructors */
  void common_init_stat_table()
  {
    stat_file= stat_table->file;
    /* Currently any statistical table has only one key */
    stat_key_idx= 0;
    stat_key_info= &stat_table->key_info[stat_key_idx];
    stat_key_length= stat_key_info->key_length;
    record[0]= stat_table->record[0];
    record[1]= stat_table->record[1];
  }

protected:

  /* Statistical table to read statistics from or to update/delete */
  TABLE *stat_table;
  KEY *stat_key_info;   /* Structure for the index to access stat_table */

  /* Table for which statistical data is read / updated */
  TABLE *table;
  TABLE_SHARE *table_share;  /* Table share for 'table' */
  LEX_STRING *db_name;       /* Name of the database containing 'table' */
  LEX_STRING *table_name;    /* Name of the table 'table' */

  void store_record_for_update()
  {
    store_record(stat_table, record[1]);
  }

  void store_record_for_lookup()
  {
    DBUG_ASSERT(record[0] == stat_table->record[0]);
  }

  bool update_record()
  {
    int err;
    if ((err= stat_file->ha_update_row(record[1], record[0])) &&
        err != HA_ERR_RECORD_IS_THE_SAME)
      return TRUE;
    /* Make change permanent and avoid 'table is marked as crashed' errors */
    stat_file->extra(HA_EXTRA_FLUSH);
    return FALSE;
  }

public:


  /**
    @details
    This constructor has to be called by any constructor of the derived
    classes. The constructor 'tunes' the private and protected members of
    the constructed object to the statistical table 'stat_table' with the
    statistical data of our interest and to the table 'tab' for which this
    statistics has been collected.
  */

  Stat_table(TABLE *stat, TABLE *tab)
    :stat_table(stat), table(tab)
  {
    table_share= tab->s;
    common_init_stat_table();
    db_name= &table_share->db;
    table_name= &table_share->table_name;
  }


  /**
    @details
    This constructor has to be called by any constructor of the derived
    classes. The constructor 'tunes' the private and protected members of
    the constructed object to the statistical table 'stat_table' with the
    statistical data of our interest and to the table t for which this
    statistics has been collected. The table t is uniquely specified
    by the database name 'db' and the table name 'tab'.
  */

  Stat_table(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
    :stat_table(stat), table_share(NULL)
  {
    common_init_stat_table();
    db_name= db;
    table_name= tab;
  }


  virtual ~Stat_table() {}

  /**
    @brief
    Store the given values of fields for database name and table name

    @details
    This is a purely virtual method.
    The implementation for any derived class shall store the given
    values of the database name and table name in the corresponding
    fields of stat_table.

    @note
    The method is called by the update_table_name_key_parts function.
  */

  virtual void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)= 0;


  /**
    @brief
    Store statistical data into fields of the statistical table

    @details
    This is a purely virtual method.
    The implementation for any derived class shall put the appropriate
    statistical data into the corresponding fields of stat_table.

    @note
    The method is called by the update_stat function.
  */

  virtual void store_stat_fields()= 0;


  /**
    @brief
    Read statistical data from fields of the statistical table

    @details
    This is a purely virtual method.
    The implementation for any derived class shall read the appropriate
    statistical data from the corresponding fields of stat_table.
  */

  virtual void get_stat_values()= 0;


  /**
    @brief
    Find a record in the statistical table by a primary key

    @details
    The function looks for a record in stat_table by its primary key.
    It assumes that the key fields have been already stored in the record
    buffer of stat_table.

    @retval
    FALSE    the record is not found
    @retval
    TRUE     the record is found
  */

  bool find_stat()
  {
    uchar key[MAX_KEY_LENGTH];
    key_copy(key, record[0], stat_key_info, stat_key_length);
    return !stat_file->ha_index_read_idx_map(record[0], stat_key_idx, key,
                                             HA_WHOLE_KEY, HA_READ_KEY_EXACT);
  }


  /**
    @brief
    Find a record in the statistical table by a key prefix value

    @details
    The function looks for a record in stat_table by the key value consisting
    of 'prefix_parts' major components for the primary index.
    It assumes that the key prefix fields have been already stored in the
    record buffer of stat_table.

    @retval
    FALSE    the record is not found
    @retval
    TRUE     the record is found
  */

  bool find_next_stat_for_prefix(uint prefix_parts)
  {
    uchar key[MAX_KEY_LENGTH];
    uint prefix_key_length= 0;
    for (uint i= 0; i < prefix_parts; i++)
      prefix_key_length+= stat_key_info->key_part[i].store_length;
    key_copy(key, record[0], stat_key_info, prefix_key_length);
    key_part_map prefix_map= (key_part_map) ((1 << prefix_parts) - 1);
    return !stat_file->ha_index_read_idx_map(record[0], stat_key_idx, key,
                                             prefix_map, HA_READ_KEY_EXACT);
  }


  /**
    @brief
    Update/insert a record in the statistical table with new statistics

    @details
    The function first looks for a record by its primary key in the statistical
    table stat_table. If the record is found the function updates statistical
    fields of the record. The data for these fields are taken from internal
    structures containing info on the table 'table'. If the record is not
    found the function inserts a new record with the primary key set to the
    search key and the statistical data taken from the internal structures.
    The function assumes that the key fields have been already stored in
    the record buffer of stat_table.

    @retval
    FALSE    success with the update/insert of the record
    @retval
    TRUE     failure with the update/insert of the record

    @note
    The function calls the virtual method store_stat_fields to populate the
    statistical fields of the updated/inserted row with new statistics.
  */

  bool update_stat()
  {
    if (find_stat())
    {
      store_record_for_update();
      store_stat_fields();
      return update_record();
    }
    else
    {
      int err;
      store_stat_fields();
      if ((err= stat_file->ha_write_row(record[0])))
        return TRUE;
      /* Make change permanent and avoid 'table is marked as crashed' errors */
      stat_file->extra(HA_EXTRA_FLUSH);
    }
    return FALSE;
  }


  /**
    @brief
    Update the table name fields in the current record of stat_table

    @details
    The function updates the fields containing database name and table name
    for the last found record in the statistical table stat_table.
    The corresponding names for the update are taken from the parameters
    db and tab.

    @retval
    FALSE    success with the update of the record
    @retval
    TRUE     failure with the update of the record

    @note
    The function calls the virtual method change_full_table_name
    to store the new names in the record buffer used for updates.
  */

  bool update_table_name_key_parts(LEX_STRING *db, LEX_STRING *tab)
  {
    store_record_for_update();
    change_full_table_name(db, tab);
    bool rc= update_record();
    store_record_for_lookup();
    return rc;
  }


  /**
    @brief
    Delete the current record of the statistical table stat_table

    @details
    The function deletes the last found record from the statistical
    table stat_table.

    @retval
    FALSE    success with the deletion of the record
    @retval
    TRUE     failure with the deletion of the record
  */

  bool delete_stat()
  {
    int err;
    if ((err= stat_file->ha_delete_row(record[0])))
      return TRUE;
    /* Make change permanent and avoid 'table is marked as crashed' errors */
    stat_file->extra(HA_EXTRA_FLUSH);
    return FALSE;
  }

  friend class Stat_table_write_iter;
};


/*
  An object of the class Table_stat is created to read statistical
  data on tables from the statistical table table_stats, to update
  table_stats with such statistical data, or to update columns
  of the primary key, or to delete the record by its primary key or
  its prefix.
  Rows from the statistical table are read and updated always by
  primary key.
*/

class Table_stat: public Stat_table
{

private:

  Field *db_name_field;     /* Field for the column table_stats.db_name */
  Field *table_name_field;  /* Field for the column table_stats.table_name */

  void common_init_table_stat()
  {
    db_name_field= stat_table->field[TABLE_STAT_DB_NAME];
    table_name_field= stat_table->field[TABLE_STAT_TABLE_NAME];
  }

  void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
  {
    db_name_field->store(db->str, db->length, system_charset_info);
    table_name_field->store(tab->str, tab->length, system_charset_info);
  }

public:

  /**
    @details
    The constructor 'tunes' the private and protected members of the
    constructed object for the statistical table table_stats to read/update
    statistics on table 'tab'. The TABLE structure for the table table_stats
    must be passed as a value for the parameter 'stat'.
  */

  Table_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
  {
    common_init_table_stat();
  }


  /**
    @details
    The constructor 'tunes' the private and protected members of the
    object constructed for the statistical table table_stats for
    the future updates/deletes of the record concerning the table 'tab'
    from the database 'db'.
  */

  Table_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
    :Stat_table(stat, db, tab)
  {
    common_init_table_stat();
  }


  /**
    @brief
    Set the key fields for the statistical table table_stats

    @details
    The function sets the values of the fields db_name and table_name
    in the record buffer for the statistical table table_stats.
    These fields comprise the primary key for the table.

    @note
    The function is supposed to be called before any use of the
    method find_stat for an object of the Table_stat class.
  */

  void set_key_fields()
  {
    db_name_field->store(db_name->str, db_name->length, system_charset_info);
    table_name_field->store(table_name->str, table_name->length,
                            system_charset_info);
  }


  /**
    @brief
    Store statistical data into statistical fields of table_stats

    @details
    This implementation of a purely virtual method sets the value of the
    column 'cardinality' of the statistical table table_stats according to
    the value of the flag write_stat.cardinality_is_null and the value of
    the field write_stat.cardinality from the TABLE structure for 'table'.
  */

  void store_stat_fields()
  {
    Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY];
    if (table->collected_stats->cardinality_is_null)
      stat_field->set_null();
    else
    {
      stat_field->set_notnull();
      stat_field->store(table->collected_stats->cardinality);
    }
  }


  /**
    @brief
    Read statistical data from statistical fields of table_stats

    @details
    This implementation of a purely virtual method first looks for a record
    in the statistical table table_stats by its primary key set in the record
    buffer with the help of Table_stat::set_key_fields. Then, if the row is
    found, the function reads the value of the column 'cardinality' of the
    table table_stats and sets the value of the flag
    read_stat.cardinality_is_null and the value of the field
    read_stat.cardinality from the TABLE structure for 'table' accordingly.
  */

  void get_stat_values()
  {
    Table_statistics *read_stats= table_share->stats_cb.table_stats;
    read_stats->cardinality_is_null= TRUE;
    read_stats->cardinality= 0;
    if (find_stat())
    {
      Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY];
      if (!stat_field->is_null())
      {
        read_stats->cardinality_is_null= FALSE;
        read_stats->cardinality= stat_field->val_int();
      }
    }
  }

};
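
/*
  Illustrative sketch, not part of the original code and compiled out via a
  hypothetical guard macro: the read pattern described in the Stat_table
  comment, applied to table_stats. 'stat' must be the opened
  mysql.table_stats TABLE and 'tab' the table whose cardinality is wanted.
*/
#ifdef STATISTICS_USAGE_EXAMPLES
static void example_read_cardinality(TABLE *stat, TABLE *tab)
{
  Table_stat table_stat(stat, tab);
  table_stat.set_key_fields();   /* stores db_name and table_name */
  table_stat.get_stat_values();  /* fills tab->s->stats_cb.table_stats */
}
#endif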


/*
  An object of the class Column_stat is created to read statistical data
  on table columns from the statistical table column_stats, to update
  column_stats with such statistical data, or to update columns
  of the primary key, or to delete the record by its primary key or
  its prefix.
  Rows from the statistical table are read and updated always by
  primary key.
*/

class Column_stat: public Stat_table
{

private:

  Field *db_name_field;     /* Field for the column column_stats.db_name */
  Field *table_name_field;  /* Field for the column column_stats.table_name */
  Field *column_name_field; /* Field for the column column_stats.column_name */

  Field *table_field;  /* Field from 'table' to read/update statistics on */

  void common_init_column_stat_table()
  {
    db_name_field= stat_table->field[COLUMN_STAT_DB_NAME];
    table_name_field= stat_table->field[COLUMN_STAT_TABLE_NAME];
    column_name_field= stat_table->field[COLUMN_STAT_COLUMN_NAME];
  }

  void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
  {
    db_name_field->store(db->str, db->length, system_charset_info);
    table_name_field->store(tab->str, tab->length, system_charset_info);
  }

public:

  /**
    @details
    The constructor 'tunes' the private and protected members of the
    constructed object for the statistical table column_stats to read/update
    statistics on fields of the table 'tab'. The TABLE structure for the table
    column_stats must be passed as a value for the parameter 'stat'.
  */

  Column_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
  {
    common_init_column_stat_table();
  }


  /**
    @details
    The constructor 'tunes' the private and protected members of the
    object constructed for the statistical table column_stats for
    the future updates/deletes of the record concerning the table 'tab'
    from the database 'db'.
  */

  Column_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
    :Stat_table(stat, db, tab)
  {
    common_init_column_stat_table();
  }

  /**
    @brief
    Set table name fields for the statistical table column_stats

    @details
    The function stores the values of the fields db_name and table_name
    of the statistical table column_stats in the record buffer.
  */

  void set_full_table_name()
  {
    db_name_field->store(db_name->str, db_name->length, system_charset_info);
    table_name_field->store(table_name->str, table_name->length,
                            system_charset_info);
  }

  /**
    @brief
    Set the key fields for the statistical table column_stats

    @param
    col       Field for the 'table' column to read/update statistics on

    @details
    The function stores the values of the fields db_name, table_name and
    column_name in the record buffer for the statistical table column_stats.
    These fields comprise the primary key for the table.
    It also sets table_field to the passed parameter.

    @note
    The function is supposed to be called before any use of the
    method find_stat for an object of the Column_stat class.
  */

  void set_key_fields(Field *col)
  {
    set_full_table_name();
    const char *column_name= col->field_name;
    column_name_field->store(column_name, strlen(column_name),
                             system_charset_info);
    table_field= col;
  }


  /**
    @brief
    Update the table name fields in the current record of stat_table

    @details
    The function updates the primary key fields containing database name,
    table name, and column name for the last found record in the statistical
    table column_stats.

    @retval
    FALSE    success with the update of the record
    @retval
    TRUE     failure with the update of the record
  */

  bool update_column_key_part(const char *col)
  {
    store_record_for_update();
    set_full_table_name();
    column_name_field->store(col, strlen(col), system_charset_info);
    bool rc= update_record();
    store_record_for_lookup();
    return rc;
  }


  /**
    @brief
    Store statistical data into statistical fields of column_stats

    @details
    This implementation of a purely virtual method sets the value of the
    columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length',
    'avg_frequency', 'hist_size', 'hist_type' and 'histogram' of the
    statistical table column_stats according to the contents of the bitmap
    write_stat.column_stat_nulls and the values of the fields min_value,
    max_value, nulls_ratio, avg_length, avg_frequency, hist_size, hist_type
    and histogram of the structure write_stat from the Field structure
    for the field 'table_field'.
    The value of the k-th column in the table column_stats is set to NULL
    if the k-th bit in the bitmap 'column_stat_nulls' is set to 1.

    @note
    A value from the field min_value/max_value is always converted
    into a varbinary string. If the length of the column 'min_value'/'max_value'
    is less than the length of the string the string is trimmed to fit the
    length of the column.
  */

  void store_stat_fields()
  {
    char buff[MAX_FIELD_WIDTH];
    String val(buff, sizeof(buff), &my_charset_bin);

    for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++)
    {
      Field *stat_field= stat_table->field[i];
      if (table_field->collected_stats->is_null(i))
        stat_field->set_null();
      else
      {
        stat_field->set_notnull();
        switch (i) {
        case COLUMN_STAT_MIN_VALUE:
          if (table_field->type() == MYSQL_TYPE_BIT)
            stat_field->store(table_field->collected_stats->min_value->val_int());
          else
          {
            table_field->collected_stats->min_value->val_str(&val);
            stat_field->store(val.ptr(), val.length(), &my_charset_bin);
          }
          break;
        case COLUMN_STAT_MAX_VALUE:
          if (table_field->type() == MYSQL_TYPE_BIT)
            stat_field->store(table_field->collected_stats->max_value->val_int());
          else
          {
            table_field->collected_stats->max_value->val_str(&val);
            stat_field->store(val.ptr(), val.length(), &my_charset_bin);
          }
          break;
        case COLUMN_STAT_NULLS_RATIO:
          stat_field->store(table_field->collected_stats->get_nulls_ratio());
          break;
        case COLUMN_STAT_AVG_LENGTH:
          stat_field->store(table_field->collected_stats->get_avg_length());
          break;
        case COLUMN_STAT_AVG_FREQUENCY:
          stat_field->store(table_field->collected_stats->get_avg_frequency());
          break;
        case COLUMN_STAT_HIST_SIZE:
          stat_field->store(table_field->collected_stats->histogram.get_size());
          break;
        case COLUMN_STAT_HIST_TYPE:
          stat_field->store(table_field->collected_stats->histogram.get_type() +
                            1);
          break;
        case COLUMN_STAT_HISTOGRAM:
          const char * col_histogram=
            (const char *) (table_field->collected_stats->histogram.get_values());
          stat_field->store(col_histogram,
                            table_field->collected_stats->histogram.get_size(),
                            &my_charset_bin);
          break;
        }
      }
    }
  }


  /**
    @brief
    Read statistical data from statistical fields of column_stats

    @details
    This implementation of a purely virtual method first looks for a record
    in the statistical table column_stats by its primary key set in the record
    buffer with the help of Column_stat::set_key_fields. Then, if the row is
    found, the function reads the values of the columns 'min_value',
    'max_value', 'nulls_ratio', 'avg_length', 'avg_frequency', 'hist_size' and
    'hist_type' of the table column_stats and sets accordingly the value of
    the bitmap read_stat.column_stat_nulls and the values of the fields
    min_value, max_value, nulls_ratio, avg_length, avg_frequency, hist_size and
    hist_type of the structure read_stat from the Field structure for the field
    'table_field'.
  */

  void get_stat_values()
  {
    table_field->read_stats->set_all_nulls();

    if (table_field->read_stats->min_value)
      table_field->read_stats->min_value->set_null();
    if (table_field->read_stats->max_value)
      table_field->read_stats->max_value->set_null();

    if (find_stat())
    {
      char buff[MAX_FIELD_WIDTH];
      String val(buff, sizeof(buff), &my_charset_bin);

      for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HIST_TYPE; i++)
      {
        Field *stat_field= stat_table->field[i];

        if (!stat_field->is_null() &&
            (i > COLUMN_STAT_MAX_VALUE ||
             (i == COLUMN_STAT_MIN_VALUE &&
              table_field->read_stats->min_value) ||
             (i == COLUMN_STAT_MAX_VALUE &&
              table_field->read_stats->max_value)))
        {
          table_field->read_stats->set_not_null(i);

          switch (i) {
          case COLUMN_STAT_MIN_VALUE:
            table_field->read_stats->min_value->set_notnull();
            stat_field->val_str(&val);
            table_field->read_stats->min_value->store(val.ptr(), val.length(),
                                                      &my_charset_bin);
            break;
          case COLUMN_STAT_MAX_VALUE:
            table_field->read_stats->max_value->set_notnull();
            stat_field->val_str(&val);
            table_field->read_stats->max_value->store(val.ptr(), val.length(),
                                                      &my_charset_bin);
            break;
          case COLUMN_STAT_NULLS_RATIO:
            table_field->read_stats->set_nulls_ratio(stat_field->val_real());
            break;
          case COLUMN_STAT_AVG_LENGTH:
            table_field->read_stats->set_avg_length(stat_field->val_real());
            break;
          case COLUMN_STAT_AVG_FREQUENCY:
            table_field->read_stats->set_avg_frequency(stat_field->val_real());
            break;
          case COLUMN_STAT_HIST_SIZE:
            table_field->read_stats->histogram.set_size(stat_field->val_int());
            break;
          case COLUMN_STAT_HIST_TYPE:
            Histogram_type hist_type= (Histogram_type) (stat_field->val_int() -
                                                        1);
            table_field->read_stats->histogram.set_type(hist_type);
            break;
          }
        }
      }
    }
  }


  /**
    @brief
    Read the histogram from the column_stats table

    @details
    This method first looks for a record in the statistical table column_stats
    by its primary key set in the record buffer with the help of
    Column_stat::set_key_fields. Then, if the row is found, the function reads
    the value of the column 'histogram' of the table column_stats and sets
    accordingly the corresponding bit in the bitmap read_stat.column_stat_nulls.
    The method assumes that the value of the histogram size and the pointer to
    the histogram location have already been set in the fields size and values
    of read_stats->histogram.
  */

  void get_histogram_value()
  {
    if (find_stat())
    {
      char buff[MAX_FIELD_WIDTH];
      String val(buff, sizeof(buff), &my_charset_bin);
      uint fldno= COLUMN_STAT_HISTOGRAM;
      Field *stat_field= stat_table->field[fldno];
      table_field->read_stats->set_not_null(fldno);
      stat_field->val_str(&val);
      memcpy(table_field->read_stats->histogram.get_values(),
             val.ptr(), table_field->read_stats->histogram.get_size());
    }
  }

};
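
/*
  Illustrative sketch, not part of the original code and compiled out via a
  hypothetical guard macro: reading the persistent statistics for a single
  column, following the pattern described above. 'stat' must be the opened
  mysql.column_stats TABLE and 'field' a column of the table 'tab'.
*/
#ifdef STATISTICS_USAGE_EXAMPLES
static void example_read_column_stats(TABLE *stat, TABLE *tab, Field *field)
{
  Column_stat column_stat(stat, tab);
  column_stat.set_key_fields(field);
  column_stat.get_stat_values();   /* fills field->read_stats */
}
#endif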
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
An object of the class Index_stat is created to read statistical
|
2012-07-11 01:34:39 +02:00
|
|
|
data on tables from the statistical table table_stat, to update
|
2012-12-05 09:31:05 +01:00
|
|
|
index_stats with such statistical data, or to update columns
|
2012-07-11 01:34:39 +02:00
|
|
|
of the primary key, or to delete the record by its primary key or
|
|
|
|
its prefix.
|
|
|
|
Rows from the statistical table are read and updated always by
|
|
|
|
primary key.
|
2012-01-05 02:51:53 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
class Index_stat: public Stat_table
|
|
|
|
{
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
private:
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
Field *db_name_field; /* Field for the column index_stats.db_name */
|
|
|
|
Field *table_name_field; /* Field for the column index_stats.table_name */
|
|
|
|
Field *index_name_field; /* Field for the column index_stats.table_name */
|
|
|
|
Field *prefix_arity_field; /* Field for the column index_stats.prefix_arity */
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
KEY *table_key_info; /* Info on the index to read/update statistics on */
|
|
|
|
uint prefix_arity; /* Number of components of the index prefix of interest */
|
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
void common_init_index_stat_table()
|
|
|
|
{
|
|
|
|
db_name_field= stat_table->field[INDEX_STAT_DB_NAME];
|
|
|
|
table_name_field= stat_table->field[INDEX_STAT_TABLE_NAME];
|
|
|
|
index_name_field= stat_table->field[INDEX_STAT_INDEX_NAME];
|
|
|
|
prefix_arity_field= stat_table->field[INDEX_STAT_PREFIX_ARITY];
|
|
|
|
}
|
|
|
|
|
|
|
|
void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
|
|
|
|
{
|
|
|
|
db_name_field->store(db->str, db->length, system_charset_info);
|
|
|
|
table_name_field->store(tab->str, tab->length, system_charset_info);
|
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
public:
|
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@details
|
|
|
|
The constructor 'tunes' the private and protected members of the
|
2012-12-05 09:31:05 +01:00
|
|
|
constructed object for the statistical table index_stats to read/update
|
2012-01-05 02:51:53 +01:00
|
|
|
statistics on prefixes of different indexes of the table 'tab'.
|
2012-12-05 09:31:05 +01:00
|
|
|
The TABLE structure for the table index_stats must be passed as a value
|
2012-01-05 02:51:53 +01:00
|
|
|
for the parameter 'stat'.
|
|
|
|
*/
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
Index_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2012-07-11 01:34:39 +02:00
|
|
|
common_init_index_stat_table();
|
|
|
|
}
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
@details
|
|
|
|
The constructor 'tunes' the private and protected members of the
|
2012-12-05 09:31:05 +01:00
|
|
|
object constructed for the statistical table index_stats for
|
2012-07-11 01:34:39 +02:00
|
|
|
the future updates/deletes of the record concerning the table 'tab'
|
|
|
|
from the database 'db'.
|
|
|
|
*/
|
|
|
|
|
|
|
|
Index_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
|
|
|
|
:Stat_table(stat, db, tab)
|
|
|
|
{
|
|
|
|
common_init_index_stat_table();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Set table name fields for the statistical table index_stats
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@details
|
|
|
|
The function stores the values of the fields db_name and table_name
|
2012-12-05 09:31:05 +01:00
|
|
|
of the statistical table index_stats in the record buffer.
|
2012-07-11 01:34:39 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
void set_full_table_name()
|
|
|
|
{
|
|
|
|
db_name_field->store(db_name->str, db_name->length, system_charset_info);
|
|
|
|
table_name_field->store(table_name->str, table_name->length,
|
|
|
|
system_charset_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Set the key fields of index_stats used to access records for index prefixes
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@param
|
|
|
|
index_info Info for the index of 'table' to read/update statistics on
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function sets the values of the fields db_name, table_name and
|
2012-12-05 09:31:05 +01:00
|
|
|
index_name in the record buffer for the statistical table index_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
It also sets table_key_info to the passed parameter.
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is supposed to be called before any use of the method
|
|
|
|
find_next_stat_for_prefix for an object of the Index_stat class.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void set_index_prefix_key_fields(KEY *index_info)
|
|
|
|
{
|
|
|
|
set_full_table_name();
|
|
|
|
char *index_name= index_info->name;
|
|
|
|
index_name_field->store(index_name, strlen(index_name),
|
|
|
|
system_charset_info);
|
|
|
|
table_key_info= index_info;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Set the key fields for the statistical table index_stats
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@param
|
|
|
|
index_info Info for the index of 'table' to read/update statistics on
|
|
|
|
@param
|
|
|
|
index_prefix_arity Number of components in the index prefix of interest
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function sets the values of the fields db_name, table_name and
|
|
|
|
index_name, prefix_arity in the record buffer for the statistical
|
2012-12-05 09:31:05 +01:00
|
|
|
table index_stats. These fields comprise the primary key for the table.
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@note
|
|
|
|
The function is supposed to be called before any use of the
|
|
|
|
method find_stat for an object of the Index_stat class.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
void set_key_fields(KEY *index_info, uint index_prefix_arity)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2012-07-11 01:34:39 +02:00
|
|
|
set_index_prefix_key_fields(index_info);
|
2012-01-05 02:51:53 +01:00
|
|
|
prefix_arity= index_prefix_arity;
|
|
|
|
prefix_arity_field->store(index_prefix_arity, TRUE);
|
|
|
|
}
|
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Store statistical data into statistical fields of table index_stats
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@details
|
|
|
|
This implementation of a purely virtual method sets the value of the
|
2012-12-05 09:31:05 +01:00
|
|
|
column 'avg_frequency' of the statistical table index_stats according to
|
2012-01-05 02:51:53 +01:00
|
|
|
the value of write_stat.avg_frequency[Index_stat::prefix_arity]
|
|
|
|
from the KEY_INFO structure 'table_key_info'.
|
|
|
|
If the value of write_stat.avg_frequency[Index_stat::prefix_arity] is
|
|
|
|
equal to 0, the value of the column is set to NULL.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
void store_stat_fields()
|
|
|
|
{
|
2012-05-07 07:42:14 +02:00
|
|
|
Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY];
|
2012-01-05 02:51:53 +01:00
|
|
|
double avg_frequency=
|
2012-07-27 02:50:08 +02:00
|
|
|
table_key_info->collected_stats->get_avg_frequency(prefix_arity-1);
|
2012-01-05 02:51:53 +01:00
|
|
|
if (avg_frequency == 0)
|
|
|
|
stat_field->set_null();
|
|
|
|
else
|
|
|
|
{
|
|
|
|
stat_field->set_notnull();
|
|
|
|
stat_field->store(avg_frequency);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Read statistical data from statistical fields of index_stats
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@details
|
|
|
|
This implementation of a purely virtual method first looks for a record in the
|
2012-12-05 09:31:05 +01:00
|
|
|
statistical table index_stats by its primary key set in the record buffer with
|
2012-01-05 02:51:53 +01:00
|
|
|
the help of Index_stat::set_key_fields. If the row is found the function
|
|
|
|
reads the value of the column 'avg_frequency' of the table index_stats and
|
|
|
|
sets the value of read_stat.avg_frequency[Index_stat::prefix_arity]
|
|
|
|
from the KEY_INFO structure 'table_key_info' accordingly. If the value of
|
|
|
|
the column is NULL, read_stat.avg_frequency[Index_stat::prefix_arity] is
|
|
|
|
set to 0. Otherwise, read_stat.avg_frequency[Index_stat::prefix_arity] is
|
|
|
|
set to the value of the column.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
void get_stat_values()
|
|
|
|
{
|
|
|
|
double avg_frequency= 0;
|
|
|
|
if(find_stat())
|
|
|
|
{
|
|
|
|
Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY];
|
|
|
|
if (!stat_field->is_null())
|
2012-06-26 07:33:07 +02:00
|
|
|
avg_frequency= stat_field->val_real();
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
table_key_info->read_stats->set_avg_frequency(prefix_arity-1, avg_frequency);
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
};
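/*
  Illustrative sketch (not part of the original source): roughly how an
  Index_stat object can be driven to read 'avg_frequency' for every prefix
  of one index into key_info->read_stats. Opening/closing of the
  mysql.index_stats table, locking and error handling are omitted;
  'stat_table', 'table' and 'keynr' are assumed to be set up by the caller.

    Index_stat index_stat(stat_table, table);
    KEY *key_info= &table->key_info[keynr];
    for (uint i= 1; i <= table->actual_n_key_parts(key_info); i++)
    {
      index_stat.set_key_fields(key_info, i);
      index_stat.get_stat_values();  // fills read_stats->avg_frequency[i-1]
    }
*/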
|
|
|
|
|
2015-11-18 19:31:45 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
An iterator to enumerate statistics table rows which allows to modify
|
|
|
|
the rows while reading them.
|
|
|
|
|
|
|
|
Used by RENAME TABLE handling to assign new dbname.tablename to statistic
|
|
|
|
rows.
|
|
|
|
*/
|
|
|
|
class Stat_table_write_iter
|
|
|
|
{
|
|
|
|
Stat_table *owner;
|
|
|
|
IO_CACHE io_cache;
|
|
|
|
uchar *rowid_buf;
|
|
|
|
uint rowid_size;
|
|
|
|
|
|
|
|
public:
|
|
|
|
Stat_table_write_iter(Stat_table *stat_table_arg)
|
|
|
|
: owner(stat_table_arg), rowid_buf(NULL),
|
|
|
|
rowid_size(owner->stat_file->ref_length)
|
|
|
|
{
|
|
|
|
my_b_clear(&io_cache);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Initialize the iterator. It will return rows whose first n_keyparts key parts match the
|
|
|
|
current values.
|
|
|
|
|
|
|
|
@return false - OK
|
|
|
|
true - Error
|
|
|
|
*/
|
|
|
|
bool init(uint n_keyparts)
|
|
|
|
{
|
|
|
|
if (!(rowid_buf= (uchar*)my_malloc(rowid_size, MYF(0))))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (open_cached_file(&io_cache, mysql_tmpdir, TEMP_PREFIX,
|
|
|
|
1024, MYF(MY_WME)))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
handler *h= owner->stat_file;
|
|
|
|
uchar key[MAX_KEY_LENGTH];
|
|
|
|
uint prefix_len= 0;
|
|
|
|
for (uint i= 0; i < n_keyparts; i++)
|
|
|
|
prefix_len += owner->stat_key_info->key_part[i].store_length;
|
|
|
|
|
|
|
|
key_copy(key, owner->record[0], owner->stat_key_info,
|
|
|
|
prefix_len);
|
|
|
|
key_part_map prefix_map= (key_part_map) ((1 << n_keyparts) - 1);
|
|
|
|
h->ha_index_init(owner->stat_key_idx, false);
|
|
|
|
int res= h->ha_index_read_map(owner->record[0], key, prefix_map,
|
|
|
|
HA_READ_KEY_EXACT);
|
|
|
|
if (res)
|
|
|
|
{
|
|
|
|
reinit_io_cache(&io_cache, READ_CACHE, 0L, 0, 0);
|
|
|
|
/* "Key not found" is not considered an error */
|
|
|
|
return (res == HA_ERR_KEY_NOT_FOUND)? false: true;
|
|
|
|
}
|
|
|
|
|
|
|
|
do {
|
|
|
|
h->position(owner->record[0]);
|
|
|
|
my_b_write(&io_cache, h->ref, rowid_size);
|
|
|
|
|
|
|
|
} while (!h->ha_index_next_same(owner->record[0], key, prefix_len));
|
|
|
|
|
|
|
|
/* Prepare for reading */
|
|
|
|
reinit_io_cache(&io_cache, READ_CACHE, 0L, 0, 0);
|
|
|
|
h->ha_index_or_rnd_end();
|
|
|
|
if (h->ha_rnd_init(false))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Read the next row.
|
|
|
|
|
|
|
|
@return
|
|
|
|
false OK
|
|
|
|
true No more rows or error.
|
|
|
|
*/
|
|
|
|
bool get_next_row()
|
|
|
|
{
|
|
|
|
if (!my_b_inited(&io_cache) || my_b_read(&io_cache, rowid_buf, rowid_size))
|
|
|
|
return true; /* No more data */
|
|
|
|
|
|
|
|
handler *h= owner->stat_file;
|
|
|
|
/*
|
|
|
|
We should normally be able to find the row that we have rowid for. If we
|
|
|
|
don't, let's consider this an error.
|
|
|
|
*/
|
|
|
|
int res= h->ha_rnd_pos(owner->record[0], rowid_buf);
|
|
|
|
|
|
|
|
return (res==0)? false : true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void cleanup()
|
|
|
|
{
|
|
|
|
if (rowid_buf)
|
|
|
|
my_free(rowid_buf);
|
|
|
|
rowid_buf= NULL;
|
|
|
|
owner->stat_file->ha_index_or_rnd_end();
|
|
|
|
close_cached_file(&io_cache);
|
|
|
|
my_b_clear(&io_cache);
|
|
|
|
}
|
|
|
|
|
|
|
|
~Stat_table_write_iter()
|
|
|
|
{
|
2018-05-23 10:26:49 +02:00
|
|
|
/* Ensure that cleanup has been run */
|
|
|
|
DBUG_ASSERT(rowid_buf == 0);
|
2015-11-18 19:31:45 +01:00
|
|
|
}
|
|
|
|
};
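/*
  Illustrative sketch (not part of the original source): the intended usage
  pattern of this iterator when RENAME TABLE rewrites the rows of one of the
  statistical tables. 'stat' stands for some Stat_table descendant whose key
  fields for (db_name, table_name) have already been stored; how the modified
  row is written back is left out, as that helper is not shown here.

    Stat_table_write_iter iter(&stat);
    if (!iter.init(2))                  // enumerate rows matching the first
    {                                   // two key parts: (db_name, table_name)
      while (!iter.get_next_row())
      {
        // modify the key columns in stat's record buffer to the new
        // dbname.tablename and write the row back via a Stat_table helper
      }
    }
    iter.cleanup();
*/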
|
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
/*
|
|
|
|
Histogram_builder is a helper class that is used to build histograms
|
|
|
|
for columns
|
|
|
|
*/
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
class Histogram_builder
|
|
|
|
{
|
2013-04-16 07:43:07 +02:00
|
|
|
Field *column; /* table field for which the histogram is built */
|
|
|
|
uint col_length; /* size of this field */
|
|
|
|
ha_rows records; /* number of records the histogram is built for */
|
|
|
|
Field *min_value; /* pointer to the minimal value for the field */
|
|
|
|
Field *max_value; /* pointer to the maximal value for the field */
|
|
|
|
Histogram *histogram; /* the histogram location */
|
|
|
|
uint hist_width; /* the number of points in the histogram */
|
|
|
|
double bucket_capacity; /* number of rows in a bucket of the histogram */
|
|
|
|
uint curr_bucket; /* number of the current bucket to be built */
|
|
|
|
ulonglong count; /* number of values retrieved */
|
|
|
|
ulonglong count_distinct; /* number of distinct values retrieved */
|
2013-03-26 07:48:29 +01:00
|
|
|
|
|
|
|
public:
|
|
|
|
Histogram_builder(Field *col, uint col_len, ha_rows rows)
|
|
|
|
: column(col), col_length(col_len), records(rows)
|
|
|
|
{
|
|
|
|
Column_statistics *col_stats= col->collected_stats;
|
|
|
|
min_value= col_stats->min_value;
|
|
|
|
max_value= col_stats->max_value;
|
|
|
|
histogram= &col_stats->histogram;
|
2013-03-31 03:57:07 +02:00
|
|
|
hist_width= histogram->get_width();
|
|
|
|
bucket_capacity= (double) records / (hist_width + 1);
|
2013-03-26 07:48:29 +01:00
|
|
|
curr_bucket= 0;
|
|
|
|
count= 0;
|
|
|
|
count_distinct= 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ulonglong get_count_distinct() { return count_distinct; }
|
|
|
|
|
|
|
|
int next(void *elem, element_count elem_cnt)
|
|
|
|
{
|
|
|
|
count_distinct++;
|
|
|
|
count+= elem_cnt;
|
2013-03-31 03:57:07 +02:00
|
|
|
if (curr_bucket == hist_width)
|
2013-03-26 07:48:29 +01:00
|
|
|
return 0;
|
|
|
|
if (count > bucket_capacity * (curr_bucket + 1))
|
|
|
|
{
|
|
|
|
column->store_field_value((uchar *) elem, col_length);
|
|
|
|
histogram->set_value(curr_bucket,
|
2013-04-16 07:43:07 +02:00
|
|
|
column->pos_in_interval(min_value, max_value));
|
2013-03-26 07:48:29 +01:00
|
|
|
curr_bucket++;
|
2013-03-31 03:57:07 +02:00
|
|
|
while (curr_bucket != hist_width &&
|
2013-03-26 07:48:29 +01:00
|
|
|
count > bucket_capacity * (curr_bucket + 1))
|
|
|
|
{
|
|
|
|
histogram->set_prev_value(curr_bucket);
|
|
|
|
curr_bucket++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
};
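/*
  Worked example (added for illustration, not part of the original source):
  with records == 1000 and hist_width == 4, bucket_capacity is 1000/5 == 200.
  As next() is called for the distinct values in ascending order, a histogram
  point is written the first time the running row count exceeds 200, 400, 600
  and 800 respectively; every stored point is the position of that value
  within [min_value, max_value], so the histogram approximates the 20%, 40%,
  60% and 80% quantiles of the column.
*/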
|
|
|
|
|
|
|
|
|
|
|
|
C_MODE_START
|
|
|
|
|
|
|
|
int histogram_build_walk(void *elem, element_count elem_cnt, void *arg)
|
|
|
|
{
|
|
|
|
Histogram_builder *hist_builder= (Histogram_builder *) arg;
|
|
|
|
return hist_builder->next(elem, elem_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
C_MODE_END
|
|
|
|
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
/*
|
|
|
|
The class Count_distinct_field is a helper class used to calculate
|
|
|
|
the number of distinct values for a column. The class employs the
|
|
|
|
Unique class for this purpose.
|
|
|
|
The class Count_distinct_field is used only by the function
|
2012-12-13 08:16:54 +01:00
|
|
|
collect_statistics_for_table to calculate the values for
|
2012-12-05 09:31:05 +01:00
|
|
|
column avg_frequency of the statistical table column_stats.
|
2012-01-05 02:51:53 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
class Count_distinct_field: public Sql_alloc
|
|
|
|
{
|
|
|
|
protected:
|
|
|
|
|
|
|
|
/* Field for which the number of distinct values is to be found */
|
|
|
|
Field *table_field;
|
|
|
|
Unique *tree; /* The helper object to contain distinct values */
|
|
|
|
uint tree_key_length; /* The length of the keys for the elements of 'tree' */
|
|
|
|
|
|
|
|
public:
|
2013-03-26 07:48:29 +01:00
|
|
|
|
|
|
|
Count_distinct_field() {}
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@param
|
|
|
|
field Field for which the number of distinct values is
|
|
|
|
to be found
|
|
|
|
@param
|
2012-07-11 01:34:39 +02:00
|
|
|
max_heap_table_size The limit for the memory used by the RB tree container
|
2012-01-05 02:51:53 +01:00
|
|
|
of the constructed Unique object 'tree'
|
|
|
|
|
|
|
|
@details
|
|
|
|
The constructor sets the values of 'table_field' and 'tree_key_length',
|
|
|
|
and then calls the 'new' operation to create a Unique object for 'tree'.
|
|
|
|
The type of 'field' and the value of max_heap_table_size determine the set
|
|
|
|
of the parameters to be passed to the constructor of the Unique object.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
Count_distinct_field(Field *field, uint max_heap_table_size)
|
|
|
|
{
|
|
|
|
table_field= field;
|
|
|
|
tree_key_length= field->pack_length();
|
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
tree= new Unique((qsort_cmp2) simple_str_key_cmp, (void*) field,
|
2013-04-07 00:36:28 +02:00
|
|
|
tree_key_length, max_heap_table_size, 1);
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~Count_distinct_field()
|
|
|
|
{
|
|
|
|
delete tree;
|
|
|
|
tree= NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
@brief
|
2012-07-11 01:34:39 +02:00
|
|
|
Check whether the Unique object tree has been successfully created
|
2012-01-05 02:51:53 +01:00
|
|
|
*/
|
|
|
|
bool exists()
|
|
|
|
{
|
|
|
|
return (tree != NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
@brief
|
|
|
|
Add the value of 'field' to the container of the Unique object 'tree'
|
|
|
|
*/
|
|
|
|
virtual bool add()
|
|
|
|
{
|
|
|
|
return tree->unique_add(table_field->ptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
@brief
|
|
|
|
Calculate the number of elements accumulated in the container of 'tree'
|
|
|
|
*/
|
|
|
|
ulonglong get_value()
|
|
|
|
{
|
|
|
|
ulonglong count;
|
|
|
|
if (tree->elements == 0)
|
|
|
|
return (ulonglong) tree->elements_in_tree();
|
|
|
|
count= 0;
|
2013-01-29 15:10:47 +01:00
|
|
|
tree->walk(table_field->table, count_distinct_walk, (void*) &count);
|
2012-01-05 02:51:53 +01:00
|
|
|
return count;
|
|
|
|
}
|
2013-03-26 07:48:29 +01:00
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
/*
|
|
|
|
@brief
|
|
|
|
Build the histogram for the elements accumulated in the container of 'tree'
|
|
|
|
*/
|
2013-03-26 07:48:29 +01:00
|
|
|
ulonglong get_value_with_histogram(ha_rows rows)
|
|
|
|
{
|
|
|
|
Histogram_builder hist_builder(table_field, tree_key_length, rows);
|
|
|
|
tree->walk(table_field->table, histogram_build_walk, (void *) &hist_builder);
|
|
|
|
return hist_builder.get_count_distinct();
|
|
|
|
}
|
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
/*
|
|
|
|
@brief
|
|
|
|
Get the size of the histogram in bytes built for table_field
|
|
|
|
*/
|
2013-03-26 07:48:29 +01:00
|
|
|
uint get_hist_size()
|
|
|
|
{
|
|
|
|
return table_field->collected_stats->histogram.get_size();
|
|
|
|
}
|
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
/*
|
|
|
|
@brief
|
|
|
|
Get the pointer to the histogram built for table_field
|
|
|
|
*/
|
2013-03-26 07:48:29 +01:00
|
|
|
uchar *get_histogram()
|
|
|
|
{
|
|
|
|
return table_field->collected_stats->histogram.get_values();
|
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
};
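/*
  Illustrative sketch (not part of the original source): the typical life
  cycle of a Count_distinct_field object while statistics are collected for
  one column. 'field' and 'max_heap_table_size' come from the caller; the
  surrounding table scan and error handling are omitted.

    Count_distinct_field *cdf=
      new Count_distinct_field(field, max_heap_table_size);
    if (cdf && cdf->exists())
    {
      // for every scanned row where 'field' is not NULL:
      cdf->add();
      // after the full scan:
      ulonglong distinct_values= cdf->get_value();
    }
*/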
|
|
|
|
|
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
static
|
|
|
|
int simple_ulonglong_key_cmp(void* arg, uchar* key1, uchar* key2)
|
|
|
|
{
|
|
|
|
ulonglong *val1= (ulonglong *) key1;
|
|
|
|
ulonglong *val2= (ulonglong *) key2;
|
|
|
|
return *val1 > *val2 ? 1 : *val1 == *val2 ? 0 : -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
/*
|
|
|
|
The class Count_distinct_field_bit is derived from the class
|
|
|
|
Count_distinct_field to be used only for fields of the MYSQL_TYPE_BIT type.
|
|
|
|
The class provides a different implementation for the method add
|
|
|
|
*/
|
|
|
|
|
|
|
|
class Count_distinct_field_bit: public Count_distinct_field
|
|
|
|
{
|
|
|
|
public:
|
2013-03-26 07:48:29 +01:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
Count_distinct_field_bit(Field *field, uint max_heap_table_size)
|
2013-03-26 07:48:29 +01:00
|
|
|
{
|
|
|
|
table_field= field;
|
|
|
|
tree_key_length= sizeof(ulonglong);
|
|
|
|
|
|
|
|
tree= new Unique((qsort_cmp2) simple_ulonglong_key_cmp,
|
|
|
|
(void*) &tree_key_length,
|
2013-04-07 00:36:28 +02:00
|
|
|
tree_key_length, max_heap_table_size, 1);
|
2013-03-26 07:48:29 +01:00
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
bool add()
|
|
|
|
{
|
|
|
|
longlong val= table_field->val_int();
|
|
|
|
return tree->unique_add(&val);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
The class Index_prefix_calc is a helper class used to calculate the values
|
2012-12-05 09:31:05 +01:00
|
|
|
for the column 'avg_frequency' of the statistical table index_stats.
|
2012-01-05 02:51:53 +01:00
|
|
|
For any table t from the database db and any k-component prefix of the
|
2012-12-05 09:31:05 +01:00
|
|
|
index i for this table the row from index_stats with the primary key
|
2012-01-05 02:51:53 +01:00
|
|
|
(db,t,i,k) must contain in the column 'avg_frequency' either NULL or
|
|
|
|
the number that is the ratio of N and V, where N is the number of index
|
|
|
|
entries without NULL values in the first k components of the index i,
|
|
|
|
and V is the number of distinct tuples composed of the first k components
|
|
|
|
encountered among these index entries.
|
|
|
|
Currently the objects of this class are used only by the function
|
|
|
|
collect_statistics_for_index.
|
|
|
|
*/
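/*
  Worked example (added for illustration, not part of the original source):
  suppose index i is defined on (a,b) and the scan finds 8 index entries with
  no NULL in column 'a' that take 4 distinct values of 'a'. Then for the
  1-component prefix N == 8 and V == 4, so avg_frequency == 8/4 == 2.0,
  i.e. on average each value of 'a' occurs in two index entries.
*/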
|
|
|
|
|
|
|
|
class Index_prefix_calc: public Sql_alloc
|
|
|
|
{
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
private:
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
/* Table containing index specified by index_info */
|
|
|
|
TABLE *index_table;
|
|
|
|
/* Info for the index i for whose prefix 'avg_frequency' is calculated */
|
|
|
|
KEY *index_info;
|
|
|
|
/* The maximum number of the components in the prefixes of interest */
|
|
|
|
uint prefixes;
|
|
|
|
bool empty;
|
|
|
|
|
|
|
|
/* This structure is created for every k components of the index i */
|
|
|
|
class Prefix_calc_state
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
/*
|
|
|
|
The number of the scanned index entries without nulls
|
|
|
|
in the first k components
|
|
|
|
*/
|
|
|
|
ulonglong entry_count;
|
|
|
|
/*
|
|
|
|
The number of the scanned index entries without nulls with
|
|
|
|
the last encountered k-component prefix
|
|
|
|
*/
|
|
|
|
ulonglong prefix_count;
|
2012-07-11 01:34:39 +02:00
|
|
|
/* The values of the last encountered k-component prefix */
|
2012-01-05 02:51:53 +01:00
|
|
|
Cached_item *last_prefix;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
Array of structures used to calculate 'avg_frequency' for different
|
|
|
|
prefixes of the index i
|
|
|
|
*/
|
|
|
|
Prefix_calc_state *calc_state;
|
|
|
|
|
|
|
|
public:
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-12-14 08:05:12 +01:00
|
|
|
bool is_single_comp_pk;
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
Index_prefix_calc(TABLE *table, KEY *key_info)
|
|
|
|
: index_table(table), index_info(key_info)
|
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
Prefix_calc_state *state;
|
2012-05-07 07:42:14 +02:00
|
|
|
uint key_parts= table->actual_n_key_parts(key_info);
|
2012-01-05 02:51:53 +01:00
|
|
|
empty= TRUE;
|
|
|
|
prefixes= 0;
|
2015-02-10 11:05:49 +01:00
|
|
|
LINT_INIT_STRUCT(calc_state);
|
2012-12-14 08:05:12 +01:00
|
|
|
|
|
|
|
is_single_comp_pk= FALSE;
|
|
|
|
uint pk= table->s->primary_key;
|
2013-01-23 16:16:14 +01:00
|
|
|
if ((uint) (table->key_info - key_info) == pk &&
|
2013-06-15 17:32:08 +02:00
|
|
|
table->key_info[pk].user_defined_key_parts == 1)
|
2012-12-14 08:05:12 +01:00
|
|
|
{
|
|
|
|
prefixes= 1;
|
|
|
|
is_single_comp_pk= TRUE;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
if ((calc_state=
|
|
|
|
(Prefix_calc_state *) sql_alloc(sizeof(Prefix_calc_state)*key_parts)))
|
|
|
|
{
|
|
|
|
uint keyno= key_info-table->key_info;
|
|
|
|
for (i= 0, state= calc_state; i < key_parts; i++, state++)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
Do not consider prefixes containing a component that is only part
|
|
|
|
of the field. This limitation is set to avoid fetching data when
|
|
|
|
calculating the values of 'avg_frequency' for prefixes.
|
|
|
|
*/
|
|
|
|
if (!key_info->key_part[i].field->part_of_key.is_set(keyno))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (!(state->last_prefix=
|
|
|
|
new Cached_item_field(key_info->key_part[i].field)))
|
|
|
|
break;
|
|
|
|
state->entry_count= state->prefix_count= 0;
|
|
|
|
prefixes++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-14 08:05:12 +01:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Change the elements of calc_state after reading the next index entry
|
|
|
|
|
|
|
|
@details
|
|
|
|
This function is to be called at the index scan each time the next
|
|
|
|
index entry has been read into the record buffer.
|
|
|
|
For each of the index prefixes the function checks whether nulls
|
|
|
|
are encountered in any of the k components of the prefix.
|
|
|
|
If this is not the case the value of calc_state[k-1].entry_count
|
|
|
|
is incremented by 1. Then the function checks whether the value of
|
|
|
|
any of these k components has changed. If so, the value of
|
|
|
|
calc_state[k-1].prefix_count is incremented by 1.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
void add()
|
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
Prefix_calc_state *state;
|
|
|
|
uint first_changed= prefixes;
|
|
|
|
for (i= prefixes, state= calc_state+prefixes-1; i; i--, state--)
|
|
|
|
{
|
|
|
|
if (state->last_prefix->cmp())
|
|
|
|
first_changed= i-1;
|
|
|
|
}
|
|
|
|
if (empty)
|
|
|
|
{
|
|
|
|
first_changed= 0;
|
|
|
|
empty= FALSE;
|
|
|
|
}
|
|
|
|
for (i= 0, state= calc_state; i < prefixes; i++, state++)
|
|
|
|
{
|
|
|
|
if (state->last_prefix->null_value)
|
|
|
|
break;
|
|
|
|
if (i >= first_changed)
|
|
|
|
state->prefix_count++;
|
|
|
|
state->entry_count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Calculate the values of avg_frequency for all prefixes of an index
|
|
|
|
|
|
|
|
@details
|
|
|
|
This function is to be called after the index scan that counts the number
|
|
|
|
of distinct index prefixes has been completed. The function calculates
|
|
|
|
the value of avg_frequency for the index prefix with k components
|
|
|
|
as calc_state[k-1].entry_count/calc_state[k-1].prefix_count.
|
|
|
|
If calc_state[k-1].prefix_count happens to be 0, the value of
|
|
|
|
avg_frequency[k-1] is set to 0, i.e. is considered as unknown.
|
|
|
|
*/
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
void get_avg_frequency()
|
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
Prefix_calc_state *state;
|
2012-12-14 08:05:12 +01:00
|
|
|
|
|
|
|
if (is_single_comp_pk)
|
|
|
|
{
|
|
|
|
index_info->collected_stats->set_avg_frequency(0, 1.0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
for (i= 0, state= calc_state; i < prefixes; i++, state++)
|
|
|
|
{
|
|
|
|
if (i < prefixes)
|
|
|
|
{
|
2012-06-26 07:33:07 +02:00
|
|
|
double val= state->prefix_count == 0 ?
|
|
|
|
0 : (double) state->entry_count / state->prefix_count;
|
2012-07-27 02:50:08 +02:00
|
|
|
index_info->collected_stats->set_avg_frequency(i, val);
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
2012-07-27 02:50:08 +02:00
|
|
|
Create fields for min/max values to collect column statistics
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@param
|
|
|
|
table Table the fields are created for
|
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
@details
|
2012-01-05 02:51:53 +01:00
|
|
|
The function first allocates record buffers to store min/max values
|
|
|
|
for 'table's fields. Then for each table field f it creates Field structures
|
|
|
|
that point to these buffers rather than to the record buffer as the
|
|
|
|
Field object for f does. The pointers of the created fields are placed
|
2012-07-27 02:50:08 +02:00
|
|
|
in the collected_stats structure of the Field object for f.
|
|
|
|
The function allocates the buffers for min/max values in the table
|
|
|
|
memory.
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@note
|
|
|
|
The buffers allocated when min/max values are used to read statistics
|
|
|
|
from the persistent statistical tables differ from those buffers that
|
2012-07-27 02:50:08 +02:00
|
|
|
are used when statistics on min/max values for columns are collected
|
|
|
|
as they are allocated in different mem_roots.
|
2012-01-05 02:51:53 +01:00
|
|
|
The same is true for the fields created for min/max values.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static
|
2013-01-08 17:17:51 +01:00
|
|
|
void create_min_max_statistical_fields_for_table(TABLE *table)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
uint rec_buff_length= table->s->rec_buff_length;
|
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
if ((table->collected_stats->min_max_record_buffers=
|
|
|
|
(uchar *) alloc_root(&table->mem_root, 2*rec_buff_length)))
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
uchar *record= table->collected_stats->min_max_record_buffers;
|
|
|
|
memset(record, 0, 2*rec_buff_length);
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
for (uint i=0; i < 2; i++, record+= rec_buff_length)
|
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
Field *fld;
|
2013-01-08 17:17:51 +01:00
|
|
|
Field *table_field= *field_ptr;
|
2012-01-05 02:51:53 +01:00
|
|
|
my_ptrdiff_t diff= record-table->record[0];
|
2012-07-27 02:50:08 +02:00
|
|
|
if (!bitmap_is_set(table->read_set, table_field->field_index))
|
|
|
|
continue;
|
2012-01-05 02:51:53 +01:00
|
|
|
if (!(fld= table_field->clone(&table->mem_root, table, diff, TRUE)))
|
|
|
|
continue;
|
|
|
|
if (i == 0)
|
2012-07-27 02:50:08 +02:00
|
|
|
table_field->collected_stats->min_value= fld;
|
2012-01-05 02:51:53 +01:00
|
|
|
else
|
2012-07-27 02:50:08 +02:00
|
|
|
table_field->collected_stats->max_value= fld;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Create fields for min/max values to read column statistics
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd Thread handler
|
|
|
|
@param
|
|
|
|
table_share Table share the fields are created for
|
|
|
|
@param
|
|
|
|
is_safe TRUE <-> at any time only one thread can perform the function
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function first allocates record buffers to store min/max values
|
|
|
|
for 'table_share's fields. Then for each field f it creates Field structures
|
|
|
|
that point to these buffers rather than to the record buffer as the
|
|
|
|
Field object for f does. The pointers of the created fields are placed
|
|
|
|
in the read_stats structure of the Field object for f.
|
|
|
|
The function allocates the buffers for min/max values in the table share
|
|
|
|
memory.
|
|
|
|
If the parameter is_safe is TRUE then it is guaranteed that at any given time
|
|
|
|
only one thread executes the code of the function.
|
|
|
|
|
|
|
|
@note
|
|
|
|
The buffers allocated when min/max values are used to collect statistics
|
|
|
|
from the persistent statistical tables differ from those buffers that
|
|
|
|
are used when statistics on min/max values for columns are read as they
|
|
|
|
are allocated in different mem_roots.
|
|
|
|
The same is true for the fields created for min/max values.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static
|
2013-01-08 17:17:51 +01:00
|
|
|
void create_min_max_statistical_fields_for_table_share(THD *thd,
|
|
|
|
TABLE_SHARE *table_share)
|
2012-07-27 02:50:08 +02:00
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
|
|
|
|
Table_statistics *stats= stats_cb->table_stats;
|
|
|
|
|
|
|
|
if (stats->min_max_record_buffers)
|
|
|
|
return;
|
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
uint rec_buff_length= table_share->rec_buff_length;
|
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
if ((stats->min_max_record_buffers=
|
|
|
|
(uchar *) alloc_root(&stats_cb->mem_root, 2*rec_buff_length)))
|
2012-07-27 02:50:08 +02:00
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
uchar *record= stats->min_max_record_buffers;
|
|
|
|
memset(record, 0, 2*rec_buff_length);
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
for (uint i=0; i < 2; i++, record+= rec_buff_length)
|
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
for (Field **field_ptr= table_share->field; *field_ptr; field_ptr++)
|
2012-07-27 02:50:08 +02:00
|
|
|
{
|
|
|
|
Field *fld;
|
2013-01-08 17:17:51 +01:00
|
|
|
Field *table_field= *field_ptr;
|
2012-07-27 02:50:08 +02:00
|
|
|
my_ptrdiff_t diff= record - table_share->default_values;
|
2013-01-08 17:17:51 +01:00
|
|
|
if (!(fld= table_field->clone(&stats_cb->mem_root, diff)))
|
2012-07-27 02:50:08 +02:00
|
|
|
continue;
|
2013-01-08 17:17:51 +01:00
|
|
|
if (i == 0)
|
|
|
|
table_field->read_stats->min_value= fld;
|
|
|
|
else
|
|
|
|
table_field->read_stats->max_value= fld;
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-01-08 17:17:51 +01:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Allocate memory for the table's statistical data to be collected
|
|
|
|
|
|
|
|
@param
|
|
|
|
table Table for which the memory for statistical data is allocated
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function allocates the memory for the statistical data on 'table' with
|
|
|
|
the intention to collect the data there. The memory is allocated for
|
|
|
|
the statistics on the table, on the table's columns, and on the table's
|
|
|
|
indexes. The memory is allocated in the table's mem_root.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the memory for all statistical data has been successfully allocated
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
Each thread allocates its own memory to collect statistics on the table
|
|
|
|
It allows us, for example, to collect statistics on the different indexes
|
|
|
|
of the same table in parallel.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int alloc_statistics_for_table(THD* thd, TABLE *table)
|
|
|
|
{
|
|
|
|
Field **field_ptr;
|
2012-12-14 08:05:12 +01:00
|
|
|
uint fields;
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
DBUG_ENTER("alloc_statistics_for_table");
|
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
Table_statistics *table_stats=
|
|
|
|
(Table_statistics *) alloc_root(&table->mem_root,
|
|
|
|
sizeof(Table_statistics));
|
|
|
|
|
2012-12-14 08:05:12 +01:00
|
|
|
fields= table->s->fields;
|
2012-07-27 02:50:08 +02:00
|
|
|
Column_statistics_collected *column_stats=
|
|
|
|
(Column_statistics_collected *) alloc_root(&table->mem_root,
|
2012-12-14 08:05:12 +01:00
|
|
|
sizeof(Column_statistics_collected) *
|
2013-01-08 17:17:51 +01:00
|
|
|
(fields+1));
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
uint keys= table->s->keys;
|
|
|
|
Index_statistics *index_stats=
|
|
|
|
(Index_statistics *) alloc_root(&table->mem_root,
|
|
|
|
sizeof(Index_statistics) * keys);
|
|
|
|
|
|
|
|
uint key_parts= table->s->ext_key_parts;
|
|
|
|
ulong *idx_avg_frequency= (ulong*) alloc_root(&table->mem_root,
|
|
|
|
sizeof(ulong) * key_parts);
|
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
uint columns= 0;
|
|
|
|
for (field_ptr= table->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
|
|
|
if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
|
|
|
|
columns++;
|
|
|
|
}
|
|
|
|
uint hist_size= thd->variables.histogram_size;
|
2013-03-31 03:57:07 +02:00
|
|
|
Histogram_type hist_type= (Histogram_type) (thd->variables.histogram_type);
|
2013-03-26 07:48:29 +01:00
|
|
|
uchar *histogram= NULL;
|
|
|
|
if (hist_size > 0)
|
2019-08-13 18:29:59 +02:00
|
|
|
{
|
|
|
|
if ((histogram= (uchar *) alloc_root(&table->mem_root,
|
|
|
|
hist_size * columns)))
|
|
|
|
bzero(histogram, hist_size * columns);
|
|
|
|
|
|
|
|
}
|
2013-03-26 07:48:29 +01:00
|
|
|
|
|
|
|
if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency ||
|
|
|
|
(hist_size && !histogram))
|
2012-07-27 02:50:08 +02:00
|
|
|
DBUG_RETURN(1);
|
|
|
|
|
|
|
|
table->collected_stats= table_stats;
|
|
|
|
table_stats->column_stats= column_stats;
|
|
|
|
table_stats->index_stats= index_stats;
|
|
|
|
table_stats->idx_avg_frequency= idx_avg_frequency;
|
2013-03-26 07:48:29 +01:00
|
|
|
table_stats->histograms= histogram;
|
2012-07-27 02:50:08 +02:00
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
for (field_ptr= table->field; *field_ptr; field_ptr++, column_stats++)
|
2013-01-08 17:17:51 +01:00
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
(*field_ptr)->collected_stats= column_stats;
|
2013-01-08 17:17:51 +01:00
|
|
|
(*field_ptr)->collected_stats->max_value= NULL;
|
|
|
|
(*field_ptr)->collected_stats->min_value= NULL;
|
2013-03-26 07:48:29 +01:00
|
|
|
if (bitmap_is_set(table->read_set, (*field_ptr)->field_index))
|
|
|
|
{
|
|
|
|
column_stats->histogram.set_size(hist_size);
|
2013-03-31 03:57:07 +02:00
|
|
|
column_stats->histogram.set_type(hist_type);
|
2013-03-26 07:48:29 +01:00
|
|
|
column_stats->histogram.set_values(histogram);
|
|
|
|
histogram+= hist_size;
|
|
|
|
}
|
2013-01-08 17:17:51 +01:00
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
|
2012-07-27 21:05:23 +02:00
|
|
|
memset(idx_avg_frequency, 0, sizeof(ulong) * key_parts);
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
KEY *key_info, *end;
|
|
|
|
for (key_info= table->key_info, end= key_info + table->s->keys;
|
|
|
|
key_info < end;
|
|
|
|
key_info++, index_stats++)
|
|
|
|
{
|
|
|
|
key_info->collected_stats= index_stats;
|
|
|
|
key_info->collected_stats->init_avg_frequency(idx_avg_frequency);
|
|
|
|
idx_avg_frequency+= key_info->ext_key_parts;
|
|
|
|
}
|
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
create_min_max_statistical_fields_for_table(table);
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Allocate memory for the statistical data used by a table share
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd Thread handler
|
|
|
|
@param
|
|
|
|
table_share Table share for which the memory for statistical data is allocated
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function allocates the memory for the statistical data on a table in the
|
|
|
|
table's share memory with the intention to read the statistics there from
|
2012-12-05 09:31:05 +01:00
|
|
|
the system persistent statistical tables mysql.table_stats, mysql.column_stats,
|
|
|
|
mysql.index_stats. The memory is allocated for the statistics on the table,
|
2012-07-27 02:50:08 +02:00
|
|
|
on the table's columns, and on the table's indexes. The memory is allocated
|
|
|
|
in the table_share's mem_root.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the memory for all statistical data has been successfully allocated
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The situation when more than one thread try to allocate memory for
|
|
|
|
statistical data is rare. It happens under the following scenario:
|
|
|
|
1. One thread executes a query over table t with the system variable
|
|
|
|
'use_stat_tables' set to 'never'.
|
|
|
|
2. After this the second thread sets 'use_stat_tables' to 'preferably'
|
|
|
|
and executes a query over table t.
|
|
|
|
3. Simultaneously the third thread sets 'use_stat_tables' to 'preferably'
|
|
|
|
and executes a query over table t.
|
|
|
|
Here the second and the third threads try to allocate the memory for
|
|
|
|
statistical data at the same time. The precautions are taken to
|
|
|
|
guarantee the correctness of the allocation.
|
|
|
|
*/
|
|
|
|
|
2019-10-02 13:23:59 +02:00
|
|
|
static int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share)
|
2012-07-27 02:50:08 +02:00
|
|
|
{
|
|
|
|
Field **field_ptr;
|
2013-01-08 17:17:51 +01:00
|
|
|
KEY *key_info, *end;
|
|
|
|
TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
|
2012-07-27 02:50:08 +02:00
|
|
|
|
2012-09-08 21:04:31 +02:00
|
|
|
DBUG_ENTER("alloc_statistics_for_table_share");
|
2012-07-27 02:50:08 +02:00
|
|
|
|
2013-01-08 17:17:51 +01:00
|
|
|
Table_statistics *table_stats= stats_cb->table_stats;
|
|
|
|
if (!table_stats)
|
|
|
|
{
|
|
|
|
table_stats= (Table_statistics *) alloc_root(&stats_cb->mem_root,
|
|
|
|
sizeof(Table_statistics));
|
|
|
|
if (!table_stats)
|
|
|
|
DBUG_RETURN(1);
|
|
|
|
memset(table_stats, 0, sizeof(Table_statistics));
|
|
|
|
stats_cb->table_stats= table_stats;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint fields= table_share->fields;
|
|
|
|
Column_statistics *column_stats= table_stats->column_stats;
|
|
|
|
if (!column_stats)
|
|
|
|
{
|
|
|
|
column_stats= (Column_statistics *) alloc_root(&stats_cb->mem_root,
|
|
|
|
sizeof(Column_statistics) *
|
|
|
|
(fields+1));
|
|
|
|
if (column_stats)
|
|
|
|
{
|
|
|
|
memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
|
|
|
|
table_stats->column_stats= column_stats;
|
|
|
|
for (field_ptr= table_share->field;
|
|
|
|
*field_ptr;
|
|
|
|
field_ptr++, column_stats++)
|
|
|
|
{
|
|
|
|
(*field_ptr)->read_stats= column_stats;
|
|
|
|
(*field_ptr)->read_stats->min_value= NULL;
|
|
|
|
(*field_ptr)->read_stats->max_value= NULL;
|
|
|
|
}
|
|
|
|
create_min_max_statistical_fields_for_table_share(thd, table_share);
|
|
|
|
}
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
uint keys= table_share->keys;
|
2013-01-08 17:17:51 +01:00
|
|
|
Index_statistics *index_stats= table_stats->index_stats;
|
2012-07-27 02:50:08 +02:00
|
|
|
if (!index_stats)
|
2013-01-08 17:17:51 +01:00
|
|
|
{
|
|
|
|
index_stats= (Index_statistics *) alloc_root(&stats_cb->mem_root,
|
|
|
|
sizeof(Index_statistics) *
|
|
|
|
keys);
|
|
|
|
if (index_stats)
|
|
|
|
{
|
|
|
|
table_stats->index_stats= index_stats;
|
|
|
|
for (key_info= table_share->key_info, end= key_info + keys;
|
|
|
|
key_info < end;
|
|
|
|
key_info++, index_stats++)
|
|
|
|
{
|
|
|
|
key_info->read_stats= index_stats;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
uint key_parts= table_share->ext_key_parts;
|
2013-01-08 17:17:51 +01:00
|
|
|
ulong *idx_avg_frequency= table_stats->idx_avg_frequency;
|
2012-07-27 02:50:08 +02:00
|
|
|
if (!idx_avg_frequency)
|
|
|
|
{
|
2013-01-08 17:17:51 +01:00
|
|
|
idx_avg_frequency= (ulong*) alloc_root(&stats_cb->mem_root,
|
|
|
|
sizeof(ulong) * key_parts);
|
|
|
|
if (idx_avg_frequency)
|
|
|
|
{
|
|
|
|
memset(idx_avg_frequency, 0, sizeof(ulong) * key_parts);
|
|
|
|
table_stats->idx_avg_frequency= idx_avg_frequency;
|
|
|
|
for (key_info= table_share->key_info, end= key_info + keys;
|
|
|
|
key_info < end;
|
|
|
|
key_info++)
|
|
|
|
{
|
|
|
|
key_info->read_stats->init_avg_frequency(idx_avg_frequency);
|
|
|
|
idx_avg_frequency+= key_info->ext_key_parts;
|
|
|
|
}
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
}
|
2019-10-16 16:19:59 +02:00
|
|
|
DBUG_RETURN(column_stats && index_stats && idx_avg_frequency ? 0 : 1);
|
2012-07-27 02:50:08 +02:00
|
|
|
}
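/*
  Illustration (not part of the server code): the function above allocates
  each missing component independently in the share's mem_root and reports
  success only when every component is present, so a failed attempt can be
  retried later.  A minimal standalone sketch of the same pattern with
  hypothetical simplified types:

    #include <cstdlib>

    struct ShareStats
    {
      double *table_stats;     // one record of table-level statistics
      double *column_stats;    // per-column statistics
      double *index_stats;     // per-index statistics
    };

    // Allocate only the pieces that are still missing; return 0 only when
    // everything is in place, 1 otherwise (the caller may retry later).
    static int alloc_share_stats(ShareStats *s, size_t columns, size_t keys)
    {
      if (!s->table_stats)
        s->table_stats= (double *) calloc(1, sizeof(double));
      if (!s->column_stats)
        s->column_stats= (double *) calloc(columns + 1, sizeof(double));
      if (!s->index_stats)
        s->index_stats= (double *) calloc(keys, sizeof(double));
      return (s->table_stats && s->column_stats && s->index_stats) ? 0 : 1;
    }
*/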
|
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
|
|
|
|
/**
  @brief
  Allocate memory for the histogram used by a table share

  @param
  thd         Thread handler
  @param
  table_share Table share for which the memory for histogram data is allocated
  @param
  is_safe     TRUE <-> at any time only one thread can perform the function

  @note
  The function allocates the memory for the histogram built for a table in the
  table's share memory with the intention to read the data there from the
  system persistent statistical table mysql.column_stats.
  The memory is allocated in the table_share's mem_root.
  If the parameter is_safe is TRUE then it is guaranteed that at any given time
  only one thread executes the code of the function.

  @retval
  0     If the memory for all statistical data has been successfully allocated
  @retval
  1     Otherwise

  @note
  Currently the function is always called with the parameter is_safe set
  to FALSE.
*/
|
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
static
|
|
|
|
int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
|
|
|
|
bool is_safe)
|
|
|
|
{
|
|
|
|
TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
|
|
|
|
|
|
|
|
DBUG_ENTER("alloc_histograms_for_table_share");
|
|
|
|
|
|
|
|
if (!is_safe)
|
2013-07-21 16:39:19 +02:00
|
|
|
mysql_mutex_lock(&table_share->LOCK_share);
|
2013-03-26 07:48:29 +01:00
|
|
|
|
|
|
|
if (stats_cb->histograms_can_be_read)
|
|
|
|
{
|
|
|
|
if (!is_safe)
|
2013-07-21 16:39:19 +02:00
|
|
|
mysql_mutex_unlock(&table_share->LOCK_share);
|
2013-03-26 07:48:29 +01:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
Table_statistics *table_stats= stats_cb->table_stats;
|
|
|
|
ulong total_hist_size= table_stats->total_hist_size;
|
|
|
|
|
|
|
|
if (total_hist_size && !table_stats->histograms)
|
|
|
|
{
|
|
|
|
uchar *histograms= (uchar *) alloc_root(&stats_cb->mem_root,
|
|
|
|
total_hist_size);
|
|
|
|
if (!histograms)
|
|
|
|
{
|
|
|
|
if (!is_safe)
|
2013-07-21 16:39:19 +02:00
|
|
|
mysql_mutex_unlock(&table_share->LOCK_share);
|
2013-03-26 07:48:29 +01:00
|
|
|
DBUG_RETURN(1);
|
|
|
|
}
|
|
|
|
memset(histograms, 0, total_hist_size);
|
|
|
|
table_stats->histograms= histograms;
|
|
|
|
stats_cb->histograms_can_be_read= TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_safe)
|
2013-07-21 16:39:19 +02:00
|
|
|
mysql_mutex_unlock(&table_share->LOCK_share);
|
2013-03-26 07:48:29 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
|
|
|
}
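/*
  Illustration (not part of the server code): the function above serializes
  concurrent callers with LOCK_share unless is_safe is TRUE, allocates the
  shared histogram buffer at most once and publishes it by setting
  histograms_can_be_read.  A standalone sketch of the same pattern using
  std::mutex; all names below are illustrative:

    #include <cstring>
    #include <mutex>
    #include <new>

    struct HistogramCache
    {
      std::mutex lock;            // stand-in for TABLE_SHARE::LOCK_share
      bool can_be_read= false;    // stand-in for histograms_can_be_read
      unsigned char *buffer= nullptr;
    };

    static int alloc_histograms(HistogramCache *c, size_t total_size,
                                bool is_safe)
    {
      if (!is_safe) c->lock.lock();
      if (!c->can_be_read && total_size)
      {
        c->buffer= new (std::nothrow) unsigned char[total_size];
        if (!c->buffer)
        {
          if (!is_safe) c->lock.unlock();
          return 1;                          // allocation failed
        }
        memset(c->buffer, 0, total_size);
        c->can_be_read= true;                // publish the buffer
      }
      if (!is_safe) c->lock.unlock();
      return 0;
    }
*/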
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Initialize the aggregation fields to collect statistics on a column
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd Thread handler
|
|
|
|
@param
|
|
|
|
table_field Column to collect statistics for
|
|
|
|
*/
|
|
|
|
|
|
|
|
inline
|
|
|
|
void Column_statistics_collected::init(THD *thd, Field *table_field)
|
|
|
|
{
|
|
|
|
uint max_heap_table_size= thd->variables.max_heap_table_size;
|
2012-12-14 08:05:12 +01:00
|
|
|
TABLE *table= table_field->table;
|
|
|
|
uint pk= table->s->primary_key;
|
|
|
|
|
|
|
|
is_single_pk_col= FALSE;
|
2012-07-27 02:50:08 +02:00
|
|
|
|
2013-06-15 17:32:08 +02:00
|
|
|
if (pk != MAX_KEY && table->key_info[pk].user_defined_key_parts == 1 &&
|
2012-12-14 08:05:12 +01:00
|
|
|
table->key_info[pk].key_part[0].fieldnr == table_field->field_index + 1)
|
|
|
|
is_single_pk_col= TRUE;
|
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
column= table_field;
|
|
|
|
|
|
|
|
set_all_nulls();
|
|
|
|
|
|
|
|
nulls= 0;
|
|
|
|
column_total_length= 0;
|
2012-12-14 08:05:12 +01:00
|
|
|
if (is_single_pk_col)
|
|
|
|
count_distinct= NULL;
|
2012-07-27 02:50:08 +02:00
|
|
|
if (table_field->flags & BLOB_FLAG)
|
|
|
|
count_distinct= NULL;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
count_distinct=
|
|
|
|
table_field->type() == MYSQL_TYPE_BIT ?
|
|
|
|
new Count_distinct_field_bit(table_field, max_heap_table_size) :
|
|
|
|
new Count_distinct_field(table_field, max_heap_table_size);
|
|
|
|
}
|
|
|
|
if (count_distinct && !count_distinct->exists())
|
|
|
|
count_distinct= NULL;
|
|
|
|
}
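/*
  Illustration (not part of the server code): init() attaches a
  Count_distinct_field helper to every column that is neither a blob nor a
  single-column primary key; during the scan the helper is fed one value per
  row and later reports the number of distinct values used for
  'avg_frequency'.  The server's helper is bounded by max_heap_table_size;
  the sketch below uses a plain std::set purely to show the role the helper
  plays, not the actual implementation:

    #include <set>
    #include <string>

    class CountDistinctSketch
    {
      std::set<std::string> seen;
    public:
      void add(const std::string &value) { seen.insert(value); }
      unsigned long long get_value() const { return seen.size(); }
    };
*/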
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Perform aggregation for a row when collecting statistics on a column
|
|
|
|
|
|
|
|
@param
|
|
|
|
rowno The order number of the row
|
|
|
|
*/
|
|
|
|
|
|
|
|
inline
|
2014-08-04 21:36:02 +02:00
|
|
|
bool Column_statistics_collected::add(ha_rows rowno)
|
2012-07-27 02:50:08 +02:00
|
|
|
{
|
|
|
|
|
2014-08-04 21:36:02 +02:00
|
|
|
bool err= 0;
|
2012-07-27 02:50:08 +02:00
|
|
|
if (column->is_null())
|
|
|
|
nulls++;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
column_total_length+= column->value_length();
|
|
|
|
if (min_value && column->update_min(min_value, rowno == nulls))
|
|
|
|
set_not_null(COLUMN_STAT_MIN_VALUE);
|
|
|
|
if (max_value && column->update_max(max_value, rowno == nulls))
|
|
|
|
set_not_null(COLUMN_STAT_MAX_VALUE);
|
|
|
|
if (count_distinct)
|
2014-08-04 21:36:02 +02:00
|
|
|
err= count_distinct->add();
|
2012-07-27 02:50:08 +02:00
|
|
|
}
|
2014-08-04 21:36:02 +02:00
|
|
|
return err;
|
2012-07-27 02:50:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Get the results of aggregation when collecting the statistics on a column
|
|
|
|
|
|
|
|
@param
|
|
|
|
rows The total number of rows in the table
|
|
|
|
*/
|
|
|
|
|
|
|
|
inline
|
|
|
|
void Column_statistics_collected::finish(ha_rows rows)
|
|
|
|
{
|
|
|
|
double val;
|
|
|
|
|
|
|
|
if (rows)
|
|
|
|
{
|
|
|
|
val= (double) nulls / rows;
|
|
|
|
set_nulls_ratio(val);
|
|
|
|
set_not_null(COLUMN_STAT_NULLS_RATIO);
|
|
|
|
}
|
|
|
|
if (rows - nulls)
|
|
|
|
{
|
|
|
|
val= (double) column_total_length / (rows - nulls);
|
|
|
|
set_avg_length(val);
|
|
|
|
set_not_null(COLUMN_STAT_AVG_LENGTH);
|
|
|
|
}
|
|
|
|
if (count_distinct)
|
2013-03-26 07:48:29 +01:00
|
|
|
{
|
|
|
|
ulonglong distincts;
|
|
|
|
uint hist_size= count_distinct->get_hist_size();
|
|
|
|
if (hist_size == 0)
|
|
|
|
distincts= count_distinct->get_value();
|
|
|
|
else
|
|
|
|
distincts= count_distinct->get_value_with_histogram(rows - nulls);
|
2012-07-27 02:50:08 +02:00
|
|
|
if (distincts)
|
|
|
|
{
|
|
|
|
val= (double) (rows - nulls) / distincts;
|
|
|
|
set_avg_frequency(val);
|
|
|
|
set_not_null(COLUMN_STAT_AVG_FREQUENCY);
|
|
|
|
}
|
2013-03-26 07:48:29 +01:00
|
|
|
else
|
|
|
|
hist_size= 0;
|
|
|
|
histogram.set_size(hist_size);
|
|
|
|
set_not_null(COLUMN_STAT_HIST_SIZE);
|
|
|
|
if (hist_size && distincts)
|
|
|
|
{
|
2013-03-31 03:57:07 +02:00
|
|
|
set_not_null(COLUMN_STAT_HIST_TYPE);
|
2013-03-26 07:48:29 +01:00
|
|
|
histogram.set_values(count_distinct->get_histogram());
|
|
|
|
set_not_null(COLUMN_STAT_HISTOGRAM);
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
delete count_distinct;
|
|
|
|
count_distinct= NULL;
|
|
|
|
}
|
2012-12-14 08:05:12 +01:00
|
|
|
else if (is_single_pk_col)
|
|
|
|
{
|
|
|
|
val= 1.0;
|
|
|
|
set_avg_frequency(val);
|
|
|
|
set_not_null(COLUMN_STAT_AVG_FREQUENCY);
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
}
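/*
  Illustration (not part of the server code): finish() derives the column
  characteristics from the counters accumulated by add():
  nulls_ratio = nulls / rows, avg_length = total_length / (rows - nulls)
  and avg_frequency = (rows - nulls) / distincts.  A worked standalone
  example with made-up counter values:

    #include <cstdio>

    int main()
    {
      // Hypothetical counters after a full scan of 1000 rows.
      double rows= 1000, nulls= 100, total_length= 7200, distincts= 300;

      double nulls_ratio=   nulls / rows;                    // 0.10
      double avg_length=    total_length / (rows - nulls);   // 8.0 bytes
      double avg_frequency= (rows - nulls) / distincts;      // 3.0 rows/value

      printf("nulls_ratio=%.2f avg_length=%.2f avg_frequency=%.2f\n",
             nulls_ratio, avg_length, avg_frequency);
      return 0;
    }
*/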
|
|
|
|
|
|
|
|
|
2012-08-27 23:19:25 +02:00
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Clean up auxiliary structures used for aggregation
|
|
|
|
*/
|
|
|
|
|
|
|
|
inline
|
|
|
|
void Column_statistics_collected::cleanup()
|
|
|
|
{
|
|
|
|
if (count_distinct)
|
|
|
|
{
|
|
|
|
delete count_distinct;
|
|
|
|
count_distinct= NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Collect statistical data on an index
|
|
|
|
|
|
|
|
@param
|
|
|
|
table The table the index belongs to
|
|
|
|
index The number of this index in the table
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function collects the value of 'avg_frequency' for the prefixes
|
|
|
|
on an index from 'table'. The index is specified by its number.
|
|
|
|
If the scan is successful the calculated statistics is saved in the
|
|
|
|
elements of the array write_stat.avg_frequency of the KEY_INFO structure
|
|
|
|
for the index. The statistics for the prefix with k components is saved
|
|
|
|
in the element number k-1.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the statistics has been successfully collected
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function collects statistics for the index prefixes for one index
|
|
|
|
scan during which no data is fetched from the table records. That's why
|
|
|
|
statistical data for prefixes that contain part of a field is not
|
|
|
|
collected.
|
|
|
|
The function employs an object of the helper class Index_prefix_calc to
|
|
|
|
count for each index prefix the number of index entries without nulls and
|
|
|
|
the number of distinct entries among them.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
static
|
2012-08-18 20:49:14 +02:00
|
|
|
int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
KEY *key_info= &table->key_info[index];
|
|
|
|
ha_rows rows= 0;
|
2015-01-17 17:58:10 +01:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
DBUG_ENTER("collect_statistics_for_index");
|
|
|
|
|
2015-01-17 17:58:10 +01:00
|
|
|
/* No statistics for FULLTEXT indexes. */
|
2018-04-17 00:44:46 +02:00
|
|
|
if (key_info->flags & (HA_FULLTEXT|HA_SPATIAL))
|
2015-01-17 17:58:10 +01:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
|
|
|
|
Index_prefix_calc index_prefix_calc(table, key_info);
|
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
DEBUG_SYNC(table->in_use, "statistics_collection_start1");
|
|
|
|
DEBUG_SYNC(table->in_use, "statistics_collection_start2");
|
|
|
|
|
2012-12-14 08:05:12 +01:00
|
|
|
if (index_prefix_calc.is_single_comp_pk)
|
|
|
|
{
|
|
|
|
index_prefix_calc.get_avg_frequency();
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
table->key_read= 1;
|
|
|
|
table->file->extra(HA_EXTRA_KEYREAD);
|
|
|
|
|
|
|
|
table->file->ha_index_init(index, TRUE);
|
|
|
|
rc= table->file->ha_index_first(table->record[0]);
|
|
|
|
while (rc != HA_ERR_END_OF_FILE)
|
|
|
|
{
|
2012-08-18 20:49:14 +02:00
|
|
|
if (thd->killed)
|
|
|
|
break;
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
if (rc)
|
|
|
|
break;
|
|
|
|
rows++;
|
|
|
|
index_prefix_calc.add();
|
|
|
|
rc= table->file->ha_index_next(table->record[0]);
|
|
|
|
}
|
|
|
|
table->key_read= 0;
|
|
|
|
table->file->ha_index_end();
|
|
|
|
|
2012-08-18 20:49:14 +02:00
|
|
|
rc= (rc == HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
if (!rc)
|
|
|
|
index_prefix_calc.get_avg_frequency();
|
|
|
|
|
2015-01-17 17:58:10 +01:00
|
|
|
DBUG_RETURN(rc);
|
2012-01-05 02:51:53 +01:00
|
|
|
}
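/*
  Illustration (not part of the server code): for a prefix of k key parts the
  index scan above effectively computes
  avg_frequency = (number of entries whose first k parts contain no NULL) /
                  (number of distinct k-part prefixes).
  A standalone sketch of that computation over key tuples already sorted in
  index order; the container types are illustrative, not Index_prefix_calc:

    #include <algorithm>
    #include <string>
    #include <vector>

    typedef std::vector<std::string> KeyTuple;   // "" stands for NULL here

    static double avg_frequency_for_prefix(const std::vector<KeyTuple> &rows,
                                           size_t k)
    {
      unsigned long long non_null= 0, distinct= 0;
      const KeyTuple *prev= nullptr;
      for (const KeyTuple &e : rows)             // rows are in index order
      {
        bool has_null= false;
        for (size_t i= 0; i < k; i++)
          if (e[i].empty()) { has_null= true; break; }
        if (has_null)
          continue;                              // NULLs are not counted
        non_null++;
        if (!prev || !std::equal(e.begin(), e.begin() + k, prev->begin()))
          distinct++;                            // a new distinct prefix
        prev= &e;
      }
      return distinct ? (double) non_null / distinct : 0.0;
    }
*/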
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
|
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Collect statistical data for a table
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
table The table to collect statistics on
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function collects data for various statistical characteristics on
|
|
|
|
the table 'table'. These data is saved in the internal fields that could
|
|
|
|
be reached from 'table'. The data is prepared to be saved in the persistent
|
|
|
|
statistical table by the function update_statistics_for_table.
|
|
|
|
The collected statistical values are not placed in the same fields that
|
|
|
|
keep the statistical data used by the optimizer. Therefore, at any time,
|
|
|
|
there is no collision between the statistics being collected and the one
|
|
|
|
used by the optimizer to look for optimal query execution plans for other
|
|
|
|
clients.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the statistics has been successfully collected
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function first collects statistical data for statistical characteristics
|
2012-12-05 09:31:05 +01:00
|
|
|
to be saved in the statistical tables table_stat and column_stats. To do this
|
2012-01-05 02:51:53 +01:00
|
|
|
it performs a full table scan of 'table'. At this scan the function collects
|
|
|
|
statistics on each column of the table and counts the total number of the
|
|
|
|
scanned rows. To calculate the value of 'avg_frequency' for a column the
|
|
|
|
function constructs an object of the helper class Count_distinct_field
|
|
|
|
(or its derivation). Currently this class cannot count the number of
|
|
|
|
distinct values for blob columns. So the value of 'avg_frequency' for
|
|
|
|
blob columns is always null.
|
|
|
|
After the full table scan the function calls collect_statistics_for_index
|
|
|
|
for each table index. The latter performs full index scan for each index.
|
|
|
|
|
|
|
|
@note
|
|
|
|
Currently the statistical data is collected indiscriminately for all
|
|
|
|
columns/indexes of 'table', for all statistical characteristics.
|
|
|
|
TODO. Collect only specified statistical characteristics for specified
|
|
|
|
columns/indexes.
|
|
|
|
|
|
|
|
@note
|
|
|
|
Currently the process of collecting statistical data is not optimized.
|
|
|
|
For example, 'avg_frequency' for a column could be copied from the
|
|
|
|
'avg_frequency' collected for an index if this column is used as the
|
|
|
|
first component of the index. Min and max values for this column could
|
|
|
|
be extracted from the index as well.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int collect_statistics_for_table(THD *thd, TABLE *table)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
Field **field_ptr;
|
|
|
|
Field *table_field;
|
|
|
|
ha_rows rows= 0;
|
2012-07-11 01:34:39 +02:00
|
|
|
handler *file=table->file;
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
DBUG_ENTER("collect_statistics_for_table");
|
|
|
|
|
2012-07-27 02:50:08 +02:00
|
|
|
table->collected_stats->cardinality_is_null= TRUE;
|
|
|
|
table->collected_stats->cardinality= 0;
|
2012-12-14 08:05:12 +01:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
for (field_ptr= table->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
table_field= *field_ptr;
|
2012-05-23 05:55:07 +02:00
|
|
|
if (!bitmap_is_set(table->read_set, table_field->field_index))
|
|
|
|
continue;
|
2012-07-27 02:50:08 +02:00
|
|
|
table_field->collected_stats->init(thd, table_field);
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
|
2014-11-18 22:25:27 +01:00
|
|
|
restore_record(table, s->default_values);
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
/* Perform a full table scan to collect statistics on 'table's columns */
|
2012-05-07 07:42:14 +02:00
|
|
|
if (!(rc= file->ha_rnd_init(TRUE)))
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2012-12-10 06:33:08 +01:00
|
|
|
DEBUG_SYNC(table->in_use, "statistics_collection_start");
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
while ((rc= file->ha_rnd_next(table->record[0])) != HA_ERR_END_OF_FILE)
|
|
|
|
{
|
2012-08-18 20:49:14 +02:00
|
|
|
if (thd->killed)
|
|
|
|
break;
|
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
if (rc)
|
2012-08-27 23:19:25 +02:00
|
|
|
{
|
|
|
|
if (rc == HA_ERR_RECORD_DELETED)
|
|
|
|
continue;
|
2012-01-05 02:51:53 +01:00
|
|
|
break;
|
2012-08-27 23:19:25 +02:00
|
|
|
}
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
for (field_ptr= table->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
|
|
|
table_field= *field_ptr;
|
2012-05-23 05:55:07 +02:00
|
|
|
if (!bitmap_is_set(table->read_set, table_field->field_index))
|
2012-07-27 02:50:08 +02:00
|
|
|
continue;
|
2014-08-04 21:36:02 +02:00
|
|
|
if ((rc= table_field->collected_stats->add(rows)))
|
|
|
|
break;
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
2014-08-04 21:36:02 +02:00
|
|
|
if (rc)
|
|
|
|
break;
|
2012-01-05 02:51:53 +01:00
|
|
|
rows++;
|
|
|
|
}
|
|
|
|
file->ha_rnd_end();
|
|
|
|
}
|
2012-08-18 20:49:14 +02:00
|
|
|
rc= (rc == HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Calculate values for all statistical characteristics on columns and
|
|
|
|
for each field f of 'table' save them in the write_stat structure
|
|
|
|
from the Field object for f.
|
|
|
|
*/
|
|
|
|
if (!rc)
|
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
table->collected_stats->cardinality_is_null= FALSE;
|
|
|
|
table->collected_stats->cardinality= rows;
|
2012-08-27 23:19:25 +02:00
|
|
|
}
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
bitmap_clear_all(table->write_set);
|
2012-08-27 23:19:25 +02:00
|
|
|
for (field_ptr= table->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
|
|
|
table_field= *field_ptr;
|
|
|
|
if (!bitmap_is_set(table->read_set, table_field->field_index))
|
|
|
|
continue;
|
2013-03-26 07:48:29 +01:00
|
|
|
bitmap_set_bit(table->write_set, table_field->field_index);
|
2012-08-27 23:19:25 +02:00
|
|
|
if (!rc)
|
2012-07-27 02:50:08 +02:00
|
|
|
table_field->collected_stats->finish(rows);
|
2012-08-27 23:19:25 +02:00
|
|
|
else
|
|
|
|
table_field->collected_stats->cleanup();
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
2014-08-04 21:36:02 +02:00
|
|
|
bitmap_clear_all(table->write_set);
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
if (!rc)
|
|
|
|
{
|
2012-05-23 05:55:07 +02:00
|
|
|
uint key;
|
|
|
|
key_map::Iterator it(table->keys_in_use_for_query);
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
MY_BITMAP *save_read_set= table->read_set;
|
|
|
|
table->read_set= &table->tmp_set;
|
|
|
|
bitmap_set_all(table->read_set);
|
2012-05-23 05:55:07 +02:00
|
|
|
|
2012-01-05 02:51:53 +01:00
|
|
|
/* Collect statistics for indexes */
|
2012-05-23 05:55:07 +02:00
|
|
|
while ((key= it++) != key_map::Iterator::BITMAP_END)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2012-08-18 20:49:14 +02:00
|
|
|
if ((rc= collect_statistics_for_index(thd, table, key)))
|
2012-01-05 02:51:53 +01:00
|
|
|
break;
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
|
|
|
|
table->read_set= save_read_set;
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Update statistics for a table in the persistent statistical tables
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
table The table to collect statistics on
|
|
|
|
|
|
|
|
@details
|
|
|
|
For each statistical table st the function looks for the rows from this
|
|
|
|
table that contain statistical data on 'table'. If rows with given
|
2012-07-11 01:34:39 +02:00
|
|
|
statistical characteristics exist they are updated with the new statistical
|
2012-01-05 02:51:53 +01:00
|
|
|
values taken from internal structures for 'table'. Otherwise new rows
|
|
|
|
with these statistical characteristics are added into st.
|
|
|
|
It is assumed that values stored in the statistical tables are found and
|
|
|
|
saved by the function collect_statistics_for_table.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If all statistical tables have been successfully updated
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when executing the ANALYZE actions for 'table'.
|
|
|
|
The function first unlocks the opened table the statistics on which has
|
|
|
|
been collected, but does not close it, so all collected statistical data
|
|
|
|
remains in internal structures for 'table'. Then the function opens the
|
|
|
|
statistical tables and writes the statistical data for 'table' into them.
|
|
|
|
It is not allowed just to open statistical tables for writing when some
|
|
|
|
other tables are locked for reading.
|
|
|
|
After the statistical tables have been opened they are updated one by one
|
|
|
|
with the new statistics on 'table'. Objects of the helper classes
|
|
|
|
Table_stat, Column_stat and Index_stat are employed for this.
|
|
|
|
After having been updated the statistical system tables are closed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int update_statistics_for_table(THD *thd, TABLE *table)
|
|
|
|
{
|
|
|
|
TABLE_LIST tables[STATISTICS_TABLES];
|
2012-03-19 09:35:32 +01:00
|
|
|
Open_tables_backup open_tables_backup;
|
2012-01-05 02:51:53 +01:00
|
|
|
uint i;
|
|
|
|
int err;
|
2012-12-17 06:33:17 +01:00
|
|
|
enum_binlog_format save_binlog_format;
|
2012-01-05 02:51:53 +01:00
|
|
|
int rc= 0;
|
|
|
|
TABLE *stat_table;
|
|
|
|
|
|
|
|
DBUG_ENTER("update_statistics_for_table");
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
DEBUG_SYNC(thd, "statistics_update_start");
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
|
2012-07-11 01:34:39 +02:00
|
|
|
DBUG_RETURN(rc);
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Update the statistical table table_stats */
|
2012-01-05 02:51:53 +01:00
|
|
|
stat_table= tables[TABLE_STAT].table;
|
|
|
|
Table_stat table_stat(stat_table, table);
|
|
|
|
restore_record(stat_table, s->default_values);
|
|
|
|
table_stat.set_key_fields();
|
|
|
|
err= table_stat.update_stat();
|
|
|
|
if (err)
|
|
|
|
rc= 1;
|
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Update the statistical table colum_stats */
|
2012-01-05 02:51:53 +01:00
|
|
|
stat_table= tables[COLUMN_STAT].table;
|
|
|
|
Column_stat column_stat(stat_table, table);
|
|
|
|
for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
|
|
|
Field *table_field= *field_ptr;
|
2012-05-23 05:55:07 +02:00
|
|
|
if (!bitmap_is_set(table->read_set, table_field->field_index))
|
|
|
|
continue;
|
2012-01-05 02:51:53 +01:00
|
|
|
restore_record(stat_table, s->default_values);
|
|
|
|
column_stat.set_key_fields(table_field);
|
|
|
|
err= column_stat.update_stat();
|
2012-07-11 01:34:39 +02:00
|
|
|
if (err && !rc)
|
2012-01-05 02:51:53 +01:00
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Update the statistical table index_stats */
|
2012-01-05 02:51:53 +01:00
|
|
|
stat_table= tables[INDEX_STAT].table;
|
2012-05-23 05:55:07 +02:00
|
|
|
uint key;
|
|
|
|
key_map::Iterator it(table->keys_in_use_for_query);
|
2012-01-05 02:51:53 +01:00
|
|
|
Index_stat index_stat(stat_table, table);
|
|
|
|
|
2012-05-23 05:55:07 +02:00
|
|
|
while ((key= it++) != key_map::Iterator::BITMAP_END)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
2012-05-23 05:55:07 +02:00
|
|
|
KEY *key_info= table->key_info+key;
|
2012-05-07 07:42:14 +02:00
|
|
|
uint key_parts= table->actual_n_key_parts(key_info);
|
2012-01-05 02:51:53 +01:00
|
|
|
for (i= 0; i < key_parts; i++)
|
|
|
|
{
|
|
|
|
restore_record(stat_table, s->default_values);
|
|
|
|
index_stat.set_key_fields(key_info, i+1);
|
|
|
|
err= index_stat.update_stat();
|
2012-07-11 01:34:39 +02:00
|
|
|
if (err && !rc)
|
2012-01-05 02:51:53 +01:00
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
thd->restore_stmt_binlog_format(save_binlog_format);
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
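/*
  Illustration (not part of the server code): for every object (the table,
  each read-set column, each index prefix) the function above positions the
  statistical table on the identifying key and then updates the existing row
  or inserts a new one, remembering that any single failure makes the whole
  call report failure.  A minimal standalone sketch of that pattern with a
  std::map standing in for the persistent table; names are hypothetical:

    #include <map>
    #include <string>
    #include <vector>

    typedef std::map<std::string, double> StatTable;

    static int update_stat(StatTable &t, const std::string &key, double val)
    {
      t[key]= val;            // operator[] inserts the row when it is missing
      return 0;               // 0 = success, non-zero = failure
    }

    static int update_all_columns(StatTable &t,
                                  const std::vector<std::string> &columns)
    {
      int rc= 0;
      for (const std::string &col : columns)
        if (update_stat(t, col, 1.0) && !rc)
          rc= 1;              // remember the failure, keep going
      return rc;
    }
*/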
|
|
|
|
|
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
/**
|
2012-01-05 02:51:53 +01:00
|
|
|
@brief
|
|
|
|
Read statistics for a table from the persistent statistical tables
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
table The table to read statistics on
|
2012-09-08 21:04:31 +02:00
|
|
|
@param
|
|
|
|
stat_tables The array of TABLE_LIST objects for statistical tables
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@details
|
|
|
|
For each statistical table the function looks for the rows from this
|
|
|
|
table that contain statistical data on 'table'. If such rows are found
the data from their statistical columns is read into the appropriate
|
|
|
|
fields of internal structures for 'table'. Later at the query processing
|
|
|
|
this data is supposed to be used by the optimizer.
|
2012-09-08 21:04:31 +02:00
|
|
|
The parameter stat_tables should point to an array of TABLE_LIST
|
|
|
|
objects for all statistical tables linked into a list. All statistical
|
|
|
|
tables are supposed to be opened.
|
2012-12-13 08:16:54 +01:00
|
|
|
The function is called by read_statistics_for_tables_if_needed().
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
@retval
|
2012-09-08 21:04:31 +02:00
|
|
|
0 If data has been successfully read for the table
|
2012-01-05 02:51:53 +01:00
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
2012-09-08 21:04:31 +02:00
|
|
|
Objects of the helper classes Table_stat, Column_stat and Index_stat
|
2012-01-05 02:51:53 +01:00
|
|
|
are employed to read statistical data from the statistical tables.
|
|
|
|
*/
|
|
|
|
|
2012-09-08 21:04:31 +02:00
|
|
|
static
|
|
|
|
int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
uint i;
|
|
|
|
TABLE *stat_table;
|
|
|
|
Field *table_field;
|
|
|
|
Field **field_ptr;
|
|
|
|
KEY *key_info, *key_info_end;
|
2012-07-27 02:50:08 +02:00
|
|
|
TABLE_SHARE *table_share= table->s;
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
DBUG_ENTER("read_statistics_for_table");
|
2019-10-16 16:19:59 +02:00
|
|
|
DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
|
|
|
|
DEBUG_SYNC(thd, "statistics_mem_alloc_start2");
|
|
|
|
|
|
|
|
if (!table_share->stats_cb.start_stats_load())
|
|
|
|
DBUG_RETURN(table_share->stats_cb.stats_are_ready() ? 0 : 1);
|
|
|
|
|
|
|
|
if (alloc_statistics_for_table_share(thd, table_share))
|
|
|
|
{
|
|
|
|
table_share->stats_cb.abort_stats_load();
|
|
|
|
DBUG_RETURN(1);
|
|
|
|
}
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Read statistics from the statistical table table_stats */
|
2019-10-16 16:19:59 +02:00
|
|
|
Table_statistics *read_stats= table_share->stats_cb.table_stats;
|
2012-09-08 21:04:31 +02:00
|
|
|
stat_table= stat_tables[TABLE_STAT].table;
|
2012-01-05 02:51:53 +01:00
|
|
|
Table_stat table_stat(stat_table, table);
|
|
|
|
table_stat.set_key_fields();
|
|
|
|
table_stat.get_stat_values();
|
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Read statistics from the statistical table column_stats */
|
2012-09-08 21:04:31 +02:00
|
|
|
stat_table= stat_tables[COLUMN_STAT].table;
|
2013-03-26 07:48:29 +01:00
|
|
|
ulong total_hist_size= 0;
|
2012-01-05 02:51:53 +01:00
|
|
|
Column_stat column_stat(stat_table, table);
|
2012-07-27 02:50:08 +02:00
|
|
|
for (field_ptr= table_share->field; *field_ptr; field_ptr++)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
table_field= *field_ptr;
|
|
|
|
column_stat.set_key_fields(table_field);
|
|
|
|
column_stat.get_stat_values();
|
2013-03-26 07:48:29 +01:00
|
|
|
total_hist_size+= table_field->read_stats->histogram.get_size();
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
2013-03-26 07:48:29 +01:00
|
|
|
read_stats->total_hist_size= total_hist_size;
|
2012-01-05 02:51:53 +01:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Read statistics from the statistical table index_stats */
|
2012-09-08 21:04:31 +02:00
|
|
|
stat_table= stat_tables[INDEX_STAT].table;
|
2012-01-05 02:51:53 +01:00
|
|
|
Index_stat index_stat(stat_table, table);
|
2012-07-27 02:50:08 +02:00
|
|
|
for (key_info= table_share->key_info,
|
|
|
|
key_info_end= key_info + table_share->keys;
|
2012-01-05 02:51:53 +01:00
|
|
|
key_info < key_info_end; key_info++)
|
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
uint key_parts= key_info->ext_key_parts;
|
2012-05-07 07:42:14 +02:00
|
|
|
for (i= 0; i < key_parts; i++)
|
2012-01-05 02:51:53 +01:00
|
|
|
{
|
|
|
|
index_stat.set_key_fields(key_info, i+1);
|
|
|
|
index_stat.get_stat_values();
|
|
|
|
}
|
2012-04-12 02:14:06 +02:00
|
|
|
|
|
|
|
key_part_map ext_key_part_map= key_info->ext_key_part_map;
|
2013-05-21 21:00:08 +02:00
|
|
|
if (key_info->user_defined_key_parts != key_info->ext_key_parts &&
|
|
|
|
key_info->read_stats->get_avg_frequency(key_info->user_defined_key_parts) == 0)
|
2012-04-12 02:14:06 +02:00
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
KEY *pk_key_info= table_share->key_info + table_share->primary_key;
|
2013-05-21 21:00:08 +02:00
|
|
|
uint k= key_info->user_defined_key_parts;
|
|
|
|
uint pk_parts= pk_key_info->user_defined_key_parts;
|
2013-01-08 17:17:51 +01:00
|
|
|
ha_rows n_rows= read_stats->cardinality;
|
2012-07-27 02:50:08 +02:00
|
|
|
double k_dist= n_rows / key_info->read_stats->get_avg_frequency(k-1);
|
2012-05-07 07:42:14 +02:00
|
|
|
uint m= 0;
|
2012-04-12 02:14:06 +02:00
|
|
|
for (uint j= 0; j < pk_parts; j++)
|
|
|
|
{
|
|
|
|
if (!(ext_key_part_map & 1 << j))
|
2012-05-07 07:42:14 +02:00
|
|
|
{
|
2012-06-26 07:33:07 +02:00
|
|
|
for (uint l= k; l < k + m; l++)
|
2012-05-07 07:42:14 +02:00
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
double avg_frequency=
|
|
|
|
pk_key_info->read_stats->get_avg_frequency(j-1);
|
2012-05-07 07:42:14 +02:00
|
|
|
set_if_smaller(avg_frequency, 1);
|
2012-07-27 02:50:08 +02:00
|
|
|
double val= pk_key_info->read_stats->get_avg_frequency(j) /
|
2012-06-26 07:33:07 +02:00
|
|
|
avg_frequency;
|
2012-07-27 02:50:08 +02:00
|
|
|
key_info->read_stats->set_avg_frequency (l, val);
|
2012-05-07 07:42:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
double avg_frequency= pk_key_info->read_stats->get_avg_frequency(j);
|
|
|
|
key_info->read_stats->set_avg_frequency(k + m, avg_frequency);
|
2012-06-26 07:33:07 +02:00
|
|
|
m++;
|
2012-05-07 07:42:14 +02:00
|
|
|
}
|
|
|
|
}
|
2012-06-26 07:33:07 +02:00
|
|
|
for (uint l= k; l < k + m; l++)
|
2012-05-07 07:42:14 +02:00
|
|
|
{
|
2012-07-27 02:50:08 +02:00
|
|
|
double avg_frequency= key_info->read_stats->get_avg_frequency(l);
|
2013-01-08 17:17:51 +01:00
|
|
|
if (avg_frequency == 0 || read_stats->cardinality_is_null)
|
2012-04-12 02:14:06 +02:00
|
|
|
avg_frequency= 1;
|
|
|
|
else if (avg_frequency > 1)
|
2012-05-07 07:42:14 +02:00
|
|
|
{
|
|
|
|
avg_frequency/= k_dist;
|
|
|
|
set_if_bigger(avg_frequency, 1);
|
|
|
|
}
|
2012-07-27 02:50:08 +02:00
|
|
|
key_info->read_stats->set_avg_frequency(l, avg_frequency);
|
2012-04-12 02:14:06 +02:00
|
|
|
}
|
|
|
|
}
|
2012-01-05 02:51:53 +01:00
|
|
|
}
|
2019-10-16 16:19:59 +02:00
|
|
|
table_share->stats_cb.end_stats_load();
|
2012-09-08 21:04:31 +02:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
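/*
  Illustration (not part of the server code): the loop above fills
  'avg_frequency' for the key parts a secondary index implicitly borrows from
  the primary key.  Roughly, each borrowed part starts from a primary-key
  based estimate, is divided by k_dist (the number of distinct prefixes of
  the user-defined part of the key) and is clipped so it never drops below 1.
  A rough standalone numeric sketch under assumed statistics:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
      // Assumed statistics: 10000 rows, a secondary key whose user-defined
      // part has avg_frequency 50, and a PK part with avg_frequency 2.
      double n_rows= 10000, sec_avg_freq= 50, pk_avg_freq= 2;

      double k_dist= n_rows / sec_avg_freq;           // 200 distinct prefixes

      double ext_avg_freq= pk_avg_freq;
      if (ext_avg_freq > 1)
      {
        ext_avg_freq/= k_dist;
        ext_avg_freq= std::max(ext_avg_freq, 1.0);    // set_if_bigger(..., 1)
      }
      printf("k_dist=%.0f ext_avg_frequency=%.2f\n", k_dist, ext_avg_freq);
      return 0;
    }
*/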
|
|
|
|
|
|
|
|
|
2018-07-16 01:24:24 +02:00
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Cleanup of min/max statistical values for table share
|
|
|
|
*/
|
|
|
|
|
|
|
|
void delete_stat_values_for_table_share(TABLE_SHARE *table_share)
|
|
|
|
{
|
|
|
|
TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
|
|
|
|
Table_statistics *table_stats= stats_cb->table_stats;
|
|
|
|
if (!table_stats)
|
|
|
|
return;
|
|
|
|
Column_statistics *column_stats= table_stats->column_stats;
|
|
|
|
if (!column_stats)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (Field **field_ptr= table_share->field;
|
|
|
|
*field_ptr;
|
|
|
|
field_ptr++, column_stats++)
|
|
|
|
{
|
|
|
|
if (column_stats->min_value)
|
|
|
|
{
|
|
|
|
delete column_stats->min_value;
|
|
|
|
column_stats->min_value= NULL;
|
|
|
|
}
|
|
|
|
if (column_stats->max_value)
|
|
|
|
{
|
|
|
|
delete column_stats->max_value;
|
|
|
|
column_stats->max_value= NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-04-16 07:43:07 +02:00
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Read histogram for a table from the persistent statistical tables
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
table The table to read histograms for
|
|
|
|
@param
|
|
|
|
stat_tables The array of TABLE_LIST objects for statistical tables
|
|
|
|
|
|
|
|
@details
|
|
|
|
For the statistical table column_stats the function looks for the rows
|
|
|
|
from this table that contain statistical data on 'table'. If such rows
|
|
|
|
are found the histograms from them are read into the memory allocated
|
|
|
|
for histograms of 'table'. Later at the query processing these histograms
|
|
|
|
are supposed to be used by the optimizer.
|
|
|
|
The parameter stat_tables should point to an array of TABLE_LIST
|
|
|
|
objects for all statistical tables linked into a list. All statistical
|
|
|
|
tables are supposed to be opened.
|
|
|
|
The function is called by read_statistics_for_tables_if_needed().
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If data has been successfully read for the table
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
Objects of the helper class Column_stat are employed to read histograms
from the statistical table column_stats.
|
|
|
|
*/
|
|
|
|
|
2013-03-26 07:48:29 +01:00
|
|
|
static
|
|
|
|
int read_histograms_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
|
|
|
|
{
|
|
|
|
TABLE_SHARE *table_share= table->s;
|
|
|
|
|
|
|
|
DBUG_ENTER("read_histograms_for_table");
|
|
|
|
|
|
|
|
if (!table_share->stats_cb.histograms_can_be_read)
|
|
|
|
{
|
|
|
|
(void) alloc_histograms_for_table_share(thd, table_share, FALSE);
|
|
|
|
}
|
|
|
|
if (table_share->stats_cb.histograms_can_be_read &&
|
|
|
|
!table_share->stats_cb.histograms_are_read)
|
|
|
|
{
|
|
|
|
Field **field_ptr;
|
|
|
|
uchar *histogram= table_share->stats_cb.table_stats->histograms;
|
|
|
|
TABLE *stat_table= stat_tables[COLUMN_STAT].table;
|
|
|
|
Column_stat column_stat(stat_table, table);
|
|
|
|
for (field_ptr= table_share->field; *field_ptr; field_ptr++)
|
|
|
|
{
|
|
|
|
Field *table_field= *field_ptr;
|
|
|
|
uint hist_size= table_field->read_stats->histogram.get_size();
|
|
|
|
if (hist_size)
|
|
|
|
{
|
|
|
|
column_stat.set_key_fields(table_field);
|
|
|
|
table_field->read_stats->histogram.set_values(histogram);
|
|
|
|
column_stat.get_histogram_value();
|
|
|
|
histogram+= hist_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
2012-09-08 21:04:31 +02:00
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Read statistics for tables from a table list if it is needed
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
tables The tables list for whose tables to read statistics
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function first checks whether statistics from the statistical tables
need to be read for any of the tables opened and locked for the statement.
|
|
|
|
Then, if so, it opens system statistical tables for read and reads
|
|
|
|
the statistical data from them for those tables from the list for which it
|
|
|
|
makes sense. Then the function closes system statistical tables.
|
|
|
|
|
|
|
|
@retval
|
|
|
|
0 Statistics for tables was successfully read
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
*/
|
|
|
|
|
|
|
|
int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
|
2019-10-02 14:04:52 +02:00
|
|
|
{
|
|
|
|
switch (thd->lex->sql_command) {
|
|
|
|
case SQLCOM_SELECT:
|
|
|
|
case SQLCOM_INSERT:
|
|
|
|
case SQLCOM_INSERT_SELECT:
|
|
|
|
case SQLCOM_UPDATE:
|
|
|
|
case SQLCOM_UPDATE_MULTI:
|
|
|
|
case SQLCOM_DELETE:
|
|
|
|
case SQLCOM_DELETE_MULTI:
|
|
|
|
case SQLCOM_REPLACE:
|
|
|
|
case SQLCOM_REPLACE_SELECT:
|
|
|
|
case SQLCOM_CREATE_TABLE:
|
|
|
|
case SQLCOM_SET_OPTION:
|
|
|
|
case SQLCOM_DO:
|
|
|
|
return read_statistics_for_tables(thd, tables);
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-16 16:19:59 +02:00
|
|
|
static void dump_stats_from_share_to_table(TABLE *table)
|
|
|
|
{
|
|
|
|
TABLE_SHARE *table_share= table->s;
|
|
|
|
KEY *key_info= table_share->key_info;
|
|
|
|
KEY *key_info_end= key_info + table_share->keys;
|
|
|
|
KEY *table_key_info= table->key_info;
|
|
|
|
for ( ; key_info < key_info_end; key_info++, table_key_info++)
|
|
|
|
table_key_info->read_stats= key_info->read_stats;
|
|
|
|
|
|
|
|
Field **field_ptr= table_share->field;
|
|
|
|
Field **table_field_ptr= table->field;
|
|
|
|
for ( ; *field_ptr; field_ptr++, table_field_ptr++)
|
|
|
|
(*table_field_ptr)->read_stats= (*field_ptr)->read_stats;
|
|
|
|
table->stats_is_read= true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-02 14:04:52 +02:00
|
|
|
int read_statistics_for_tables(THD *thd, TABLE_LIST *tables)
|
2012-09-08 21:04:31 +02:00
|
|
|
{
|
|
|
|
TABLE_LIST stat_tables[STATISTICS_TABLES];
|
|
|
|
Open_tables_backup open_tables_backup;
|
|
|
|
|
2019-10-02 14:04:52 +02:00
|
|
|
DBUG_ENTER("read_statistics_for_tables");
|
|
|
|
|
|
|
|
if (thd->bootstrap || thd->variables.use_stat_tables == NEVER)
|
|
|
|
DBUG_RETURN(0);
|
2012-09-08 21:04:31 +02:00
|
|
|
|
2019-10-11 15:20:28 +02:00
|
|
|
bool found_stat_table= false;
|
|
|
|
bool statistics_for_tables_is_needed= false;
|
|
|
|
|
2019-10-02 13:23:59 +02:00
|
|
|
for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
|
|
|
|
{
|
2019-10-11 15:20:28 +02:00
|
|
|
TABLE_SHARE *table_share;
|
|
|
|
if (!tl->is_view_or_derived() && tl->table && (table_share= tl->table->s) &&
|
|
|
|
table_share->tmp_table == NO_TMP_TABLE)
|
2019-10-02 13:23:59 +02:00
|
|
|
{
|
2019-10-11 15:20:28 +02:00
|
|
|
if (table_share->table_category == TABLE_CATEGORY_USER)
|
2019-10-02 13:23:59 +02:00
|
|
|
{
|
2019-10-16 16:19:59 +02:00
|
|
|
if (table_share->stats_cb.stats_are_ready())
|
2019-10-02 13:23:59 +02:00
|
|
|
{
|
2019-10-16 16:19:59 +02:00
|
|
|
if (!tl->table->stats_is_read)
|
|
|
|
dump_stats_from_share_to_table(tl->table);
|
|
|
|
tl->table->histograms_are_read=
|
|
|
|
table_share->stats_cb.histograms_are_read;
|
|
|
|
if (table_share->stats_cb.histograms_are_read ||
|
|
|
|
thd->variables.optimizer_use_condition_selectivity <= 3)
|
|
|
|
continue;
|
2019-10-02 13:23:59 +02:00
|
|
|
}
|
2019-10-16 16:19:59 +02:00
|
|
|
statistics_for_tables_is_needed= true;
|
2019-10-02 13:23:59 +02:00
|
|
|
}
|
2019-10-11 15:20:28 +02:00
|
|
|
else if (is_stat_table(tl->db, tl->alias))
|
|
|
|
found_stat_table= true;
|
2019-10-02 13:23:59 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
DEBUG_SYNC(thd, "statistics_read_start");
|
|
|
|
|
2019-10-11 15:20:28 +02:00
|
|
|
/*
|
|
|
|
Do not read statistics for any query that explicitly involves
statistical tables, otherwise we may end up in a deadlock.
|
|
|
|
*/
|
|
|
|
if (found_stat_table || !statistics_for_tables_is_needed)
|
2012-09-08 21:04:31 +02:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_stat_tables(thd, stat_tables, &open_tables_backup, FALSE))
|
2012-09-08 21:04:31 +02:00
|
|
|
DBUG_RETURN(1);
|
|
|
|
|
|
|
|
for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
|
|
|
|
{
|
2019-10-11 15:20:28 +02:00
|
|
|
TABLE_SHARE *table_share;
|
|
|
|
if (!tl->is_view_or_derived() && tl->table && (table_share= tl->table->s) &&
|
|
|
|
table_share->tmp_table == NO_TMP_TABLE &&
|
|
|
|
table_share->table_category == TABLE_CATEGORY_USER)
|
|
|
|
{
|
2019-10-16 16:19:59 +02:00
|
|
|
if (!tl->table->stats_is_read)
|
2012-09-08 21:04:31 +02:00
|
|
|
{
|
2019-10-16 16:19:59 +02:00
|
|
|
if (!read_statistics_for_table(thd, tl->table, stat_tables))
|
|
|
|
dump_stats_from_share_to_table(tl->table);
|
|
|
|
else
|
|
|
|
continue;
|
2012-09-08 21:04:31 +02:00
|
|
|
}
|
2019-10-11 15:20:28 +02:00
|
|
|
if (thd->variables.optimizer_use_condition_selectivity > 3 &&
|
2019-04-30 22:14:45 +02:00
|
|
|
!table_share->stats_cb.histograms_are_read)
|
2013-03-26 07:48:29 +01:00
|
|
|
{
|
|
|
|
(void) read_histograms_for_table(thd, tl->table, stat_tables);
|
|
|
|
table_share->stats_cb.histograms_are_read= TRUE;
|
|
|
|
}
|
2019-04-30 22:14:45 +02:00
|
|
|
if (table_share->stats_cb.histograms_are_read)
|
2013-03-26 07:48:29 +01:00
|
|
|
tl->table->histograms_are_read= TRUE;
|
2012-09-08 21:04:31 +02:00
|
|
|
}
|
2019-10-11 15:20:28 +02:00
|
|
|
}
|
2012-09-08 21:04:31 +02:00
|
|
|
|
2012-03-19 09:35:32 +01:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
2012-01-05 02:51:53 +01:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
2012-03-19 09:35:32 +01:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Delete statistics on a table from all statistical tables
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
db The name of the database the table belongs to
|
|
|
|
@param
|
|
|
|
tab The name of the table whose statistics is to be deleted
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function deletes statistics on the table called 'tab' of the database
|
2012-12-05 09:31:05 +01:00
|
|
|
'db' from all statistical tables: table_stats, column_stats, index_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If all deletions are successful
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when executing the statement DROP TABLE 'tab'.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int delete_statistics_for_table(THD *thd, LEX_STRING *db, LEX_STRING *tab)
|
|
|
|
{
|
|
|
|
int err;
|
2012-12-17 06:33:17 +01:00
|
|
|
enum_binlog_format save_binlog_format;
|
2012-07-11 01:34:39 +02:00
|
|
|
TABLE *stat_table;
|
|
|
|
TABLE_LIST tables[STATISTICS_TABLES];
|
|
|
|
Open_tables_backup open_tables_backup;
|
|
|
|
int rc= 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("delete_statistics_for_table");
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
|
2012-07-11 01:34:39 +02:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Delete statistics on table from the statistical table index_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[INDEX_STAT].table;
|
|
|
|
Index_stat index_stat(stat_table, db, tab);
|
|
|
|
index_stat.set_full_table_name();
|
|
|
|
while (index_stat.find_next_stat_for_prefix(2))
|
|
|
|
{
|
|
|
|
err= index_stat.delete_stat();
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Delete statistics on table from the statistical table column_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[COLUMN_STAT].table;
|
|
|
|
Column_stat column_stat(stat_table, db, tab);
|
|
|
|
column_stat.set_full_table_name();
|
|
|
|
while (column_stat.find_next_stat_for_prefix(2))
|
|
|
|
{
|
|
|
|
err= column_stat.delete_stat();
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Delete statistics on table from the statistical table table_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[TABLE_STAT].table;
|
|
|
|
Table_stat table_stat(stat_table, db, tab);
|
|
|
|
table_stat.set_key_fields();
|
|
|
|
if (table_stat.find_stat())
|
|
|
|
{
|
|
|
|
err= table_stat.delete_stat();
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2016-06-20 08:58:31 +02:00
|
|
|
err= del_global_table_stat(thd, db, tab);
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
thd->restore_stmt_binlog_format(save_binlog_format);
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Delete statistics on a column of the specified table
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
tab The table the column belongs to
|
|
|
|
@param
|
|
|
|
col The field of the column whose statistics is to be deleted
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function deletes statistics on the column 'col' belonging to the table
|
2012-12-05 09:31:05 +01:00
|
|
|
'tab' from the statistical table column_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the deletion is successful
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when dropping a table column or when changing
|
|
|
|
the definition of this column.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col)
|
|
|
|
{
|
|
|
|
int err;
|
2012-12-17 06:33:17 +01:00
|
|
|
enum_binlog_format save_binlog_format;
|
2012-07-11 01:34:39 +02:00
|
|
|
TABLE *stat_table;
|
|
|
|
TABLE_LIST tables;
|
|
|
|
Open_tables_backup open_tables_backup;
|
|
|
|
int rc= 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("delete_statistics_for_column");
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_single_stat_table(thd, &tables, &stat_table_name[1],
|
|
|
|
&open_tables_backup, TRUE))
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
|
|
|
thd->clear_error();
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables.table;
|
|
|
|
Column_stat column_stat(stat_table, tab);
|
|
|
|
column_stat.set_key_fields(col);
|
|
|
|
if (column_stat.find_stat())
|
|
|
|
{
|
|
|
|
err= column_stat.delete_stat();
|
|
|
|
if (err)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
thd->restore_stmt_binlog_format(save_binlog_format);
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Delete statistics on an index of the specified table
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
tab The table the index belongs to
|
|
|
|
@param
|
|
|
|
key_info The descriptor of the index whose statistics is to be deleted
|
2012-12-09 00:38:15 +01:00
|
|
|
@param
|
|
|
|
ext_prefixes_only Delete statistics only on the index prefixes extended by
|
|
|
|
the components of the primary key
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@details
|
|
|
|
The function deletes statistics on the index specified by 'key_info'
|
2012-12-05 09:31:05 +01:00
|
|
|
defined on the table 'tab' from the statistical table index_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the deletion is successful
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when dropping an index, or dropping/changing the
|
|
|
|
definition of a column used in the definition of the index.
|
|
|
|
*/
|
|
|
|
|
2012-12-09 00:38:15 +01:00
|
|
|
int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
|
|
|
|
bool ext_prefixes_only)
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
|
|
|
int err;
|
2012-12-17 06:33:17 +01:00
|
|
|
enum_binlog_format save_binlog_format;
|
2012-07-11 01:34:39 +02:00
|
|
|
TABLE *stat_table;
|
|
|
|
TABLE_LIST tables;
|
|
|
|
Open_tables_backup open_tables_backup;
|
|
|
|
int rc= 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("delete_statistics_for_index");
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_single_stat_table(thd, &tables, &stat_table_name[2],
|
|
|
|
&open_tables_backup, TRUE))
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
|
|
|
thd->clear_error();
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables.table;
|
|
|
|
Index_stat index_stat(stat_table, tab);
|
2012-12-09 00:38:15 +01:00
|
|
|
if (!ext_prefixes_only)
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
2012-12-09 00:38:15 +01:00
|
|
|
index_stat.set_index_prefix_key_fields(key_info);
|
|
|
|
while (index_stat.find_next_stat_for_prefix(3))
|
|
|
|
{
|
|
|
|
err= index_stat.delete_stat();
|
|
|
|
if (err && !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2013-05-21 21:00:08 +02:00
|
|
|
for (uint i= key_info->user_defined_key_parts; i < key_info->ext_key_parts; i++)
|
2012-12-09 00:38:15 +01:00
|
|
|
{
|
|
|
|
index_stat.set_key_fields(key_info, i+1);
|
|
|
|
if (index_stat.find_next_stat_for_prefix(4))
|
|
|
|
{
|
|
|
|
err= index_stat.delete_stat();
|
|
|
|
if (err && !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
}
|
2012-07-11 01:34:39 +02:00
|
|
|
}
|
2012-12-17 06:33:17 +01:00
|
|
|
|
2016-06-20 08:58:31 +02:00
|
|
|
err= del_global_index_stat(thd, tab, key_info);
|
|
|
|
if (err && !rc)
|
|
|
|
rc= 1;
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
thd->restore_stmt_binlog_format(save_binlog_format);
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
|
|
|
Rename a table in all statistical tables
|
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
db The name of the database the table belongs to
|
|
|
|
@param
|
|
|
|
tab The name of the table to be renamed in statistical tables
|
|
|
|
@param
|
|
|
|
new_tab The new name of the table
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function replaces the name of the table 'tab' from the database 'db'
|
2012-12-05 09:31:05 +01:00
|
|
|
with 'new_tab' in all statistical tables: table_stats, column_stats,
|
|
|
|
index_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If all updates of the table name are successful
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when executing any statement that renames a table
|
|
|
|
*/
|
|
|
|
|
|
|
|
int rename_table_in_stat_tables(THD *thd, LEX_STRING *db, LEX_STRING *tab,
|
|
|
|
LEX_STRING *new_db, LEX_STRING *new_tab)
|
|
|
|
{
|
|
|
|
int err;
|
2012-12-17 06:33:17 +01:00
|
|
|
enum_binlog_format save_binlog_format;
|
2012-07-11 01:34:39 +02:00
|
|
|
TABLE *stat_table;
|
|
|
|
TABLE_LIST tables[STATISTICS_TABLES];
|
|
|
|
Open_tables_backup open_tables_backup;
|
|
|
|
int rc= 0;
|
|
|
|
|
|
|
|
DBUG_ENTER("rename_table_in_stat_tables");
|
|
|
|
|
2012-12-13 08:16:54 +01:00
|
|
|
if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
|
2016-12-09 14:13:43 +01:00
|
|
|
DBUG_RETURN(0); // not an error
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Rename table in the statistical table index_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[INDEX_STAT].table;
|
|
|
|
Index_stat index_stat(stat_table, db, tab);
|
|
|
|
index_stat.set_full_table_name();
|
2015-11-18 19:31:45 +01:00
|
|
|
|
|
|
|
Stat_table_write_iter index_iter(&index_stat);
|
|
|
|
if (index_iter.init(2))
|
|
|
|
rc= 1;
|
|
|
|
while (!index_iter.get_next_row())
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
|
|
|
err= index_stat.update_table_name_key_parts(new_db, new_tab);
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
index_stat.set_full_table_name();
|
|
|
|
}
|
2015-11-18 19:31:45 +01:00
|
|
|
index_iter.cleanup();
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Rename table in the statistical table column_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[COLUMN_STAT].table;
|
|
|
|
Column_stat column_stat(stat_table, db, tab);
|
|
|
|
column_stat.set_full_table_name();
|
2015-11-18 19:31:45 +01:00
|
|
|
Stat_table_write_iter column_iter(&column_stat);
|
|
|
|
if (column_iter.init(2))
|
|
|
|
rc= 1;
|
|
|
|
while (!column_iter.get_next_row())
|
2012-07-11 01:34:39 +02:00
|
|
|
{
|
|
|
|
err= column_stat.update_table_name_key_parts(new_db, new_tab);
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
column_stat.set_full_table_name();
|
|
|
|
}
|
2015-11-18 19:31:45 +01:00
|
|
|
column_iter.cleanup();
|
2012-07-11 01:34:39 +02:00
|
|
|
|
2012-12-05 09:31:05 +01:00
|
|
|
/* Rename table in the statistical table table_stats */
|
2012-07-11 01:34:39 +02:00
|
|
|
stat_table= tables[TABLE_STAT].table;
|
|
|
|
Table_stat table_stat(stat_table, db, tab);
|
|
|
|
table_stat.set_key_fields();
|
|
|
|
if (table_stat.find_stat())
|
|
|
|
{
|
|
|
|
err= table_stat.update_table_name_key_parts(new_db, new_tab);
|
|
|
|
if (err & !rc)
|
|
|
|
rc= 1;
|
|
|
|
}
|
|
|
|
|
2012-12-17 06:33:17 +01:00
|
|
|
thd->restore_stmt_binlog_format(save_binlog_format);
|
2012-09-11 07:22:57 +02:00
|
|
|
|
2012-07-11 01:34:39 +02:00
|
|
|
close_system_tables(thd, &open_tables_backup);
|
|
|
|
|
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@brief
|
2012-12-05 09:31:05 +01:00
|
|
|
Rename a column in the statistical table column_stats
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@param
|
|
|
|
thd The thread handle
|
|
|
|
@param
|
|
|
|
tab The table the column belongs to
|
|
|
|
@param
|
|
|
|
col The column to be renamed
|
|
|
|
@param
|
|
|
|
new_name The new column name
|
|
|
|
|
|
|
|
@details
|
|
|
|
The function replaces the name of the column 'col' belonging to the table
|
2012-12-05 09:31:05 +01:00
|
|
|
'tab' with 'new_name' in the statistical table column_stats.
|
2012-07-11 01:34:39 +02:00
|
|
|
|
|
|
|
@retval
|
|
|
|
0 If the update of the column name is successful
|
|
|
|
@retval
|
|
|
|
1 Otherwise
|
|
|
|
|
|
|
|
@note
|
|
|
|
The function is called when executing any statement that renames a column,
|
|
|
|
but does not change the column definition.
|
|
|
|
*/

int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
                                 const char *new_name)
{
  int err;
  enum_binlog_format save_binlog_format;
  TABLE *stat_table;
  TABLE_LIST tables;
  Open_tables_backup open_tables_backup;
  int rc= 0;
  DBUG_ENTER("rename_column_in_stat_tables");

  if (tab->s->tmp_table != NO_TMP_TABLE)
    DBUG_RETURN(0);

  if (open_single_stat_table(thd, &tables, &stat_table_name[1],
                             &open_tables_backup, TRUE))
  {
    thd->clear_error();
    DBUG_RETURN(rc);
  }

  save_binlog_format= thd->set_current_stmt_binlog_format_stmt();

  /* Rename the column in the statistical table column_stats */
  stat_table= tables.table;
  Column_stat column_stat(stat_table, tab);
  column_stat.set_key_fields(col);
  if (column_stat.find_stat())
  {
    err= column_stat.update_column_key_part(new_name);
    if (err && !rc)
      rc= 1;
  }

  thd->restore_stmt_binlog_format(save_binlog_format);

  close_system_tables(thd, &open_tables_backup);

  DBUG_RETURN(rc);
}

/**
  @brief
  Set statistics for a table that will be used by the optimizer

  @param
  thd         The thread handle
  @param
  table       The table to set statistics for

  @details
  Depending on the value of thd->variables.use_stat_tables the function
  decides whether the statistical data used by the optimizer for 'table'
  is taken from the persistent statistical tables or from the storage
  engine.
*/
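
/*
  In effect (a summary of the assignments below, not additional logic):
  with use_stat_tables set to NEVER or COMPLEMENTARY the engine estimates
  are used, while with PREFERABLY the cardinality and avg_frequency values
  read from the statistical tables take precedence when they are available.
*/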

void set_statistics_for_table(THD *thd, TABLE *table)
{
  TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb;
  Table_statistics *read_stats= stats_cb->table_stats;
  Use_stat_tables_mode use_stat_table_mode= get_use_stat_tables_mode(thd);
  table->used_stat_records=
    (use_stat_table_mode <= COMPLEMENTARY ||
     !table->stats_is_read || read_stats->cardinality_is_null) ?
    table->file->stats.records : read_stats->cardinality;

  /*
    For a partitioned table, EITS statistics are based on data from all
    partitions.

    On the other hand, partition pruning figures out which partitions will
    be accessed and then computes the estimate of rows in used_partitions.

    Use the estimate from partition pruning as it is typically more precise.
    Ideally, EITS should provide per-partition statistics, but this is not
    currently implemented.
  */
#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (table->part_info)
    table->used_stat_records= table->file->stats.records;
#endif

  KEY *key_info, *key_info_end;
  for (key_info= table->key_info, key_info_end= key_info+table->s->keys;
       key_info < key_info_end; key_info++)
  {
    key_info->is_statistics_from_stat_tables=
      (use_stat_table_mode > COMPLEMENTARY &&
       table->stats_is_read &&
       key_info->read_stats->avg_frequency_is_inited() &&
       key_info->read_stats->get_avg_frequency(0) > 0.5);
  }
}

/**
  @brief
  Get the average frequency for a column

  @param
  field       The column whose average frequency is required

  @retval
  The required average frequency
*/
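
/*
  Note: 'avg_frequency' is the average number of rows per distinct non-NULL
  value, roughly (#non-NULL values / #distinct values). For example
  (illustrative numbers), a column with 1000 non-NULL values spread over
  100 distinct values has avg_frequency = 10, so an equality predicate on
  it is expected to match about 10 rows.
*/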

double get_column_avg_frequency(Field * field)
{
  double res;
  TABLE *table= field->table;

  /*
    Statistics are shared by table instances and are accessed through
    the table share. If table->s->field is not set for 'table', then
    no column statistics are available for the table.
  */
  if (!table->s->field)
  {
    res= table->stat_records();
    return res;
  }

  Column_statistics *col_stats= table->s->field[field->field_index]->read_stats;

  if (!col_stats)
    res= table->stat_records();
  else
    res= col_stats->get_avg_frequency();
  return res;
}

/**
  @brief
  Estimate the number of rows in a column range using data from stat tables

  @param
  field       The column whose range cardinality is to be estimated
  @param
  min_endp    The left end of the range whose cardinality is required
  @param
  max_endp    The right end of the range whose cardinality is required
  @param
  range_flag  The range flags

  @details
  The function gets an estimate of the number of rows in a column range
  using the statistical data from the table column_stats.

  @retval
  The required estimate of the rows in the column range
*/
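
/*
  A rough worked example (illustrative numbers): for a table with 1000 rows,
  nulls_ratio = 0.1 and no histogram, a range covering 25% of the
  [min_value, max_value] interval is estimated as
    (1000 - 100) * 0.25 = 225 rows,
  plus the 100 NULL rows when the range includes NULLs.
*/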

double get_column_range_cardinality(Field *field,
                                    key_range *min_endp,
                                    key_range *max_endp,
                                    uint range_flag)
{
  double res;
  TABLE *table= field->table;
  Column_statistics *col_stats= table->field[field->field_index]->read_stats;
  double tab_records= table->stat_records();

  if (!col_stats)
    return tab_records;
  /*
    Use column statistics only if they have actually been read from
    the stat tables. For example, reading the statistics for a table
    may have been disabled because of the risk of a deadlock.
  */
  if (!table->stats_is_read)
    return tab_records;

  double col_nulls= tab_records * col_stats->get_nulls_ratio();

  double col_non_nulls= tab_records - col_nulls;

  /* TRUE if the range starts at NULL inclusive, i.e. covers the NULL values */
  bool nulls_incl= field->null_ptr && min_endp && min_endp->key[0] &&
                   !(range_flag & NEAR_MIN);

  if (col_non_nulls < 1)
  {
    if (nulls_incl)
      res= col_nulls;
    else
      res= 0;
  }
  else if (min_endp && max_endp && min_endp->length == max_endp->length &&
           !memcmp(min_endp->key, max_endp->key, min_endp->length))
  {
    if (nulls_incl)
    {
      /* This is a single point NULL range */
      res= col_nulls;
    }
    else
    {
      double avg_frequency= col_stats->get_avg_frequency();
      res= avg_frequency;
      if (avg_frequency > 1.0 + 0.000001 &&
          col_stats->min_max_values_are_provided())
      {
        Histogram *hist= &col_stats->histogram;
        if (hist->is_available())
        {
          store_key_image_to_rec(field, (uchar *) min_endp->key,
                                 field->key_length());
          double pos= field->pos_in_interval(col_stats->min_value,
                                             col_stats->max_value);
          res= col_non_nulls *
               hist->point_selectivity(pos,
                                       avg_frequency / col_non_nulls);
        }
      }
      else if (avg_frequency == 0.0)
      {
        /* This actually means there is no statistics data */
        res= tab_records;
      }
    }
  }
  else
  {
    if (col_stats->min_max_values_are_provided())
    {
      double sel, min_mp_pos, max_mp_pos;

      if (min_endp && !(field->null_ptr && min_endp->key[0]))
      {
        store_key_image_to_rec(field, (uchar *) min_endp->key,
                               field->key_length());
        min_mp_pos= field->pos_in_interval(col_stats->min_value,
                                           col_stats->max_value);
      }
      else
        min_mp_pos= 0.0;
      if (max_endp)
      {
        store_key_image_to_rec(field, (uchar *) max_endp->key,
                               field->key_length());
        max_mp_pos= field->pos_in_interval(col_stats->min_value,
                                           col_stats->max_value);
      }
      else
        max_mp_pos= 1.0;

      Histogram *hist= &col_stats->histogram;
      if (!hist->is_available())
        sel= (max_mp_pos - min_mp_pos);
      else
        sel= hist->range_selectivity(min_mp_pos, max_mp_pos);
      res= col_non_nulls * sel;
      set_if_bigger(res, col_stats->get_avg_frequency());
    }
    else
      res= col_non_nulls;
    if (nulls_incl)
      res+= col_nulls;
  }
  return res;
}

/*
  Estimate the selectivity of "col=const" using a histogram

  @param pos      Position of the "const" between the column's min_value and
                  max_value. This is a number in the [0..1] range.
  @param avg_sel  Average selectivity of the condition "col=const" in this
                  table. It is calculated as
                  (#non_null_values / #distinct_values).

  @return
  Expected condition selectivity (a number between 0 and 1)

  @notes
  [re_zero_length_buckets] If a bucket with zero value-length is in the
  middle of the histogram, we will not have min==max. Example: suppose
  pos_value=0x12, and the histogram is:

        #n  #n+1 #n+2
  ... 0x10 0x12 0x12 0x14 ...
             |
             +------------- bucket with zero value-length

  Here, we will get min=#n+1, max=#n+2, and use the multi-bucket formula.

  The problem happens at the histogram ends. If pos_value=0, and the
  histogram is:

  0x00 0x10 ...

  then min=0, max=0. This means pos_value is contained within bucket #0,
  but, on the other hand, the histogram data says that the bucket has only
  one value.
*/
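
/*
  A rough worked example (illustrative numbers): with get_width() = 31
  buckets, avg_bucket_width = 1/32. If the single bucket containing 'pos'
  covers only 1/64 of the [min_value, max_value] domain (half the average
  width), values in it are assumed to be twice as frequent, so the returned
  selectivity is about 2 * avg_sel.
*/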

double Histogram::point_selectivity(double pos, double avg_sel)
{
  double sel;
  /* Find the bucket that contains the value 'pos'. */
  uint min= find_bucket(pos, TRUE);
  uint pos_value= (uint) (pos * prec_factor());

  /* Find how many buckets this value occupies */
  uint max= min;
  while (max + 1 < get_width() && get_value(max + 1) == pos_value)
    max++;

  /*
    A special case: we're looking at a single bucket, and that bucket has
    zero value-length. Use the multi-bucket formula (an attempt to use the
    single-bucket formula would cause a division by zero).

    For more details see [re_zero_length_buckets] above.
  */
  if (max == min && get_value(max) == ((max==0)? 0 : get_value(max-1)))
    max++;

  if (max > min)
  {
    /*
      The value occupies multiple buckets. Use start_bucket ... end_bucket as
      the selectivity.
    */
    double bucket_sel= 1.0/(get_width() + 1);
    sel= bucket_sel * (max - min + 1);
  }
  else
  {
    /*
      The value 'pos' fits within one single histogram bucket.

      Histogram buckets have the same number of rows, but they cover
      different ranges of values.

      We assume that values are uniformly distributed across the [0..1] value
      range.
    */

    /*
      If all buckets covered value ranges of the same size, the width of
      a value range would be:
    */
    double avg_bucket_width= 1.0 / (get_width() + 1);

    /*
      Let's see what the width of the value range covered by our bucket is.
      (min==max currently; they are kept in the formula just in case we
      want to extend it to handle the multi-bucket case.)
    */
    double inv_prec_factor= (double) 1.0 / prec_factor();
    double current_bucket_width=
      (max + 1 == get_width() ? 1.0 : (get_value(max) * inv_prec_factor)) -
      (min == 0 ? 0.0 : (get_value(min-1) * inv_prec_factor));

    DBUG_ASSERT(current_bucket_width); /* We shouldn't get a zero-width bucket here */

    /*
      So:
      - each bucket has the same #rows
      - values are uniformly distributed across the [min_value,max_value]
        domain.

      If a bucket covers a value range that's N times bigger than average,
      then each value in it must have N times fewer rows than average.
    */
    sel= avg_sel * avg_bucket_width / current_bucket_width;

    /*
      (Q: if we just follow this proportion we may end up in a situation
      where the number of different values we expect to find in this bucket
      exceeds the number of rows that this histogram has per bucket. Are
      we ok with this or would we want to have certain caps?)
    */
  }
  return sel;
}

/*
  Check whether the table is one of the persistent statistical tables.
*/
bool is_stat_table(const char *db, const char *table)
{
  DBUG_ASSERT(db && table);

  if (!my_strcasecmp(table_alias_charset, db, stat_tables_db_name.str))
  {
    for (uint i= 0; i < STATISTICS_TABLES; i ++)
    {
      if (!my_strcasecmp(table_alias_charset, table, stat_table_name[i].str))
        return true;
    }
  }
  return false;
}
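
/*
  For example, is_stat_table("mysql", "column_stats") returns true, while
  is_stat_table("mysql", "user") returns false; the comparison uses
  table_alias_charset, so it follows the server's identifier case rules.
*/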

/*
  Check whether we can use EITS statistics for a field or not

  TRUE : Use EITS for the column
  FALSE: Otherwise
*/

bool is_eits_usable(Field *field)
{
  Column_statistics* col_stats= field->read_stats;

  // check if column_statistics was allocated for this field
  if (!col_stats)
    return false;

  DBUG_ASSERT(field->table->stats_is_read);

  /*
    (1): checks if we have EITS statistics for a particular column
    (2): Don't use EITS for GEOMETRY columns
    (3): Don't read EITS statistics for columns involved in the
         partition list of a table. We assume the selectivity for
         such columns would be handled during partition pruning.
  */

  return !col_stats->no_stat_values_provided() &&        //(1)
    field->type() != MYSQL_TYPE_GEOMETRY &&              //(2)
#ifdef WITH_PARTITION_STORAGE_ENGINE
    (!field->table->part_info ||
     !field->table->part_info->field_in_partition_expr(field)) &&   //(3)
#endif
    true;
}