Merge 10.2 into 10.3
commit 2b6f804490
24 changed files with 294 additions and 284 deletions
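The merge below brings in MDEV-23991 (dict_table_stats_lock() has unnecessarily long scope): the per-table stats_latch is removed and the statistics fields are instead updated in short critical sections on dict_sys->mutex, with the slow sampling done outside the mutex and the results returned by value. The following is an illustrative sketch of that pattern only, using std::mutex and toy structs as stand-ins for dict_sys->mutex and dict_index_t; it is not the real InnoDB API.

#include <cstdint>
#include <mutex>
#include <vector>

struct index_field_stats_t {            // one entry per unique-prefix column
  uint64_t n_diff_key_vals = 0;
  uint64_t n_sample_sizes = 0;
  uint64_t n_non_null_key_vals = 0;
};

struct index_t {                        // stand-in for dict_index_t
  std::vector<uint64_t> stat_n_diff_key_vals;
  std::vector<uint64_t> stat_n_sample_sizes;
  std::vector<uint64_t> stat_n_non_null_key_vals;
};

std::mutex dict_sys_mutex;              // stand-in for dict_sys->mutex

// Slow part: sample the index WITHOUT holding dict_sys_mutex.
std::vector<index_field_stats_t> estimate_stats(size_t n_uniq) {
  std::vector<index_field_stats_t> result(n_uniq);
  for (size_t i = 0; i < n_uniq; ++i) {
    result[i].n_diff_key_vals = 100 * (i + 1);   // pretend sampling result
    result[i].n_sample_sizes = 20;
    result[i].n_non_null_key_vals = 90 * (i + 1);
  }
  return result;                        // returned by value, no latch held
}

// Fast part: publish the results under the mutex, the shape the merged code
// uses in dict_stats_update_transient_for_index() and dict_stats_update_persistent().
void publish_stats(index_t& index, const std::vector<index_field_stats_t>& stats) {
  std::lock_guard<std::mutex> lock(dict_sys_mutex);
  for (size_t i = 0; i < stats.size(); ++i) {
    index.stat_n_diff_key_vals[i] = stats[i].n_diff_key_vals;
    index.stat_n_sample_sizes[i] = stats[i].n_sample_sizes;
    index.stat_n_non_null_key_vals[i] = stats[i].n_non_null_key_vals;
  }
}

int main() {
  index_t idx{std::vector<uint64_t>(3), std::vector<uint64_t>(3),
              std::vector<uint64_t>(3)};
  publish_stats(idx, estimate_stats(3));   // compute first, lock briefly
  return 0;
}

The hunks below apply exactly this split to btr_estimate_number_of_different_key_vals(), dict_stats_analyze_index(), dict_stats_update_persistent() and ha_innobase::info_low().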
@@ -38,6 +38,7 @@ ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
connection default;
SET DEBUG_SYNC='now WAIT_FOR ddl';
SET GLOBAL innodb_flush_log_at_trx_commit=1;
SET debug_dbug='+d,dict_sys_mutex_avoid';
DELETE FROM t1;
# Kill the server
disconnect ddl;
18
mysql-test/suite/innodb/r/stats_persistent.result
Normal file
@@ -0,0 +1,18 @@
#
# MDEV-23991 dict_table_stats_lock() has unnecessarily long scope
#
CREATE TABLE t1(a INT) ENGINE=INNODB STATS_PERSISTENT=1;
SET DEBUG_SYNC='dict_stats_update_persistent SIGNAL stop WAIT_FOR go';
ANALYZE TABLE t1;
connect con1, localhost, root;
SET DEBUG_SYNC='now WAIT_FOR stop';
SELECT ENGINE,SUM(DATA_LENGTH+INDEX_LENGTH),COUNT(ENGINE),SUM(DATA_LENGTH),SUM(INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB';
ENGINE SUM(DATA_LENGTH+INDEX_LENGTH) COUNT(ENGINE) SUM(DATA_LENGTH) SUM(INDEX_LENGTH)
InnoDB 114688 4 65536 49152
SET DEBUG_SYNC='now SIGNAL go';
disconnect con1;
connection default;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SET DEBUG_SYNC= 'RESET';
DROP TABLE t1;
@@ -54,6 +54,7 @@ ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
connection default;
SET DEBUG_SYNC='now WAIT_FOR ddl';
SET GLOBAL innodb_flush_log_at_trx_commit=1;
SET debug_dbug='+d,dict_sys_mutex_avoid';
DELETE FROM t1;

--source include/kill_mysqld.inc
27
mysql-test/suite/innodb/t/stats_persistent.test
Normal file
@@ -0,0 +1,27 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/count_sessions.inc

--echo #
--echo # MDEV-23991 dict_table_stats_lock() has unnecessarily long scope
--echo #
CREATE TABLE t1(a INT) ENGINE=INNODB STATS_PERSISTENT=1;

SET DEBUG_SYNC='dict_stats_update_persistent SIGNAL stop WAIT_FOR go';
--send ANALYZE TABLE t1

--connect(con1, localhost, root)
SET DEBUG_SYNC='now WAIT_FOR stop';

SELECT ENGINE,SUM(DATA_LENGTH+INDEX_LENGTH),COUNT(ENGINE),SUM(DATA_LENGTH),SUM(INDEX_LENGTH) FROM information_schema.TABLES WHERE ENGINE='InnoDB';

SET DEBUG_SYNC='now SIGNAL go';
--disconnect con1

--connection default
--reap
SET DEBUG_SYNC= 'RESET';
DROP TABLE t1;

--source include/wait_until_count_sessions.inc
@@ -284,7 +284,7 @@ the index.
ulint
btr_height_get(
/*===========*/
dict_index_t* index, /*!< in: index tree */
const dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint height=0;
@@ -591,7 +591,7 @@ Gets the number of pages in a B-tree.
ulint
btr_get_size(
/*=========*/
dict_index_t* index, /*!< in: index */
const dict_index_t* index, /*!< in: index */
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
@@ -6558,21 +6558,19 @@ btr_record_not_null_field_in_rec(
}
}

/*******************************************************************//**
Estimates the number of different key values in a given index, for
/** Estimates the number of different key values in a given index, for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
0..n_uniq-1) and the number of pages that were sampled is saved in
index->stat_n_sample_sizes[].
result.n_sample_sizes[].
If innodb_stats_method is nulls_ignored, we also record the number of
non-null values for each prefix and stored the estimates in
array index->stat_n_non_null_key_vals.
@return true if the index is available and we get the estimated numbers,
false if the index is unavailable. */
bool
btr_estimate_number_of_different_key_vals(
/*======================================*/
dict_index_t* index) /*!< in: index */
array result.n_non_null_key_vals.
@param[in] index index
@return vector with statistics information
empty vector if the index is unavailable. */
std::vector<index_field_stats_t>
btr_estimate_number_of_different_key_vals(dict_index_t* index)
{
btr_cur_t cursor;
page_t* page;
@@ -6592,11 +6590,11 @@ btr_estimate_number_of_different_key_vals(
rec_offs* offsets_rec = NULL;
rec_offs* offsets_next_rec = NULL;

std::vector<index_field_stats_t> result;

/* For spatial index, there is no such stats can be
fetched. */
if (dict_index_is_spatial(index)) {
return(false);
}
ut_ad(!dict_index_is_spatial(index));

n_cols = dict_index_get_n_unique(index);

@@ -6705,7 +6703,7 @@ btr_estimate_number_of_different_key_vals(
mtr_commit(&mtr);
mem_heap_free(heap);

return(false);
return result;
}

/* Count the number of different key values for each prefix of
@@ -6811,8 +6809,12 @@ exit_loop:
also the pages used for external storage of fields (those pages are
included in index->stat_n_leaf_pages) */

result.reserve(n_cols);

for (j = 0; j < n_cols; j++) {
index->stat_n_diff_key_vals[j]
index_field_stats_t stat;

stat.n_diff_key_vals
= BTR_TABLE_STATS_FROM_SAMPLE(
n_diff[j], index, n_sample_pages,
total_external_size, not_empty_flag);
@@ -6833,25 +6835,23 @@ exit_loop:
add_on = n_sample_pages;
}

index->stat_n_diff_key_vals[j] += add_on;
stat.n_diff_key_vals += add_on;

index->stat_n_sample_sizes[j] = n_sample_pages;
stat.n_sample_sizes = n_sample_pages;

/* Update the stat_n_non_null_key_vals[] with our
sampled result. stat_n_non_null_key_vals[] is created
and initialized to zero in dict_index_add_to_cache(),
along with stat_n_diff_key_vals[] array */
if (n_not_null != NULL) {
index->stat_n_non_null_key_vals[j] =
stat.n_non_null_key_vals =
BTR_TABLE_STATS_FROM_SAMPLE(
n_not_null[j], index, n_sample_pages,
total_external_size, not_empty_flag);
}

result.push_back(stat);
}

mem_heap_free(heap);

return(true);
return result;
}

/*================== EXTERNAL STORAGE OF BIG FIELDS ===================*/
@@ -262,56 +262,6 @@ dict_mutex_exit_for_mysql(void)
mutex_exit(&dict_sys->mutex);
}

/** Lock the appropriate latch to protect a given table's statistics.
@param[in] table table whose stats to lock
@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */
void
dict_table_stats_lock(
dict_table_t* table,
ulint latch_mode)
{
ut_ad(table != NULL);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);

switch (latch_mode) {
case RW_S_LATCH:
rw_lock_s_lock(&table->stats_latch);
break;
case RW_X_LATCH:
rw_lock_x_lock(&table->stats_latch);
break;
case RW_NO_LATCH:
/* fall through */
default:
ut_error;
}
}

/** Unlock the latch that has been locked by dict_table_stats_lock().
@param[in] table table whose stats to unlock
@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */
void
dict_table_stats_unlock(
dict_table_t* table,
ulint latch_mode)
{
ut_ad(table != NULL);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);

switch (latch_mode) {
case RW_S_LATCH:
rw_lock_s_unlock(&table->stats_latch);
break;
case RW_X_LATCH:
rw_lock_x_unlock(&table->stats_latch);
break;
case RW_NO_LATCH:
/* fall through */
default:
ut_error;
}
}

/**********************************************************************//**
Try to drop any indexes after an aborted index creation.
This can also be after a server kill during DROP INDEX. */
@@ -122,7 +122,6 @@ operator<<(
@param n_v_cols number of virtual columns
@param flags table flags
@param flags2 table flags2
@param init_stats_latch whether to init the stats latch
@return own: table object */
dict_table_t*
dict_mem_table_create(
@@ -131,8 +130,7 @@ dict_mem_table_create(
ulint n_cols,
ulint n_v_cols,
ulint flags,
ulint flags2,
bool init_stats_latch)
ulint flags2)
{
dict_table_t* table;
mem_heap_t* heap;
@@ -193,12 +191,6 @@ dict_mem_table_create(
new(&table->foreign_set) dict_foreign_set();
new(&table->referenced_set) dict_foreign_set();

if (init_stats_latch) {
rw_lock_create(dict_table_stats_key, &table->stats_latch,
SYNC_INDEX_TREE);
table->stats_latch_inited = true;
}

return(table);
}

@@ -248,10 +240,6 @@ dict_mem_table_free(
UT_DELETE(table->s_cols);
}

if (table->stats_latch_inited) {
rw_lock_free(&table->stats_latch);
}

mem_heap_free(table->heap);
}

@@ -484,8 +484,6 @@ dict_stats_table_clone_create(

ut_d(t->magic_n = DICT_TABLE_MAGIC_N);

rw_lock_create(dict_table_stats_key, &t->stats_latch, SYNC_INDEX_TREE);

return(t);
}

@@ -498,15 +496,12 @@ dict_stats_table_clone_free(
/*========================*/
dict_table_t* t) /*!< in: dummy table object to free */
{
rw_lock_free(&t->stats_latch);
mem_heap_free(t->heap);
}

/*********************************************************************//**
Write all zeros (or 1 where it makes sense) into an index
statistics members. The resulting stats correspond to an empty index.
The caller must own index's table stats latch in X mode
(dict_table_stats_lock(table, RW_X_LATCH)) */
statistics members. The resulting stats correspond to an empty index. */
static
void
dict_stats_empty_index(
@@ -517,6 +512,7 @@ dict_stats_empty_index(
{
ut_ad(!(index->type & DICT_FTS));
ut_ad(!dict_index_is_ibuf(index));
ut_ad(mutex_own(&dict_sys->mutex));

ulint n_uniq = index->n_uniq;

@@ -546,10 +542,9 @@ dict_stats_empty_table(
bool empty_defrag_stats)
/*!< in: whether to empty defrag stats */
{
mutex_enter(&dict_sys->mutex);

/* Zero the stats members */

dict_table_stats_lock(table, RW_X_LATCH);

table->stat_n_rows = 0;
table->stat_clustered_index_size = 1;
/* 1 page for each index, not counting the clustered */
@@ -573,8 +568,7 @@ dict_stats_empty_table(
}

table->stat_initialized = TRUE;

dict_table_stats_unlock(table, RW_X_LATCH);
mutex_exit(&dict_sys->mutex);
}

/*********************************************************************//**
@@ -673,6 +667,8 @@ dict_stats_copy(
to have the same statistics as if
the table was empty */
{
ut_ad(mutex_own(&dict_sys->mutex));

dst->stats_last_recalc = src->stats_last_recalc;
dst->stat_n_rows = src->stat_n_rows;
dst->stat_clustered_index_size = src->stat_clustered_index_size;
@@ -790,8 +786,6 @@ dict_stats_snapshot_create(
{
mutex_enter(&dict_sys->mutex);

dict_table_stats_lock(table, RW_S_LATCH);

dict_stats_assert_initialized(table);

dict_table_t* t;
@@ -805,8 +799,6 @@ dict_stats_snapshot_create(
t->stats_sample_pages = table->stats_sample_pages;
t->stats_bg_flag = table->stats_bg_flag;

dict_table_stats_unlock(table, RW_S_LATCH);

mutex_exit(&dict_sys->mutex);

return(t);
@@ -846,10 +838,14 @@ dict_stats_update_transient_for_index(
Initialize some bogus index cardinality
statistics, so that the data can be queried in
various means, also via secondary indexes. */
mutex_enter(&dict_sys->mutex);
dict_stats_empty_index(index, false);
mutex_exit(&dict_sys->mutex);
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
} else if (ibuf_debug && !dict_index_is_clust(index)) {
mutex_enter(&dict_sys->mutex);
dict_stats_empty_index(index, false);
mutex_exit(&dict_sys->mutex);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
} else {
mtr_t mtr;
@@ -870,7 +866,9 @@ dict_stats_update_transient_for_index(

switch (size) {
case ULINT_UNDEFINED:
mutex_enter(&dict_sys->mutex);
dict_stats_empty_index(index, false);
mutex_exit(&dict_sys->mutex);
return;
case 0:
/* The root node of the tree is a leaf */
@@ -882,11 +880,23 @@ dict_stats_update_transient_for_index(
/* Do not continue if table decryption has failed or
table is already marked as corrupted. */
if (index->is_readable()) {
/* We don't handle the return value since it
will be false only when some thread is
dropping the table and we don't have to empty
the statistics of the to be dropped index */
btr_estimate_number_of_different_key_vals(index);
std::vector<index_field_stats_t> stats
= btr_estimate_number_of_different_key_vals(
index);

if (!stats.empty()) {
ut_ad(!mutex_own(&dict_sys->mutex));
mutex_enter(&dict_sys->mutex);
for (size_t i = 0; i < stats.size(); ++i) {
index->stat_n_diff_key_vals[i]
= stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i]
= stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i]
= stats[i].n_non_null_key_vals;
}
mutex_exit(&dict_sys->mutex);
}
}
}
}
@@ -903,6 +913,8 @@ dict_stats_update_transient(
/*========================*/
dict_table_t* table) /*!< in/out: table */
{
ut_ad(!mutex_own(&dict_sys->mutex));

dict_index_t* index;
ulint sum_of_index_sizes = 0;

@@ -928,27 +940,25 @@ dict_stats_update_transient(

ut_ad(!dict_index_is_ibuf(index));

if (index->type & DICT_FTS || dict_index_is_spatial(index)) {
if (index->type & (DICT_FTS | DICT_SPATIAL)) {
continue;
}

dict_stats_empty_index(index, false);

if (dict_stats_should_ignore_index(index)) {
if (dict_stats_should_ignore_index(index)
|| !index->is_readable()) {
mutex_enter(&dict_sys->mutex);
dict_stats_empty_index(index, false);
mutex_exit(&dict_sys->mutex);
continue;
}

/* Do not continue if table decryption has failed or
table is already marked as corrupted. */
if (!index->is_readable()) {
break;
}

dict_stats_update_transient_for_index(index);

sum_of_index_sizes += index->stat_index_size;
}

mutex_enter(&dict_sys->mutex);

index = dict_table_get_first_index(table);

table->stat_n_rows = index->stat_n_diff_key_vals[
@@ -964,6 +974,8 @@ dict_stats_update_transient(
table->stat_modified_counter = 0;

table->stat_initialized = TRUE;

mutex_exit(&dict_sys->mutex);
}

/* @{ Pseudo code about the relation between the following functions
@@ -1808,16 +1820,31 @@ dict_stats_analyze_index_for_n_prefix(
btr_pcur_close(&pcur);
}

/** statistics for an index */
struct index_stats_t
{
std::vector<index_field_stats_t> stats;
ulint index_size;
ulint n_leaf_pages;

index_stats_t(ulint n_uniq) : index_size(1), n_leaf_pages(1)
{
stats.reserve(n_uniq);
for (ulint i= 0; i < n_uniq; ++i)
stats.push_back(index_field_stats_t(0, 1, 0));
}
};

/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[].
@param[in] n_diff_data input data to use to derive the results
@param[in,out] index index whose stat_n_diff_key_vals[] to set */
@param[in,out] index_stats index stats to set */
UNIV_INLINE
void
dict_stats_index_set_n_diff(
const n_diff_data_t* n_diff_data,
dict_index_t* index)
index_stats_t& index_stats)
{
for (ulint n_prefix = dict_index_get_n_unique(index);
for (ulint n_prefix = index_stats.stats.size();
n_prefix >= 1;
n_prefix--) {
/* n_diff_all_analyzed_pages can be 0 here if
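The new index_stats_t above bundles everything dict_stats_analyze_index() used to write straight into dict_index_t: a per-unique-prefix vector of index_field_stats_t plus index_size and n_leaf_pages, pre-initialized to the same defaults an empty index gets. A stand-alone sketch of the idea follows, using simplified types rather than the real InnoDB structs; the "analysis" is a toy placeholder for the page sampling.

#include <cstdint>
#include <vector>

struct field_stats_t {              // stand-in for index_field_stats_t
  uint64_t n_diff_key_vals{0};
  uint64_t n_sample_sizes{1};
  uint64_t n_non_null_key_vals{0};
};

struct index_stats_t {              // stand-in for the struct added here
  std::vector<field_stats_t> stats; // one slot per unique-prefix column
  uint64_t index_size{1};
  uint64_t n_leaf_pages{1};
  explicit index_stats_t(size_t n_uniq) : stats(n_uniq) {}
};

// Toy "analysis": fill the per-prefix slots; the real code derives these
// from sampled B-tree pages and returns the whole object by value.
index_stats_t analyze(size_t n_uniq, uint64_t leaf_pages) {
  index_stats_t result(n_uniq);
  result.n_leaf_pages = leaf_pages;
  for (size_t i = 0; i < n_uniq; ++i) {
    result.stats[i].n_diff_key_vals = (i + 1) * 10;
    result.stats[i].n_sample_sizes = leaf_pages;
  }
  return result;                    // no table latch involved
}

int main() { return analyze(2, 8).stats.size() == 2 ? 0 : 1; }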
@@ -1848,14 +1875,14 @@ dict_stats_index_set_n_diff(
that the total number of ordinary leaf pages is
T * D / (D + E). */
n_ordinary_leaf_pages
= index->stat_n_leaf_pages
= index_stats.n_leaf_pages
* data->n_leaf_pages_to_analyze
/ (data->n_leaf_pages_to_analyze
+ data->n_external_pages_sum);
}

/* See REF01 for an explanation of the algorithm */
index->stat_n_diff_key_vals[n_prefix - 1]
index_stats.stats[n_prefix - 1].n_diff_key_vals
= n_ordinary_leaf_pages

* data->n_diff_on_level
@@ -1864,7 +1891,7 @@ dict_stats_index_set_n_diff(
* data->n_diff_all_analyzed_pages
/ data->n_leaf_pages_to_analyze;

index->stat_n_sample_sizes[n_prefix - 1]
index_stats.stats[n_prefix - 1].n_sample_sizes
= data->n_leaf_pages_to_analyze;

DEBUG_PRINTF(" %s(): n_diff=" UINT64PF
@@ -1873,9 +1900,9 @@ dict_stats_index_set_n_diff(
" * " UINT64PF " / " UINT64PF
" * " UINT64PF " / " UINT64PF ")\n",
__func__,
index->stat_n_diff_key_vals[n_prefix - 1],
index_stats.stats[n_prefix - 1].n_diff_key_vals,
n_prefix,
index->stat_n_leaf_pages,
index_stats.n_leaf_pages,
data->n_diff_on_level,
data->n_recs_on_level,
data->n_diff_all_analyzed_pages,
@@ -1883,15 +1910,12 @@ dict_stats_index_set_n_diff(
}
}

/*********************************************************************//**
Calculates new statistics for a given index and saves them to the index
/** Calculates new statistics for a given index and saves them to the index
members stat_n_diff_key_vals[], stat_n_sample_sizes[], stat_index_size and
stat_n_leaf_pages. This function could be slow. */
static
void
dict_stats_analyze_index(
/*=====================*/
dict_index_t* index) /*!< in/out: index to analyze */
stat_n_leaf_pages. This function can be slow.
@param[in] index index to analyze
@return index stats */
static index_stats_t dict_stats_analyze_index(dict_index_t* index)
{
ulint root_level;
ulint level;
@@ -1902,26 +1926,28 @@ dict_stats_analyze_index(
ib_uint64_t total_pages;
mtr_t mtr;
ulint size;
index_stats_t result(index->n_uniq);
DBUG_ENTER("dict_stats_analyze_index");

DBUG_PRINT("info", ("index: %s, online status: %d", index->name(),
dict_index_get_online_status(index)));

ut_ad(!mutex_own(&dict_sys->mutex)); // because this function is slow
ut_ad(index->table->get_ref_count());

/* Disable update statistic for Rtree */
if (dict_index_is_spatial(index)) {
DBUG_VOID_RETURN;
DBUG_RETURN(result);
}

DEBUG_PRINTF(" %s(index=%s)\n", __func__, index->name());

dict_stats_empty_index(index, false);

mtr.start();
mtr_s_lock_index(index, &mtr);
size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);

if (size != ULINT_UNDEFINED) {
index->stat_index_size = size;
result.index_size = size;
size = btr_get_size(index, BTR_N_LEAF_PAGES, &mtr);
}

@@ -1931,13 +1957,13 @@ dict_stats_analyze_index(
switch (size) {
case ULINT_UNDEFINED:
dict_stats_assert_initialized_index(index);
DBUG_VOID_RETURN;
DBUG_RETURN(result);
case 0:
/* The root node of the tree is a leaf */
size = 1;
}

index->stat_n_leaf_pages = size;
result.n_leaf_pages = size;

mtr.start();
mtr_sx_lock_index(index, &mtr);
@@ -1976,14 +2002,18 @@ dict_stats_analyze_index(
NULL /* boundaries not needed */,
&mtr);

for (ulint i = 0; i < n_uniq; i++) {
index->stat_n_sample_sizes[i] = total_pages;
}

mtr.commit();

dict_stats_assert_initialized_index(index);
DBUG_VOID_RETURN;
mutex_enter(&dict_sys->mutex);
for (ulint i = 0; i < n_uniq; i++) {
result.stats[i].n_diff_key_vals = index->stat_n_diff_key_vals[i];
result.stats[i].n_sample_sizes = total_pages;
result.stats[i].n_non_null_key_vals = index->stat_n_non_null_key_vals[i];
}
result.n_leaf_pages = index->stat_n_leaf_pages;
mutex_exit(&dict_sys->mutex);

DBUG_RETURN(result);
}

/* For each level that is being scanned in the btree, this contains the
@@ -2175,13 +2205,12 @@ found_level:
/* n_prefix == 0 means that the above loop did not end up prematurely
due to tree being changed and so n_diff_data[] is set up. */
if (n_prefix == 0) {
dict_stats_index_set_n_diff(n_diff_data, index);
dict_stats_index_set_n_diff(n_diff_data, result);
}

UT_DELETE_ARRAY(n_diff_data);

dict_stats_assert_initialized_index(index);
DBUG_VOID_RETURN;
DBUG_RETURN(result);
}

/*********************************************************************//**
@@ -2199,7 +2228,7 @@ dict_stats_update_persistent(

DEBUG_PRINTF("%s(table=%s)\n", __func__, table->name);

dict_table_stats_lock(table, RW_X_LATCH);
DEBUG_SYNC_C("dict_stats_update_persistent");

/* analyze the clustered index first */

@@ -2210,7 +2239,6 @@ dict_stats_update_persistent(
|| (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) {

/* Table definition is corrupt */
dict_table_stats_unlock(table, RW_X_LATCH);
dict_stats_empty_table(table, true);

return(DB_CORRUPTION);
@@ -2218,7 +2246,16 @@ dict_stats_update_persistent(

ut_ad(!dict_index_is_ibuf(index));

dict_stats_analyze_index(index);
index_stats_t stats = dict_stats_analyze_index(index);

mutex_enter(&dict_sys->mutex);
index->stat_index_size = stats.index_size;
index->stat_n_leaf_pages = stats.n_leaf_pages;
for (size_t i = 0; i < stats.stats.size(); ++i) {
index->stat_n_diff_key_vals[i] = stats.stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i] = stats.stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i] = stats.stats[i].n_non_null_key_vals;
}

ulint n_unique = dict_index_get_n_unique(index);

@@ -2236,7 +2273,7 @@ dict_stats_update_persistent(

ut_ad(!dict_index_is_ibuf(index));

if (index->type & DICT_FTS || dict_index_is_spatial(index)) {
if (index->type & (DICT_FTS | DICT_SPATIAL)) {
continue;
}

@@ -2247,7 +2284,20 @@ dict_stats_update_persistent(
}

if (!(table->stats_bg_flag & BG_STAT_SHOULD_QUIT)) {
dict_stats_analyze_index(index);
mutex_exit(&dict_sys->mutex);
stats = dict_stats_analyze_index(index);
mutex_enter(&dict_sys->mutex);

index->stat_index_size = stats.index_size;
index->stat_n_leaf_pages = stats.n_leaf_pages;
for (size_t i = 0; i < stats.stats.size(); ++i) {
index->stat_n_diff_key_vals[i]
= stats.stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i]
= stats.stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i]
= stats.stats[i].n_non_null_key_vals;
}
}

table->stat_sum_of_other_index_sizes
@@ -2262,7 +2312,7 @@ dict_stats_update_persistent(

dict_stats_assert_initialized(table);

dict_table_stats_unlock(table, RW_X_LATCH);
mutex_exit(&dict_sys->mutex);

return(DB_SUCCESS);
}
@@ -3090,11 +3140,22 @@ dict_stats_update_for_index(
if (dict_stats_is_persistent_enabled(index->table)) {

if (dict_stats_persistent_storage_check(false)) {
dict_table_stats_lock(index->table, RW_X_LATCH);
dict_stats_analyze_index(index);
index_stats_t stats = dict_stats_analyze_index(index);
mutex_enter(&dict_sys->mutex);
index->stat_index_size = stats.index_size;
index->stat_n_leaf_pages = stats.n_leaf_pages;
for (size_t i = 0; i < stats.stats.size(); ++i) {
index->stat_n_diff_key_vals[i]
= stats.stats[i].n_diff_key_vals;
index->stat_n_sample_sizes[i]
= stats.stats[i].n_sample_sizes;
index->stat_n_non_null_key_vals[i]
= stats.stats[i].n_non_null_key_vals;
}
index->table->stat_sum_of_other_index_sizes
+= index->stat_index_size;
dict_table_stats_unlock(index->table, RW_X_LATCH);
mutex_exit(&dict_sys->mutex);

dict_stats_save(index->table, &index->id);
DBUG_VOID_RETURN;
}
@@ -3115,9 +3176,7 @@ dict_stats_update_for_index(
}
}

dict_table_stats_lock(index->table, RW_X_LATCH);
dict_stats_update_transient_for_index(index);
dict_table_stats_unlock(index->table, RW_X_LATCH);

DBUG_VOID_RETURN;
}
@@ -3271,7 +3330,7 @@ dict_stats_update(
switch (err) {
case DB_SUCCESS:

dict_table_stats_lock(table, RW_X_LATCH);
mutex_enter(&dict_sys->mutex);

/* Pass reset_ignored_indexes=true as parameter
to dict_stats_copy. This will cause statictics
@@ -3280,7 +3339,7 @@ dict_stats_update(

dict_stats_assert_initialized(table);

dict_table_stats_unlock(table, RW_X_LATCH);
mutex_exit(&dict_sys->mutex);

dict_stats_table_clone_free(t);

@@ -3334,13 +3393,8 @@ dict_stats_update(
}

transient:

dict_table_stats_lock(table, RW_X_LATCH);

dict_stats_update_transient(table);

dict_table_stats_unlock(table, RW_X_LATCH);

return(DB_SUCCESS);
}

@@ -695,7 +695,6 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
PSI_RWLOCK_KEY(trx_purge_latch),
PSI_RWLOCK_KEY(index_tree_rw_lock),
PSI_RWLOCK_KEY(index_online_log),
PSI_RWLOCK_KEY(dict_table_stats),
PSI_RWLOCK_KEY(hash_table_locks)
};
# endif /* UNIV_PFS_RWLOCK */
@@ -14117,6 +14116,8 @@ ha_innobase::info_low(

DEBUG_SYNC_C("ha_innobase_info_low");

ut_ad(!mutex_own(&dict_sys->mutex));

/* If we are forcing recovery at a high level, we will suppress
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */
@@ -14153,7 +14154,6 @@ ha_innobase::info_low(
opt = DICT_STATS_RECALC_TRANSIENT;
}

ut_ad(!mutex_own(&dict_sys->mutex));
ret = dict_stats_update(ib_table, opt);

if (ret != DB_SUCCESS) {
@@ -14169,14 +14169,14 @@ ha_innobase::info_low(
stats.update_time = (ulong) ib_table->update_time;
}

DBUG_EXECUTE_IF("dict_sys_mutex_avoid", goto func_exit;);

if (flag & HA_STATUS_VARIABLE) {

ulint stat_clustered_index_size;
ulint stat_sum_of_other_index_sizes;

if (!(flag & HA_STATUS_NO_LOCK)) {
dict_table_stats_lock(ib_table, RW_S_LATCH);
}
mutex_enter(&dict_sys->mutex);

ut_a(ib_table->stat_initialized);

@@ -14188,9 +14188,7 @@ ha_innobase::info_low(
stat_sum_of_other_index_sizes
= ib_table->stat_sum_of_other_index_sizes;

if (!(flag & HA_STATUS_NO_LOCK)) {
dict_table_stats_unlock(ib_table, RW_S_LATCH);
}
mutex_exit(&dict_sys->mutex);

/*
The MySQL optimizer seems to assume in a left join that n_rows
@@ -14293,10 +14291,27 @@ ha_innobase::info_low(
ib_push_frm_error(m_user_thd, ib_table, table, num_innodb_index, true);
}

if (!(flag & HA_STATUS_NO_LOCK)) {
dict_table_stats_lock(ib_table, RW_S_LATCH);

snprintf(path, sizeof(path), "%s/%s%s",
mysql_data_home, table->s->normalized_path.str,
reg_ext);

unpack_filename(path,path);

/* Note that we do not know the access time of the table,
nor the CHECK TABLE time, nor the UPDATE or INSERT time. */

if (os_file_get_status(
path, &stat_info, false,
srv_read_only_mode) == DB_SUCCESS) {
stats.create_time = (ulong) stat_info.ctime;
}

struct Locking {
Locking() { mutex_enter(&dict_sys->mutex); }
~Locking() { mutex_exit(&dict_sys->mutex); }
} locking;

ut_a(ib_table->stat_initialized);

for (i = 0; i < table->s->keys; i++) {
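The Locking object introduced above is a scope guard: its constructor enters dict_sys->mutex and its destructor exits it, so the mutex is released on every path out of the block, including the early returns hidden inside the OK() macros. A generic sketch of the idiom, assuming std::mutex and a plain helper function instead of the real dict_sys->mutex and OK():

#include <mutex>

std::mutex dict_sys_mutex;               // stand-in for dict_sys->mutex

struct Locking {
  Locking() { dict_sys_mutex.lock(); }   // acquire on construction
  ~Locking() { dict_sys_mutex.unlock(); }// release on any scope exit
};

bool store_field(int) { return true; }   // stand-in for one OK(...) step

int fill_row(int value) {
  Locking locking;                       // mutex held from here on
  if (!store_field(value)) {
    return 1;                            // early return: unlock still runs
  }
  return 0;                              // normal return: unlock runs too
}

int main() { return fill_row(42); }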
@@ -14374,25 +14389,6 @@ ha_innobase::info_low(
key->rec_per_key[j] = rec_per_key_int;
}
}

if (!(flag & HA_STATUS_NO_LOCK)) {
dict_table_stats_unlock(ib_table, RW_S_LATCH);
}

snprintf(path, sizeof(path), "%s/%s%s",
mysql_data_home, table->s->normalized_path.str,
reg_ext);

unpack_filename(path,path);

/* Note that we do not know the access time of the table,
nor the CHECK TABLE time, nor the UPDATE or INSERT time. */

if (os_file_get_status(
path, &stat_info, false,
srv_read_only_mode) == DB_SUCCESS) {
stats.create_time = (ulong) stat_info.ctime;
}
}

if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
@@ -6270,38 +6270,43 @@ i_s_dict_fill_sys_tablestats(
OK(field_store_string(fields[SYS_TABLESTATS_NAME],
table->name.m_name));

dict_table_stats_lock(table, RW_S_LATCH);
{
struct Locking
{
Locking() { mutex_enter(&dict_sys->mutex); }
~Locking() { mutex_exit(&dict_sys->mutex); }
} locking;

if (table->stat_initialized) {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Initialized"));
if (table->stat_initialized) {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Initialized"));

OK(fields[SYS_TABLESTATS_NROW]->store(table->stat_n_rows,
true));
OK(fields[SYS_TABLESTATS_NROW]->store(
table->stat_n_rows, true));

OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
table->stat_clustered_index_size, true));
OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
table->stat_clustered_index_size, true));

OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
table->stat_sum_of_other_index_sizes, true));
OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
table->stat_sum_of_other_index_sizes,
true));

OK(fields[SYS_TABLESTATS_MODIFIED]->store(
table->stat_modified_counter, true));
} else {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Uninitialized"));
OK(fields[SYS_TABLESTATS_MODIFIED]->store(
table->stat_modified_counter, true));
} else {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Uninitialized"));

OK(fields[SYS_TABLESTATS_NROW]->store(0, true));
OK(fields[SYS_TABLESTATS_NROW]->store(0, true));

OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(0, true));
OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(0, true));

OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(0, true));
OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(0, true));

OK(fields[SYS_TABLESTATS_MODIFIED]->store(0, true));
OK(fields[SYS_TABLESTATS_MODIFIED]->store(0, true));
}
}

dict_table_stats_unlock(table, RW_S_LATCH);

OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, true));

OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(ref_count, true));
@@ -1447,7 +1447,7 @@ ibuf_dummy_index_create(
dict_index_t* index;

table = dict_mem_table_create("IBUF_DUMMY", NULL, n, 0,
comp ? DICT_TF_COMPACT : 0, 0, false);
comp ? DICT_TF_COMPACT : 0, 0);

index = dict_mem_index_create(table, "IBUF_DUMMY", 0, n);

@@ -219,7 +219,7 @@ the index.
ulint
btr_height_get(
/*===========*/
dict_index_t* index, /*!< in: index tree */
const dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));

@@ -597,7 +597,7 @@ Gets the number of pages in a B-tree.
ulint
btr_get_size(
/*=========*/
dict_index_t* index, /*!< in: index */
const dict_index_t* index, /*!< in: index */
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
@@ -614,8 +614,24 @@ btr_estimate_n_rows_in_range(
const dtuple_t* tuple2,
page_cur_mode_t mode2);

/*******************************************************************//**
Estimates the number of different key values in a given index, for

/** Statistics for one field of an index. */
struct index_field_stats_t
{
ib_uint64_t n_diff_key_vals;
ib_uint64_t n_sample_sizes;
ib_uint64_t n_non_null_key_vals;

index_field_stats_t(ib_uint64_t n_diff_key_vals= 0,
ib_uint64_t n_sample_sizes= 0,
ib_uint64_t n_non_null_key_vals= 0)
: n_diff_key_vals(n_diff_key_vals), n_sample_sizes(n_sample_sizes),
n_non_null_key_vals(n_non_null_key_vals)
{
}
};

/** Estimates the number of different key values in a given index, for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
0..n_uniq-1) and the number of pages that were sampled is saved in
@@ -623,12 +639,11 @@ index->stat_n_sample_sizes[].
If innodb_stats_method is nulls_ignored, we also record the number of
non-null values for each prefix and stored the estimates in
array index->stat_n_non_null_key_vals.
@return true if the index is available and we get the estimated numbers,
false if the index is unavailable. */
bool
btr_estimate_number_of_different_key_vals(
/*======================================*/
dict_index_t* index); /*!< in: index */
@param[in] index index
@return stat vector if the index is available and we get the estimated numbers,
empty vector if the index is unavailable. */
std::vector<index_field_stats_t>
btr_estimate_number_of_different_key_vals(dict_index_t* index);

/** Gets the externally stored size of a record, in units of a database page.
@param[in] rec record
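With the declaration above, an empty returned vector takes over the role the old bool return value had. A hedged usage sketch with simplified stand-in types (not the real dict_index_t), just to show the caller-side contract:

#include <cstdint>
#include <vector>

struct index_field_stats_t {
  uint64_t n_diff_key_vals;
  uint64_t n_sample_sizes;
  uint64_t n_non_null_key_vals;
};

struct index_t { bool readable; size_t n_uniq; };  // toy stand-in

// Mimics the contract: one entry per unique-prefix column, or an empty
// vector when the index cannot be sampled.
std::vector<index_field_stats_t> estimate(const index_t& index) {
  if (!index.readable) {
    return {};                              // "index unavailable"
  }
  index_field_stats_t one{1, 1, 1};         // n_diff, samples, non-null
  return std::vector<index_field_stats_t>(index.n_uniq, one);
}

int main() {
  index_t idx{true, 3};
  auto stats = estimate(idx);
  return stats.empty() ? 1 : 0;             // caller checks emptiness, not a bool
}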
@@ -206,8 +206,6 @@ struct btr_search_t{
the machine word, i.e., they cannot be turned into bit-fields. */
buf_block_t* root_guess;/*!< the root page frame when it was last time
fetched, or NULL */
ulint withdraw_clock; /*!< the withdraw clock value of the buffer
pool when root_guess was stored */
#ifdef BTR_CUR_HASH_ADAPT
ulint hash_analysis; /*!< when this exceeds
BTR_SEARCH_HASH_ANALYSIS, the hash
@@ -1403,7 +1403,7 @@ UNIV_INLINE
rw_lock_t*
dict_index_get_lock(
/*================*/
dict_index_t* index) /*!< in: index */
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Returns free space reserved for future updates of records. This is
@@ -1468,22 +1468,6 @@ void
dict_mutex_exit_for_mysql(void);
/*===========================*/

/** Lock the appropriate latch to protect a given table's statistics.
@param[in] table table whose stats to lock
@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */
void
dict_table_stats_lock(
dict_table_t* table,
ulint latch_mode);

/** Unlock the latch that has been locked by dict_table_stats_lock().
@param[in] table table whose stats to unlock
@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */
void
dict_table_stats_unlock(
dict_table_t* table,
ulint latch_mode);

/********************************************************************//**
Checks if the database name in two table names is the same.
@return TRUE if same db name */
@@ -1100,7 +1100,7 @@ UNIV_INLINE
rw_lock_t*
dict_index_get_lock(
/*================*/
dict_index_t* index) /*!< in: index */
const dict_index_t* index) /*!< in: index */
{
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

@@ -302,7 +302,6 @@ before proceeds. */
@param n_v_cols number of virtual columns
@param flags table flags
@param flags2 table flags2
@param init_stats_latch whether to init the stats latch
@return own: table object */
dict_table_t*
dict_mem_table_create(
@@ -311,8 +310,7 @@ dict_mem_table_create(
ulint n_cols,
ulint n_v_cols,
ulint flags,
ulint flags2,
bool init_stats_latch=true);
ulint flags2);

/****************************************************************//**
Free a table memory object. */
@@ -997,7 +995,7 @@ struct dict_index_t{
when InnoDB was started up */
zip_pad_info_t zip_pad;/*!< Information about state of
compression failures and successes */
rw_lock_t lock; /*!< read-write lock protecting the
mutable rw_lock_t lock; /*!< read-write lock protecting the
upper levels of the index tree */

/** Determine if the index has been committed to the
@@ -1862,23 +1860,8 @@ struct dict_table_t {
/*!< set of foreign key constraints which refer to this table */
dict_foreign_set referenced_set;

/** Statistics for query optimization. @{ */

/** Creation state of 'stats_latch'. */
bool stats_latch_inited;

/** This latch protects:
dict_table_t::stat_initialized,
dict_table_t::stat_n_rows (*),
dict_table_t::stat_clustered_index_size,
dict_table_t::stat_sum_of_other_index_sizes,
dict_table_t::stat_modified_counter (*),
dict_table_t::indexes*::stat_n_diff_key_vals[],
dict_table_t::indexes*::stat_index_size,
dict_table_t::indexes*::stat_n_leaf_pages.
(*) Those are not always protected for
performance reasons. */
rw_lock_t stats_latch;
/** Statistics for query optimization. Mostly protected by
dict_sys->mutex. @{ */

/** TRUE if statistics have been calculated the first time after
database startup or table creation. */
@@ -75,7 +75,7 @@ dict_stats_is_persistent_enabled(const dict_table_t* table)
+ dict_stats_update(DICT_STATS_RECALC_TRANSIENT) on a table that has
just been PS-enabled.
This is acceptable. Avoiding this would mean that we would have to
protect the ::stat_persistent with dict_table_stats_lock() like the
protect the ::stat_persistent with dict_sys->mutex like the
other ::stat_ members which would be too big performance penalty,
especially when this function is called from
dict_stats_update_if_needed(). */
@@ -178,10 +178,7 @@ dict_stats_deinit(

ut_a(table->get_ref_count() == 0);

dict_table_stats_lock(table, RW_X_LATCH);

if (!table->stat_initialized) {
dict_table_stats_unlock(table, RW_X_LATCH);
return;
}

@@ -221,6 +218,4 @@ dict_stats_deinit(
sizeof(index->stat_n_leaf_pages));
}
#endif /* HAVE_valgrind */

dict_table_stats_unlock(table, RW_X_LATCH);
}
@@ -125,7 +125,6 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key;
extern mysql_pfs_key_t trx_purge_latch_key;
extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t index_online_log_key;
extern mysql_pfs_key_t dict_table_stats_key;
extern mysql_pfs_key_t trx_sys_rw_lock_key;
extern mysql_pfs_key_t hash_table_locks_key;
#endif /* UNIV_PFS_RWLOCK */
@@ -1522,7 +1522,7 @@ error_exit:
srv_stats.n_rows_inserted.inc(size_t(trx->id));
}

/* Not protected by dict_table_stats_lock() for performance
/* Not protected by dict_sys->mutex for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
with a latch. */
@@ -1892,7 +1892,7 @@ row_update_for_mysql(row_prebuilt_t* prebuilt)
ut_ad(is_delete == (node->is_delete == PLAIN_DELETE));

if (is_delete) {
/* Not protected by dict_table_stats_lock() for performance
/* Not protected by dict_sys->mutex for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
with a latch. */
@@ -2244,7 +2244,7 @@ row_update_cascade_for_mysql(

if (node->is_delete == PLAIN_DELETE) {
/* Not protected by
dict_table_stats_lock() for
dict_sys->mutex for
performance reasons, we would rather
get garbage in stat_n_rows (which is
just an estimate anyway) than
@@ -613,7 +613,7 @@ row_undo_ins(
}

if (err == DB_SUCCESS && node->table->stat_initialized) {
/* Not protected by dict_table_stats_lock() for
/* Not protected by dict_sys->mutex for
performance reasons, we would rather get garbage
in stat_n_rows (which is just an estimate anyway)
than protecting the following code with a latch. */
@@ -1457,9 +1457,6 @@ sync_latch_meta_init()

LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);

LATCH_ADD_RWLOCK(DICT_TABLE_STATS, SYNC_INDEX_TREE,
dict_table_stats_key);

LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
hash_table_locks_key);

@@ -101,7 +101,6 @@ mysql_pfs_key_t buf_block_debug_latch_key;
# endif /* UNIV_DEBUG */
mysql_pfs_key_t checkpoint_lock_key;
mysql_pfs_key_t dict_operation_lock_key;
mysql_pfs_key_t dict_table_stats_key;
mysql_pfs_key_t hash_table_locks_key;
mysql_pfs_key_t index_tree_rw_lock_key;
mysql_pfs_key_t index_online_log_key;