mariadb/storage/innobase/dict/dict0defrag_bg.cc

/*****************************************************************************
Copyright (c) 2016, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file dict/dict0defrag_bg.cc
Defragmentation routines.
Created 25/08/2016 Jan Lindström
*******************************************************/
#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "row0mysql.h"
#include "srv0start.h"
#include "ut0new.h"
#include <vector>
static ib_mutex_t defrag_pool_mutex;
#ifdef MYSQL_PFS
static mysql_pfs_key_t defrag_pool_mutex_key;
#endif
/** Indices whose defrag stats need to be saved to persistent storage. */
struct defrag_pool_item_t {
	table_id_t	table_id;
	index_id_t	index_id;
};

/** Allocator type, used by std::vector */
typedef ut_allocator<defrag_pool_item_t>
	defrag_pool_allocator_t;

/** The set of indexes to be defragmented: an STL vector of
(table id, index id) pairs. */
typedef std::vector<defrag_pool_item_t, defrag_pool_allocator_t>
	defrag_pool_t;
/** Iterator type for iterating over the elements of objects of type
defrag_pool_t. */
typedef defrag_pool_t::iterator defrag_pool_iterator_t;
/** Pool where we store information on which tables are to be processed
by background defragmentation. */
static defrag_pool_t* defrag_pool;
extern bool dict_stats_start_shutdown;
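
/* Editorial note on the lifecycle (not part of the upstream comments):
entries are produced by dict_stats_defrag_pool_add(), typically from the
defragmentation code, and consumed by the background statistics thread
through dict_defrag_process_entries_from_defrag_pool(). Every access to
defrag_pool is serialized by defrag_pool_mutex. */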
/*****************************************************************//**
Initialize the defrag pool, called once during thread initialization. */
void
dict_defrag_pool_init(void)
/*=======================*/
{
	ut_ad(!srv_read_only_mode);

	/* JAN: TODO: MySQL 5.7 PSI
	const PSI_memory_key key2 = mem_key_dict_defrag_pool_t;
	defrag_pool = UT_NEW(defrag_pool_t(defrag_pool_allocator_t(key2)), key2);
	recalc_pool->reserve(RECALC_POOL_INITIAL_SLOTS);
	*/
	defrag_pool = new defrag_pool_t();

	/* We choose SYNC_STATS_DEFRAG to be below SYNC_FSP_PAGE. */
	mutex_create(LATCH_ID_DEFRAGMENT_MUTEX, &defrag_pool_mutex);
}

/*****************************************************************//**
Free the resources occupied by the defrag pool, called once during
thread de-initialization. */
void
dict_defrag_pool_deinit(void)
/*=========================*/
{
	ut_ad(!srv_read_only_mode);

	defrag_pool->clear();
	mutex_free(&defrag_pool_mutex);

	/* The pool was allocated with plain new above, so it must be
	freed with plain delete, not UT_DELETE(). */
	delete defrag_pool;
}

/*****************************************************************//**
Get an index from the auto defrag pool. The returned entry is removed
from the pool.
@return true if the pool was non-empty and "table_id" and "index_id"
were set, false otherwise */
static
bool
dict_stats_defrag_pool_get(
/*=======================*/
	table_id_t*	table_id,	/*!< out: table id, or unmodified
					if list is empty */
	index_id_t*	index_id)	/*!< out: index id, or unmodified
					if list is empty */
{
	ut_ad(!srv_read_only_mode);

	mutex_enter(&defrag_pool_mutex);

	if (defrag_pool->empty()) {
		mutex_exit(&defrag_pool_mutex);
		return(false);
	}

	defrag_pool_item_t& item = defrag_pool->back();
	*table_id = item.table_id;
	*index_id = item.index_id;

	defrag_pool->pop_back();

	mutex_exit(&defrag_pool_mutex);

	return(true);
}
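
/* Editorial note: back()/pop_back() above means entries are consumed in
LIFO order; the most recently enqueued index has its stats saved first. */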
/*****************************************************************//**
Add an index in a table to the defrag pool, which is processed by the
background stats gathering thread. Only the table id and index id are
added to the list, so the table can be closed after being enqueued and
it will be opened when needed. If the table or index does not exist later
(has been DROPped), then it will be removed from the pool and skipped. */
void
dict_stats_defrag_pool_add(
/*=======================*/
	const dict_index_t*	index)	/*!< in: index to add */
{
	defrag_pool_item_t	item;

	ut_ad(!srv_read_only_mode);

	mutex_enter(&defrag_pool_mutex);

	/* quit if already in the list */
	for (defrag_pool_iterator_t iter = defrag_pool->begin();
	     iter != defrag_pool->end();
	     ++iter) {
		if ((*iter).table_id == index->table->id
		    && (*iter).index_id == index->id) {
			mutex_exit(&defrag_pool_mutex);
			return;
		}
	}

	item.table_id = index->table->id;
	item.index_id = index->id;
	defrag_pool->push_back(item);

	mutex_exit(&defrag_pool_mutex);

	os_event_set(dict_stats_event);
}
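
/* Usage sketch (editorial; the caller below is hypothetical, but the
field and the functions it uses are the real ones from this file):

	void note_pages_freed(dict_index_t* index, ulint n_freed)
	{
		index->stat_defrag_n_pages_freed += n_freed;
		dict_stats_defrag_pool_add(index);
		// wakes the stats thread via dict_stats_event;
		// duplicates are filtered out by the loop above
	}
*/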
/*****************************************************************//**
Delete a given index from the auto defrag pool. */
void
dict_stats_defrag_pool_del(
/*=======================*/
	const dict_table_t*	table,	/*!< in: if given, remove
					all entries for the table */
	const dict_index_t*	index)	/*!< in: if given, remove this index */
{
	ut_a((table && !index) || (!table && index));
	ut_ad(!srv_read_only_mode);
	ut_ad(mutex_own(&dict_sys->mutex));

	mutex_enter(&defrag_pool_mutex);

	defrag_pool_iterator_t iter = defrag_pool->begin();
	while (iter != defrag_pool->end()) {
		if ((table && (*iter).table_id == table->id)
		    || (index
			&& (*iter).table_id == index->table->id
			&& (*iter).index_id == index->id)) {
			/* erase() invalidates the iterator */
			iter = defrag_pool->erase(iter);
			if (index)
				break;
		} else {
			iter++;
		}
	}

	mutex_exit(&defrag_pool_mutex);
}
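
/* Editorial note: exactly one of the two arguments may be non-NULL, as
the ut_a() above asserts. For example:

	dict_stats_defrag_pool_del(table, NULL);  // all entries for table
	dict_stats_defrag_pool_del(NULL, index);  // the entry for one index

With an index the loop stops at the first match, which is safe because
dict_stats_defrag_pool_add() never inserts duplicates. */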
/*****************************************************************//**
Pop one index from the auto defrag pool and, if its table and index
still exist, save the index's defragmentation stats to persistent
storage. */
static
void
dict_stats_process_entry_from_defrag_pool()
{
	table_id_t	table_id;
	index_id_t	index_id;

	ut_ad(!srv_read_only_mode);

	/* pop an index from the auto defrag pool */
	if (!dict_stats_defrag_pool_get(&table_id, &index_id)) {
		/* no index in defrag pool */
		return;
	}

	dict_table_t*	table;

	mutex_enter(&dict_sys->mutex);

	/* If the table is no longer cached, we've already lost the
	in-memory stats so there's nothing really to write to disk. */
	table = dict_table_open_on_id(table_id, TRUE,
				      DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);

	dict_index_t*	index = table && !table->corrupted
		? dict_table_find_index_on_id(table, index_id)
		: NULL;

	if (!index || dict_index_is_corrupted(index)) {
		if (table) {
			dict_table_close(table, TRUE, FALSE);
		}
		mutex_exit(&dict_sys->mutex);
		return;
	}

	mutex_exit(&dict_sys->mutex);
	dict_stats_save_defrag_stats(index);
	dict_table_close(table, FALSE, FALSE);
}
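
/* Editorial note: the two dict_table_close() calls pass different
values for the second (dict_locked) argument because the first one runs
while dict_sys->mutex is still held and the second one after the mutex
has been released. */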
/*****************************************************************//**
Process all entries in the defrag pool: pop each index and save its
defragmentation stats, until the pool is empty or shutdown begins. */
void
dict_defrag_process_entries_from_defrag_pool()
/*==========================================*/
{
	while (defrag_pool->size() && !dict_stats_start_shutdown) {
		dict_stats_process_entry_from_defrag_pool();
	}
}

/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_summary(
/*============================*/
	dict_index_t*	index)	/*!< in: index */
{
	dberr_t	ret = DB_SUCCESS;
	lint	now = (lint) ut_time();

	if (dict_index_is_ibuf(index)) {
		return DB_SUCCESS;
	}

	rw_lock_x_lock(dict_operation_lock);
	mutex_enter(&dict_sys->mutex);

	ret = dict_stats_save_index_stat(index, now, "n_pages_freed",
					 index->stat_defrag_n_pages_freed,
					 NULL,
					 "Number of pages freed during"
					 " last defragmentation run.",
					 NULL);

	mutex_exit(&dict_sys->mutex);
	rw_lock_x_unlock(dict_operation_lock);

	return (ret);
}
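
/* Editorial note: dict_stats_save_index_stat() persists the value as a
row of the mysql.innodb_index_stats table, keyed by the stat_name given
here ("n_pages_freed"), next to the regular persistent statistics. */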
/*********************************************************************//**
Save defragmentation stats for a given index.
@return DB_SUCCESS or error code */
dberr_t
dict_stats_save_defrag_stats(
/*============================*/
	dict_index_t*	index)	/*!< in: index */
{
	dberr_t	ret;

	if (dict_index_is_ibuf(index)) {
		return DB_SUCCESS;
	}

	if (!index->is_readable()) {
		return dict_stats_report_error(index->table, true);
	}

	lint	now = (lint) ut_time();
	mtr_t	mtr;
	ulint	n_leaf_pages;
	ulint	n_leaf_reserved;

	mtr_start(&mtr);
	mtr_s_lock(dict_index_get_lock(index), &mtr);
	n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
						    &n_leaf_pages, &mtr);
	mtr_commit(&mtr);

	if (n_leaf_reserved == ULINT_UNDEFINED) {
		/* The index name is different during fast index creation,
		so the stats won't be associated with the right index
		for later use. We just return without saving. */
		return DB_SUCCESS;
	}

	rw_lock_x_lock(dict_operation_lock);
	mutex_enter(&dict_sys->mutex);

	ret = dict_stats_save_index_stat(index, now, "n_page_split",
					 index->stat_defrag_n_page_split,
					 NULL,
					 "Number of new page splits on leaves"
					 " since last defragmentation.",
					 NULL);
	if (ret != DB_SUCCESS) {
		goto end;
	}

	ret = dict_stats_save_index_stat(
		index, now, "n_leaf_pages_defrag",
		n_leaf_pages,
		NULL,
		"Number of leaf pages when this stat is saved to disk",
		NULL);
	if (ret != DB_SUCCESS) {
		goto end;
	}

	ret = dict_stats_save_index_stat(
		index, now, "n_leaf_pages_reserved",
		n_leaf_reserved,
		NULL,
		"Number of pages reserved for this index leaves when this stat "
		"is saved to disk",
		NULL);

end:
	mutex_exit(&dict_sys->mutex);
	rw_lock_x_unlock(dict_operation_lock);

	return (ret);
}
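
/* Editorial sketch: the defragmentation statistics saved above can be
inspected from SQL; 'test' and 't1' below are placeholder names:

	SELECT stat_name, stat_value
	FROM mysql.innodb_index_stats
	WHERE database_name = 'test' AND table_name = 't1'
	AND stat_name IN ('n_pages_freed', 'n_page_split',
			  'n_leaf_pages_defrag', 'n_leaf_pages_reserved');
*/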