/*****************************************************************************

Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file srv/srv0mon.cc
Database monitor counter interfaces

Created 12/9/2009 Jimmy Yang
*******************************************************/

#include "buf0buf.h"
#include "dict0mem.h"
#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "mach0data.h"
#include "os0file.h"
#include "srv0mon.h"
#include "srv0srv.h"
#include "trx0rseg.h"
#include "trx0sys.h"

/* Macro to standardize the counter names for counters in the
"monitor_buf_page" module as they have very structured defines */
#define MONITOR_BUF_PAGE(name, description, code, op, op_code)	\
	{"buffer_page_" op "_" name, "buffer_page_io",		\
	 "Number of " description " Pages " op,			\
	 MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START,		\
	 MONITOR_##code##_##op_code}

#define MONITOR_BUF_PAGE_READ(name, description, code)		\
	MONITOR_BUF_PAGE(name, description, code, "read", PAGE_READ)

#define MONITOR_BUF_PAGE_WRITTEN(name, description, code)	\
	MONITOR_BUF_PAGE(name, description, code, "written", PAGE_WRITTEN)
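
/* Illustration only (not part of the counter definitions): given the
macro above, an invocation such as

	MONITOR_BUF_PAGE_READ("index_leaf", "Index Leaf", INDEX_LEAF)

should expand roughly to

	{"buffer_page_read_index_leaf", "buffer_page_io",
	 "Number of Index Leaf Pages read",
	 MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START,
	 MONITOR_INDEX_LEAF_PAGE_READ}

i.e. the counter name, description and monitor id are all derived from
the three arguments plus the "read"/"written" operation. */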

/** This array defines basic static information of monitor counters,
including each monitor's name, the module it belongs to, a short
description, its property/type, and the corresponding monitor_id.
Please note: if you add a monitor here, also add its corresponding
monitor_id to "enum monitor_id_value" in srv0mon.h. */
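
/* Illustration only (hypothetical, not a real counter): adding e.g. a
new "lock" counter would mean appending an entry of the form

	{"lock_example_waits", "lock", "Number of example waits",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_EXAMPLE_WAITS},

to the array below, together with a matching MONITOR_EXAMPLE_WAITS
value in "enum monitor_id_value" in srv0mon.h. */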

static monitor_info_t innodb_counter_info[] =
{
	/* A dummy item to mark the module start; this accommodates
	the default value (0) set for the global variables with the
	control system. */
	{"module_start", "module_start", "module_start",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_DEFAULT_START},

	/* ========== Counters for Server Metadata ========== */
	{"module_metadata", "metadata", "Server Metadata",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_METADATA},

	{"metadata_table_handles_opened", "metadata",
	 "Number of table handles opened",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TABLE_OPEN},

	/* ========== Counters for Lock Module ========== */
	{"module_lock", "lock", "Lock Module",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_LOCK},

	{"lock_deadlocks", "lock", "Number of deadlocks",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_DEADLOCK},

	{"lock_timeouts", "lock", "Number of lock timeouts",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_TIMEOUT},

	{"lock_rec_lock_waits", "lock",
	 "Number of times enqueued into record lock wait queue",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_LOCKREC_WAIT},

	{"lock_table_lock_waits", "lock",
	 "Number of times enqueued into table lock wait queue",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TABLELOCK_WAIT},

	{"lock_rec_lock_requests", "lock",
	 "Number of record locks requested",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_RECLOCK_REQ},

	{"lock_rec_lock_created", "lock", "Number of record locks created",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_RECLOCK_CREATED},

	{"lock_rec_lock_removed", "lock",
	 "Number of record locks removed from the lock queue",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_RECLOCK_REMOVED},

	{"lock_rec_locks", "lock",
	 "Current number of record locks on tables",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_RECLOCK},

	{"lock_table_lock_created", "lock", "Number of table locks created",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TABLELOCK_CREATED},

	{"lock_table_lock_removed", "lock",
	 "Number of table locks removed from the lock queue",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TABLELOCK_REMOVED},

	{"lock_table_locks", "lock",
	 "Current number of table locks on tables",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_TABLELOCK},

	{"lock_row_lock_current_waits", "lock",
	 "Number of row locks currently being waited for"
	 " (innodb_row_lock_current_waits)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_ROW_LOCK_CURRENT_WAIT},

	{"lock_row_lock_time", "lock",
	 "Time spent in acquiring row locks, in milliseconds"
	 " (innodb_row_lock_time)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOCK_WAIT_TIME},

	{"lock_row_lock_time_max", "lock",
	 "The maximum time to acquire a row lock, in milliseconds"
	 " (innodb_row_lock_time_max)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOCK_MAX_WAIT_TIME},

	{"lock_row_lock_waits", "lock",
	 "Number of times a row lock had to be waited for"
	 " (innodb_row_lock_waits)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_ROW_LOCK_WAIT},

	{"lock_row_lock_time_avg", "lock",
	 "The average time to acquire a row lock, in milliseconds"
	 " (innodb_row_lock_time_avg)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOCK_AVG_WAIT_TIME},

	/* ========== Counters for Buffer Manager and I/O ========== */
	{"module_buffer", "buffer", "Buffer Manager Module",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_BUFFER},

	{"buffer_pool_size", "server",
	 "Server buffer pool size (all buffer pools) in bytes",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUFFER_POOL_SIZE},

	{"buffer_pool_reads", "buffer",
	 "Number of reads directly from disk (innodb_buffer_pool_reads)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_READS},

	{"buffer_pool_read_requests", "buffer",
	 "Number of logical read requests (innodb_buffer_pool_read_requests)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_READ_REQUESTS},

	{"buffer_pool_write_requests", "buffer",
	 "Number of write requests (innodb_buffer_pool_write_requests)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_WRITE_REQUEST},

	{"buffer_pool_wait_free", "buffer",
	 "Number of times waited for free buffer"
	 " (innodb_buffer_pool_wait_free)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_WAIT_FREE},

	{"buffer_pool_read_ahead", "buffer",
	 "Number of pages read as read ahead (innodb_buffer_pool_read_ahead)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_READ_AHEAD},

	{"buffer_pool_read_ahead_evicted", "buffer",
	 "Read-ahead pages evicted without being accessed"
	 " (innodb_buffer_pool_read_ahead_evicted)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED},

	{"buffer_pool_pages_total", "buffer",
	 "Total buffer pool size in pages (innodb_buffer_pool_pages_total)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGE_TOTAL},

	{"buffer_pool_pages_misc", "buffer",
	 "Buffer pages for misc use such as row locks or the adaptive"
	 " hash index (innodb_buffer_pool_pages_misc)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGE_MISC},

	{"buffer_pool_pages_data", "buffer",
	 "Buffer pages containing data (innodb_buffer_pool_pages_data)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGES_DATA},

	{"buffer_pool_bytes_data", "buffer",
	 "Buffer bytes containing data (innodb_buffer_pool_bytes_data)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_BYTES_DATA},

	{"buffer_pool_pages_dirty", "buffer",
	 "Buffer pages currently dirty (innodb_buffer_pool_pages_dirty)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGES_DIRTY},

	{"buffer_pool_bytes_dirty", "buffer",
	 "Buffer bytes currently dirty (innodb_buffer_pool_bytes_dirty)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_BYTES_DIRTY},

	{"buffer_pool_pages_free", "buffer",
	 "Buffer pages currently free (innodb_buffer_pool_pages_free)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGES_FREE},

	{"buffer_pages_created", "buffer",
	 "Number of pages created (innodb_pages_created)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_PAGE_CREATED},

	{"buffer_pages_written", "buffer",
	 "Number of pages written (innodb_pages_written)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_WRITTEN},

	{"buffer_index_pages_written", "buffer",
	 "Number of index pages written (innodb_index_pages_written)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_INDEX_PAGES_WRITTEN},

	{"buffer_non_index_pages_written", "buffer",
	 "Number of non index pages written (innodb_non_index_pages_written)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_NON_INDEX_PAGES_WRITTEN},

	{"buffer_pages_read", "buffer",
	 "Number of pages read (innodb_pages_read)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_READ},

	{"buffer_index_sec_rec_cluster_reads", "buffer",
	 "Number of secondary record reads triggered cluster read",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS},

	{"buffer_index_sec_rec_cluster_reads_avoided", "buffer",
	 "Number of secondary record reads avoided triggering cluster read",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS_AVOIDED},

	{"buffer_data_reads", "buffer",
	 "Amount of data read in bytes (innodb_data_reads)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BYTE_READ},

	{"buffer_data_written", "buffer",
	 "Amount of data written in bytes (innodb_data_written)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BYTE_WRITTEN},
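
	/* The MONITOR_SET_OWNER / MONITOR_SET_MEMBER entries that follow
	appear to form "monitor sets": the fifth field of a set owner
	names one of its member counters, and the fifth field of each
	member names its owner, so closely related counters are grouped
	and reported together. */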

	/* Cumulative counter for scanning in flush batches */
	{"buffer_flush_batch_scanned", "buffer",
	 "Total pages scanned as part of flush batch",
	 MONITOR_SET_OWNER,
	 MONITOR_FLUSH_BATCH_SCANNED_NUM_CALL,
	 MONITOR_FLUSH_BATCH_SCANNED},

	{"buffer_flush_batch_num_scan", "buffer",
	 "Number of times buffer flush list flush is called",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BATCH_SCANNED,
	 MONITOR_FLUSH_BATCH_SCANNED_NUM_CALL},

	{"buffer_flush_batch_scanned_per_call", "buffer",
	 "Pages scanned per flush batch scan",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BATCH_SCANNED,
	 MONITOR_FLUSH_BATCH_SCANNED_PER_CALL},

	/* Cumulative counter for pages flushed in flush batches */
	{"buffer_flush_batch_total_pages", "buffer",
	 "Total pages flushed as part of flush batch",
	 MONITOR_SET_OWNER, MONITOR_FLUSH_BATCH_COUNT,
	 MONITOR_FLUSH_BATCH_TOTAL_PAGE},

	{"buffer_flush_batches", "buffer",
	 "Number of flush batches",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BATCH_TOTAL_PAGE,
	 MONITOR_FLUSH_BATCH_COUNT},

	{"buffer_flush_batch_pages", "buffer",
	 "Pages queued as a flush batch",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BATCH_TOTAL_PAGE,
	 MONITOR_FLUSH_BATCH_PAGES},

	/* Cumulative counter for flush batches because of neighbor */
	{"buffer_flush_neighbor_total_pages", "buffer",
	 "Total neighbors flushed as part of neighbor flush",
	 MONITOR_SET_OWNER, MONITOR_FLUSH_NEIGHBOR_COUNT,
	 MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE},

	{"buffer_flush_neighbor", "buffer",
	 "Number of times neighbors flushing is invoked",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE,
	 MONITOR_FLUSH_NEIGHBOR_COUNT},

	{"buffer_flush_neighbor_pages", "buffer",
	 "Pages queued as a neighbor batch",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE,
	 MONITOR_FLUSH_NEIGHBOR_PAGES},

	{"buffer_flush_n_to_flush_requested", "buffer",
	 "Number of pages requested for flushing.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_N_TO_FLUSH_REQUESTED},

	{"buffer_flush_n_to_flush_by_age", "buffer",
	 "Number of pages target by LSN Age for flushing.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_N_TO_FLUSH_BY_AGE},

	{"buffer_flush_adaptive_avg_time", "buffer",
	 "Avg time (ms) spent for adaptive flushing recently.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_ADAPTIVE_AVG_TIME},

	{"buffer_flush_adaptive_avg_pass", "buffer",
	 "Number of adaptive flushes passed during the recent Avg period.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_ADAPTIVE_AVG_PASS},

	{"buffer_LRU_get_free_loops", "buffer",
	 "Total loops in LRU get free.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_LRU_GET_FREE_LOOPS},

	{"buffer_LRU_get_free_waits", "buffer",
	 "Total sleep waits in LRU get free.",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_LRU_GET_FREE_WAITS},

	{"buffer_flush_avg_page_rate", "buffer",
	 "Average number of pages at which flushing is happening",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_AVG_PAGE_RATE},

	{"buffer_flush_lsn_avg_rate", "buffer",
	 "Average redo generation rate",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_LSN_AVG_RATE},

	{"buffer_flush_pct_for_dirty", "buffer",
	 "Percent of IO capacity used to avoid max dirty page limit",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_PCT_FOR_DIRTY},

	{"buffer_flush_pct_for_lsn", "buffer",
	 "Percent of IO capacity used to avoid reusable redo space limit",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_PCT_FOR_LSN},

	{"buffer_flush_sync_waits", "buffer",
	 "Number of times a wait happens due to sync flushing",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_FLUSH_SYNC_WAITS},

	/* Cumulative counter for flush batches for adaptive flushing */
	{"buffer_flush_adaptive_total_pages", "buffer",
	 "Total pages flushed as part of adaptive flushing",
	 MONITOR_SET_OWNER, MONITOR_FLUSH_ADAPTIVE_COUNT,
	 MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE},

	{"buffer_flush_adaptive", "buffer",
	 "Number of adaptive batches",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
	 MONITOR_FLUSH_ADAPTIVE_COUNT},

	{"buffer_flush_adaptive_pages", "buffer",
	 "Pages queued as an adaptive batch",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
	 MONITOR_FLUSH_ADAPTIVE_PAGES},

	/* Cumulative counter for flush batches because of sync */
	{"buffer_flush_sync_total_pages", "buffer",
	 "Total pages flushed as part of sync batches",
	 MONITOR_SET_OWNER, MONITOR_FLUSH_SYNC_COUNT,
	 MONITOR_FLUSH_SYNC_TOTAL_PAGE},

	{"buffer_flush_sync", "buffer",
	 "Number of sync batches",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_SYNC_TOTAL_PAGE,
	 MONITOR_FLUSH_SYNC_COUNT},

	{"buffer_flush_sync_pages", "buffer",
	 "Pages queued as a sync batch",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_SYNC_TOTAL_PAGE,
	 MONITOR_FLUSH_SYNC_PAGES},

	/* Cumulative counter for flush batches because of background */
	{"buffer_flush_background_total_pages", "buffer",
	 "Total pages flushed as part of background batches",
	 MONITOR_SET_OWNER, MONITOR_FLUSH_BACKGROUND_COUNT,
	 MONITOR_FLUSH_BACKGROUND_TOTAL_PAGE},

	{"buffer_flush_background", "buffer",
	 "Number of background batches",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BACKGROUND_TOTAL_PAGE,
	 MONITOR_FLUSH_BACKGROUND_COUNT},

	{"buffer_flush_background_pages", "buffer",
	 "Pages queued as a background batch",
	 MONITOR_SET_MEMBER, MONITOR_FLUSH_BACKGROUND_TOTAL_PAGE,
	 MONITOR_FLUSH_BACKGROUND_PAGES},

	/* Cumulative counter for LRU batch scan */
	{"buffer_LRU_batch_scanned", "buffer",
	 "Total pages scanned as part of LRU batch",
	 MONITOR_SET_OWNER, MONITOR_LRU_BATCH_SCANNED_NUM_CALL,
	 MONITOR_LRU_BATCH_SCANNED},

	{"buffer_LRU_batch_num_scan", "buffer",
	 "Number of times LRU batch is called",
	 MONITOR_SET_MEMBER, MONITOR_LRU_BATCH_SCANNED,
	 MONITOR_LRU_BATCH_SCANNED_NUM_CALL},

	{"buffer_LRU_batch_scanned_per_call", "buffer",
	 "Pages scanned per LRU batch call",
	 MONITOR_SET_MEMBER, MONITOR_LRU_BATCH_SCANNED,
	 MONITOR_LRU_BATCH_SCANNED_PER_CALL},

	/* Cumulative counter for LRU batch pages flushed */
	{"buffer_LRU_batch_flush_total_pages", "buffer",
	 "Total pages flushed as part of LRU batches",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE},

	/* Cumulative counter for LRU batch pages evicted */
	{"buffer_LRU_batch_evict_total_pages", "buffer",
	 "Total pages evicted as part of LRU batches",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE},

	{"buffer_LRU_single_flush_failure_count", "Buffer",
	 "Number of times attempt to flush a single page from LRU failed",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_LRU_SINGLE_FLUSH_FAILURE_COUNT},

	{"buffer_LRU_get_free_search", "Buffer",
	 "Number of searches performed for a clean page",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_LRU_GET_FREE_SEARCH},

	/* Cumulative counter for LRU search scans */
	{"buffer_LRU_search_scanned", "buffer",
	 "Total pages scanned as part of LRU search",
	 MONITOR_SET_OWNER,
	 MONITOR_LRU_SEARCH_SCANNED_NUM_CALL,
	 MONITOR_LRU_SEARCH_SCANNED},

	{"buffer_LRU_search_num_scan", "buffer",
	 "Number of times LRU search is performed",
	 MONITOR_SET_MEMBER, MONITOR_LRU_SEARCH_SCANNED,
	 MONITOR_LRU_SEARCH_SCANNED_NUM_CALL},

	{"buffer_LRU_search_scanned_per_call", "buffer",
	 "Page scanned per single LRU search",
	 MONITOR_SET_MEMBER, MONITOR_LRU_SEARCH_SCANNED,
	 MONITOR_LRU_SEARCH_SCANNED_PER_CALL},

	/* Cumulative counter for LRU unzip search scans */
	{"buffer_LRU_unzip_search_scanned", "buffer",
	 "Total pages scanned as part of LRU unzip search",
	 MONITOR_SET_OWNER,
	 MONITOR_LRU_UNZIP_SEARCH_SCANNED_NUM_CALL,
	 MONITOR_LRU_UNZIP_SEARCH_SCANNED},

	{"buffer_LRU_unzip_search_num_scan", "buffer",
	 "Number of times LRU unzip search is performed",
	 MONITOR_SET_MEMBER, MONITOR_LRU_UNZIP_SEARCH_SCANNED,
	 MONITOR_LRU_UNZIP_SEARCH_SCANNED_NUM_CALL},

	{"buffer_LRU_unzip_search_scanned_per_call", "buffer",
	 "Page scanned per single LRU unzip search",
	 MONITOR_SET_MEMBER, MONITOR_LRU_UNZIP_SEARCH_SCANNED,
	 MONITOR_LRU_UNZIP_SEARCH_SCANNED_PER_CALL},

	/* ========== Counters for Buffer Page I/O ========== */
	{"module_buffer_page", "buffer_page_io", "Buffer Page I/O Module",
	 static_cast<monitor_type_t>(
	 MONITOR_MODULE | MONITOR_GROUP_MODULE),
	 MONITOR_DEFAULT_START, MONITOR_MODULE_BUF_PAGE},

	MONITOR_BUF_PAGE_READ("index_leaf", "Index Leaf", INDEX_LEAF),

	MONITOR_BUF_PAGE_READ("index_non_leaf", "Index Non-leaf",
		INDEX_NON_LEAF),

	MONITOR_BUF_PAGE_READ("index_ibuf_leaf", "Insert Buffer Index Leaf",
		INDEX_IBUF_LEAF),

	MONITOR_BUF_PAGE_READ("index_ibuf_non_leaf",
		"Insert Buffer Index Non-Leaf",
		INDEX_IBUF_NON_LEAF),

	MONITOR_BUF_PAGE_READ("undo_log", "Undo Log", UNDO_LOG),

	MONITOR_BUF_PAGE_READ("index_inode", "Index Inode", INODE),

	MONITOR_BUF_PAGE_READ("ibuf_free_list", "Insert Buffer Free List",
		IBUF_FREELIST),

	MONITOR_BUF_PAGE_READ("ibuf_bitmap", "Insert Buffer Bitmap",
		IBUF_BITMAP),

	MONITOR_BUF_PAGE_READ("system_page", "System", SYSTEM),

	MONITOR_BUF_PAGE_READ("trx_system", "Transaction System", TRX_SYSTEM),

	MONITOR_BUF_PAGE_READ("fsp_hdr", "File Space Header", FSP_HDR),

	MONITOR_BUF_PAGE_READ("xdes", "Extent Descriptor", XDES),

	MONITOR_BUF_PAGE_READ("blob", "Uncompressed BLOB", BLOB),

	MONITOR_BUF_PAGE_READ("zblob", "First Compressed BLOB", ZBLOB),

	MONITOR_BUF_PAGE_READ("zblob2", "Subsequent Compressed BLOB", ZBLOB2),

	MONITOR_BUF_PAGE_READ("other", "other/unknown (old version of InnoDB)",
		OTHER),

	MONITOR_BUF_PAGE_WRITTEN("index_leaf", "Index Leaf", INDEX_LEAF),

	MONITOR_BUF_PAGE_WRITTEN("index_non_leaf", "Index Non-leaf",
		INDEX_NON_LEAF),

	MONITOR_BUF_PAGE_WRITTEN("index_ibuf_leaf", "Insert Buffer Index Leaf",
		INDEX_IBUF_LEAF),

	MONITOR_BUF_PAGE_WRITTEN("index_ibuf_non_leaf",
		"Insert Buffer Index Non-Leaf",
		INDEX_IBUF_NON_LEAF),

	MONITOR_BUF_PAGE_WRITTEN("undo_log", "Undo Log", UNDO_LOG),

	MONITOR_BUF_PAGE_WRITTEN("index_inode", "Index Inode", INODE),

	MONITOR_BUF_PAGE_WRITTEN("ibuf_free_list", "Insert Buffer Free List",
		IBUF_FREELIST),

	MONITOR_BUF_PAGE_WRITTEN("ibuf_bitmap", "Insert Buffer Bitmap",
		IBUF_BITMAP),

	MONITOR_BUF_PAGE_WRITTEN("system_page", "System", SYSTEM),

	MONITOR_BUF_PAGE_WRITTEN("trx_system", "Transaction System",
		TRX_SYSTEM),

	MONITOR_BUF_PAGE_WRITTEN("fsp_hdr", "File Space Header", FSP_HDR),

	MONITOR_BUF_PAGE_WRITTEN("xdes", "Extent Descriptor", XDES),

	MONITOR_BUF_PAGE_WRITTEN("blob", "Uncompressed BLOB", BLOB),

	MONITOR_BUF_PAGE_WRITTEN("zblob", "First Compressed BLOB", ZBLOB),

	MONITOR_BUF_PAGE_WRITTEN("zblob2", "Subsequent Compressed BLOB",
		ZBLOB2),

	MONITOR_BUF_PAGE_WRITTEN("other", "other/unknown (old version InnoDB)",
		OTHER),

	/* ========== Counters for OS level operations ========== */
	{"module_os", "os", "OS Level Operation",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_OS},

	{"os_data_reads", "os",
	 "Number of reads initiated (innodb_data_reads)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_FILE_READ},

	{"os_data_writes", "os",
	 "Number of writes initiated (innodb_data_writes)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_FILE_WRITE},

	{"os_data_fsyncs", "os",
	 "Number of fsync() calls (innodb_data_fsyncs)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_FSYNC},

	{"os_pending_reads", "os", "Number of reads pending",
	 MONITOR_DEFAULT_ON,
	 MONITOR_DEFAULT_START, MONITOR_OS_PENDING_READS},

	{"os_pending_writes", "os", "Number of writes pending",
	 MONITOR_DEFAULT_ON,
	 MONITOR_DEFAULT_START, MONITOR_OS_PENDING_WRITES},

	{"os_log_bytes_written", "os",
	 "Bytes of log written (innodb_os_log_written)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_LOG_WRITTEN},

	{"os_log_fsyncs", "os",
	 "Number of fsync log writes (innodb_os_log_fsyncs)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_LOG_FSYNC},

	{"os_log_pending_fsyncs", "os",
	 "Number of pending fsync write (innodb_os_log_pending_fsyncs)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_LOG_PENDING_FSYNC},

	{"os_log_pending_writes", "os",
	 "Number of pending log file writes (innodb_os_log_pending_writes)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_OS_LOG_PENDING_WRITES},

	/* ========== Counters for Transaction Module ========== */
	{"module_trx", "transaction", "Transaction Manager",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_TRX},

	{"trx_rw_commits", "transaction",
	 "Number of read-write transactions committed",
	 MONITOR_NONE, MONITOR_DEFAULT_START, MONITOR_TRX_RW_COMMIT},

	{"trx_ro_commits", "transaction",
	 "Number of read-only transactions committed",
	 MONITOR_NONE, MONITOR_DEFAULT_START, MONITOR_TRX_RO_COMMIT},

	{"trx_nl_ro_commits", "transaction",
	 "Number of non-locking auto-commit read-only transactions committed",
	 MONITOR_NONE, MONITOR_DEFAULT_START, MONITOR_TRX_NL_RO_COMMIT},

	{"trx_commits_insert_update", "transaction",
	 "Number of transactions committed with inserts and updates",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TRX_COMMIT_UNDO},

	{"trx_rollbacks", "transaction",
	 "Number of transactions rolled back",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TRX_ROLLBACK},

	{"trx_rollbacks_savepoint", "transaction",
	 "Number of transactions rolled back to savepoint",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_TRX_ROLLBACK_SAVEPOINT},

	{"trx_rseg_history_len", "transaction",
	 "Length of the TRX_RSEG_HISTORY list",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_RSEG_HISTORY_LEN},

	{"trx_undo_slots_used", "transaction", "Number of undo slots used",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_UNDO_SLOT_USED},

	{"trx_undo_slots_cached", "transaction",
	 "Number of undo slots cached",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_UNDO_SLOT_CACHED},

	{"trx_rseg_current_size", "transaction",
	 "Current rollback segment size in pages",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_RSEG_CUR_SIZE},

	/* ========== Counters for Purge Module ========== */
	{"module_purge", "purge", "Purge Module",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_PURGE},

	{"purge_del_mark_records", "purge",
	 "Number of delete-marked rows purged",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_N_DEL_ROW_PURGE},

	{"purge_upd_exist_or_extern_records", "purge",
	 "Number of purges on updates of existing records and"
	 " updates on delete marked record with externally stored field",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_N_UPD_EXIST_EXTERN},

	{"purge_invoked", "purge",
	 "Number of times purge was invoked",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_PURGE_INVOKED},

	{"purge_undo_log_pages", "purge",
	 "Number of undo log pages handled by the purge",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_PURGE_N_PAGE_HANDLED},

	{"purge_dml_delay_usec", "purge",
	 "Microseconds DML to be delayed due to purge lagging",
	 MONITOR_DISPLAY_CURRENT,
	 MONITOR_DEFAULT_START, MONITOR_DML_PURGE_DELAY},

	{"purge_stop_count", "purge",
	 "Number of times purge was stopped",
	 MONITOR_DISPLAY_CURRENT,
	 MONITOR_DEFAULT_START, MONITOR_PURGE_STOP_COUNT},

	{"purge_resume_count", "purge",
	 "Number of times purge was resumed",
	 MONITOR_DISPLAY_CURRENT,
	 MONITOR_DEFAULT_START, MONITOR_PURGE_RESUME_COUNT},

	/* ========== Counters for Recovery Module ========== */
	{"module_log", "recovery", "Recovery Module",
	 MONITOR_MODULE,
	 MONITOR_DEFAULT_START, MONITOR_MODULE_RECOVERY},

	{"log_checkpoints", "recovery", "Number of checkpoints",
	 MONITOR_NONE,
	 MONITOR_DEFAULT_START, MONITOR_NUM_CHECKPOINT},

	{"log_lsn_last_flush", "recovery", "LSN of Last flush",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LSN_FLUSHDISK},

	{"log_lsn_last_checkpoint", "recovery", "LSN at last checkpoint",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LSN_CHECKPOINT},

	{"log_lsn_current", "recovery", "Current LSN value",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LSN_CURRENT},

	{"log_lsn_checkpoint_age", "recovery",
	 "Current LSN value minus LSN at last checkpoint",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_LSN_CHECKPOINT_AGE},

	{"log_lsn_buf_pool_oldest", "recovery",
	 "The oldest modified block LSN in the buffer pool",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_OLDEST_LSN},

	{"log_max_modified_age_async", "recovery",
	 "Maximum LSN difference; when exceeded, start asynchronous preflush",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_MAX_AGE_ASYNC},

	{"log_pending_log_flushes", "recovery", "Pending log flushes",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_PENDING_LOG_FLUSH},

	{"log_pending_checkpoint_writes", "recovery", "Pending checkpoints",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_PENDING_CHECKPOINT_WRITE},

	{"log_num_log_io", "recovery", "Number of log I/Os",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT),
	 MONITOR_DEFAULT_START, MONITOR_LOG_IO},

	{"log_waits", "recovery",
	 "Number of log waits due to small log buffer (innodb_log_waits)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOG_WAITS},

	{"log_write_requests", "recovery",
	 "Number of log write requests (innodb_log_write_requests)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOG_WRITE_REQUEST},

	{"log_writes", "recovery",
	 "Number of log writes (innodb_log_writes)",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOG_WRITES},

	{"log_padded", "recovery",
	 "Bytes of log padded for log write ahead",
	 static_cast<monitor_type_t>(
	 MONITOR_EXISTING | MONITOR_DEFAULT_ON),
	 MONITOR_DEFAULT_START, MONITOR_OVLD_LOG_PADDED},
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* ========== Counters for Page Compression ========== */
|
|
|
|
{"module_compress", "compression", "Page Compression Info",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_PAGE},
|
|
|
|
|
|
|
|
{"compress_pages_compressed", "compression",
|
|
|
|
"Number of pages compressed", MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_PAGE_COMPRESS},
|
|
|
|
|
|
|
|
{"compress_pages_decompressed", "compression",
|
|
|
|
"Number of pages decompressed",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_PAGE_DECOMPRESS},
|
|
|
|
|
2013-03-26 00:03:13 +02:00
|
|
|
{"compression_pad_increments", "compression",
|
|
|
|
"Number of times padding is incremented to avoid compression failures",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_PAD_INCREMENTS},
|
|
|
|
|
|
|
|
{"compression_pad_decrements", "compression",
|
|
|
|
"Number of times padding is decremented due to good compressibility",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_PAD_DECREMENTS},
|
|
|
|
|
2013-12-19 14:36:38 +02:00
|
|
|
{"compress_saved", "compression",
|
|
|
|
"Number of bytes saved by page compression",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGE_COMPRESS_SAVED},
|
|
|
|
|
|
|
|
{"compress_pages_page_compressed", "compression",
|
|
|
|
"Number of pages compressed by page compression",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_PAGE_COMPRESSED},
|
|
|
|
|
|
|
|
{"compress_page_compressed_trim_op", "compression",
|
|
|
|
"Number of TRIM operation performed by page compression",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGE_COMPRESSED_TRIM_OP},
|
|
|
|
|
|
|
|
{"compress_pages_page_decompressed", "compression",
|
|
|
|
"Number of pages decompressed by page compression",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_PAGE_DECOMPRESSED},
|
|
|
|
|
2014-03-21 15:46:36 +02:00
|
|
|
{"compress_pages_page_compression_error", "compression",
|
|
|
|
"Number of page compression errors",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_PAGE_COMPRESSION_ERROR},
|
|
|
|
|
2015-04-01 22:03:14 +03:00
|
|
|
{"compress_pages_encrypted", "compression",
|
|
|
|
"Number of pages encrypted",
|
2014-12-22 16:53:17 +02:00
|
|
|
MONITOR_NONE,
|
2015-04-01 22:03:14 +03:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_ENCRYPTED},
|
2014-12-22 16:53:17 +02:00
|
|
|
|
2015-04-01 22:03:14 +03:00
|
|
|
{"compress_pages_decrypted", "compression",
|
|
|
|
"Number of pages decrypted",
|
2014-12-22 16:53:17 +02:00
|
|
|
MONITOR_NONE,
|
2015-04-01 22:03:14 +03:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_DECRYPTED},
|
2014-12-22 16:53:17 +02:00
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* ========== Counters for Index ========== */
|
|
|
|
{"module_index", "index", "Index Manager",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_INDEX},
|
|
|
|
|
2014-02-26 19:23:04 +01:00
|
|
|
{"index_page_splits", "index", "Number of index page splits",
|
2012-08-01 17:27:34 +03:00
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_SPLIT},
|
|
|
|
|
2014-02-26 19:23:04 +01:00
|
|
|
{"index_page_merge_attempts", "index",
|
|
|
|
"Number of index page merge attempts",
|
2014-02-26 19:11:54 +01:00
|
|
|
MONITOR_NONE,
|
2014-02-26 19:23:04 +01:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_MERGE_ATTEMPTS},
|
|
|
|
|
|
|
|
{"index_page_merge_successful", "index",
|
|
|
|
"Number of successful index page merges",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_MERGE_SUCCESSFUL},
|
|
|
|
|
|
|
|
{"index_page_reorg_attempts", "index",
|
|
|
|
"Number of index page reorganization attempts",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_REORG_ATTEMPTS},
|
|
|
|
|
|
|
|
{"index_page_reorg_successful", "index",
|
|
|
|
"Number of successful index page reorganizations",
|
2012-08-01 17:27:34 +03:00
|
|
|
MONITOR_NONE,
|
2014-02-26 19:23:04 +01:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_REORG_SUCCESSFUL},
|
|
|
|
|
|
|
|
{"index_page_discards", "index", "Number of index pages discarded",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_INDEX_DISCARD},
|
2012-08-01 17:27:34 +03:00
|
|
|
|
2017-02-23 23:05:12 +02:00
|
|
|
#ifdef BTR_CUR_HASH_ADAPT
|
2012-08-01 17:27:34 +03:00
|
|
|
/* ========== Counters for Adaptive Hash Index ========== */
|
2017-02-23 23:05:12 +02:00
|
|
|
{"module_adaptive_hash", "adaptive_hash_index", "Adaptive Hash Index",
|
2012-08-01 17:27:34 +03:00
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_ADAPTIVE_HASH},
|
|
|
|
|
|
|
|
{"adaptive_hash_searches", "adaptive_hash_index",
|
|
|
|
"Number of successful searches using Adaptive Hash Index",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_ADAPTIVE_HASH_SEARCH},
|
|
|
|
|
|
|
|
{"adaptive_hash_searches_btree", "adaptive_hash_index",
|
|
|
|
"Number of searches using B-tree on an index search",
|
2015-02-18 16:20:46 +01:00
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
2012-08-01 17:27:34 +03:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_ADAPTIVE_HASH_SEARCH_BTREE},
|
|
|
|
|
|
|
|
{"adaptive_hash_pages_added", "adaptive_hash_index",
|
|
|
|
"Number of index pages on which the Adaptive Hash Index is built",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_PAGE_ADDED},
|
|
|
|
|
|
|
|
{"adaptive_hash_pages_removed", "adaptive_hash_index",
|
|
|
|
"Number of index pages whose corresponding Adaptive Hash Index"
|
|
|
|
" entries were removed",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_PAGE_REMOVED},
|
|
|
|
|
|
|
|
{"adaptive_hash_rows_added", "adaptive_hash_index",
|
|
|
|
"Number of Adaptive Hash Index rows added",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_ROW_ADDED},
|
|
|
|
|
|
|
|
{"adaptive_hash_rows_removed", "adaptive_hash_index",
|
|
|
|
"Number of Adaptive Hash Index rows removed",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_ROW_REMOVED},
|
|
|
|
|
|
|
|
{"adaptive_hash_rows_deleted_no_hash_entry", "adaptive_hash_index",
|
|
|
|
"Number of rows deleted that did not have corresponding Adaptive Hash"
|
|
|
|
" Index entries",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND},
|
|
|
|
|
|
|
|
{"adaptive_hash_rows_updated", "adaptive_hash_index",
|
|
|
|
"Number of Adaptive Hash Index rows updated",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ADAPTIVE_HASH_ROW_UPDATED},
|
2017-02-23 23:05:12 +02:00
|
|
|
#endif /* BTR_CUR_HASH_ADAPT */
|
2012-08-01 17:27:34 +03:00
|
|
|
|
|
|
|
/* ========== Counters for tablespace ========== */
|
|
|
|
{"module_file", "file_system", "Tablespace and File System Manager",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_FIL_SYSTEM},
|
|
|
|
|
|
|
|
{"file_num_open_files", "file_system",
|
|
|
|
"Number of files currently open (innodb_num_open_files)",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_N_FILE_OPENED},
|
|
|
|
|
|
|
|
/* ========== Counters for Change Buffer ========== */
|
|
|
|
{"module_ibuf_system", "change_buffer", "InnoDB Change Buffer",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_IBUF_SYSTEM},
|
|
|
|
|
|
|
|
{"ibuf_merges_insert", "change_buffer",
|
|
|
|
"Number of inserted records merged by change buffering",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_INSERT},
|
|
|
|
|
|
|
|
{"ibuf_merges_delete_mark", "change_buffer",
|
|
|
|
"Number of deleted records merged by change buffering",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DELETE},
|
|
|
|
|
|
|
|
{"ibuf_merges_delete", "change_buffer",
|
|
|
|
"Number of purge records merged by change buffering",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_PURGE},
|
|
|
|
|
|
|
|
{"ibuf_merges_discard_insert", "change_buffer",
|
|
|
|
"Number of insert merged operations discarded",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_INSERT},
|
|
|
|
|
|
|
|
{"ibuf_merges_discard_delete_mark", "change_buffer",
|
|
|
|
"Number of deleted merged operations discarded",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_DELETE},
|
|
|
|
|
|
|
|
{"ibuf_merges_discard_delete", "change_buffer",
|
|
|
|
"Number of purge merged operations discarded",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_PURGE},
|
|
|
|
|
|
|
|
{"ibuf_merges", "change_buffer", "Number of change buffer merges",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGES},
|
|
|
|
|
|
|
|
{"ibuf_size", "change_buffer", "Change buffer size in pages",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_SIZE},
|
|
|
|
|
|
|
|
/* ========== Counters for server operations ========== */
|
|
|
|
{"module_innodb", "innodb",
|
|
|
|
"Counter for general InnoDB server wide operations and properties",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_SERVER},
|
|
|
|
|
|
|
|
{"innodb_master_thread_sleeps", "server",
|
|
|
|
"Number of times (seconds) master thread sleeps",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MASTER_THREAD_SLEEP},
|
|
|
|
|
|
|
|
{"innodb_activity_count", "server", "Current server activity count",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_SERVER_ACTIVITY},
|
|
|
|
|
|
|
|
{"innodb_master_active_loops", "server",
|
|
|
|
"Number of times master thread performs its tasks when"
|
|
|
|
" server is active",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MASTER_ACTIVE_LOOPS},
|
|
|
|
|
|
|
|
{"innodb_master_idle_loops", "server",
|
|
|
|
"Number of times master thread performs its tasks when server is idle",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MASTER_IDLE_LOOPS},
|
|
|
|
|
|
|
|
{"innodb_log_flush_usec", "server",
|
|
|
|
"Time (in microseconds) spent to flush log records",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_SRV_LOG_FLUSH_MICROSECOND},
|
|
|
|
|
|
|
|
{"innodb_dict_lru_usec", "server",
|
|
|
|
"Time (in microseconds) spent to process DICT LRU list",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_SRV_DICT_LRU_MICROSECOND},
|
|
|
|
|
2015-08-26 11:48:19 +02:00
|
|
|
{"innodb_dict_lru_count_active", "server",
|
|
|
|
"Number of tables evicted from DICT LRU list in the active loop",
|
2015-08-26 10:02:06 +02:00
|
|
|
MONITOR_NONE,
|
2015-08-26 11:48:19 +02:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_SRV_DICT_LRU_EVICT_COUNT_ACTIVE},
|
|
|
|
|
|
|
|
{"innodb_dict_lru_count_idle", "server",
|
|
|
|
"Number of tables evicted from DICT LRU list in the idle loop",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_SRV_DICT_LRU_EVICT_COUNT_IDLE},
|
2015-08-26 10:02:06 +02:00
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
{"innodb_dblwr_writes", "server",
|
|
|
|
"Number of doublewrite operations that have been performed"
|
|
|
|
" (innodb_dblwr_writes)",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_SRV_DBLWR_WRITES},
|
|
|
|
|
|
|
|
{"innodb_dblwr_pages_written", "server",
|
|
|
|
"Number of pages that have been written for doublewrite operations"
|
|
|
|
" (innodb_dblwr_pages_written)",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_SRV_DBLWR_PAGES_WRITTEN},
|
|
|
|
|
|
|
|
{"innodb_page_size", "server",
|
|
|
|
"InnoDB page size in bytes (innodb_page_size)",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON | MONITOR_DISPLAY_CURRENT),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OVLD_SRV_PAGE_SIZE},
|
|
|
|
|
|
|
|
/* ========== Counters for DML operations ========== */
|
|
|
|
{"module_dml", "dml", "Statistics for DMLs",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_DML_STATS},
|
|
|
|
|
|
|
|
{"dml_reads", "dml", "Number of rows read",
|
2016-08-12 11:17:45 +03:00
|
|
|
static_cast<monitor_type_t>(MONITOR_EXISTING),
|
2012-08-01 17:27:34 +03:00
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_ROW_READ},
|
|
|
|
|
|
|
|
{"dml_inserts", "dml", "Number of rows inserted",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_ROW_INSERTED},
|
|
|
|
|
|
|
|
{"dml_deletes", "dml", "Number of rows deleted",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_ROW_DELETED},
|
|
|
|
|
|
|
|
{"dml_updates", "dml", "Number of rows updated",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_ROW_UPDTATED},
|
|
|
|
|
2014-10-26 07:22:51 +02:00
|
|
|
{"dml_system_reads", "dml", "Number of system rows read",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_SYSTEM_ROW_READ},
|
|
|
|
|
|
|
|
{"dml_system_inserts", "dml", "Number of system rows inserted",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_SYSTEM_ROW_INSERTED},
|
|
|
|
|
|
|
|
{"dml_system_deletes", "dml", "Number of system rows deleted",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_SYSTEM_ROW_DELETED},
|
|
|
|
|
|
|
|
{"dml_system_updates", "dml", "Number of system rows updated",
|
|
|
|
static_cast<monitor_type_t>(
|
|
|
|
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_OLVD_SYSTEM_ROW_UPDATED},
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* ========== Counters for DDL operations ========== */
|
|
|
|
{"module_ddl", "ddl", "Statistics for DDLs",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_DDL_STATS},
|
|
|
|
|
2013-03-26 00:03:13 +02:00
|
|
|
{"ddl_background_drop_indexes", "ddl",
|
|
|
|
"Number of indexes waiting to be dropped after failed index creation",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_BACKGROUND_DROP_INDEX},
|
|
|
|
|
|
|
|
{"ddl_online_create_index", "ddl",
|
|
|
|
"Number of indexes being created online",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ONLINE_CREATE_INDEX},
|
|
|
|
|
|
|
|
{"ddl_pending_alter_table", "ddl",
|
|
|
|
"Number of ALTER TABLE, CREATE INDEX, DROP INDEX in progress",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_PENDING_ALTER_TABLE},
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
{"ddl_sort_file_alter_table", "ddl",
|
|
|
|
"Number of sort files created during alter table",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ALTER_TABLE_SORT_FILES},
|
|
|
|
|
|
|
|
{"ddl_log_file_alter_table", "ddl",
|
|
|
|
"Number of log files created during alter table",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ALTER_TABLE_LOG_FILES},
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* ===== Counters for ICP (Index Condition Pushdown) Module ===== */
|
|
|
|
{"module_icp", "icp", "Index Condition Pushdown",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_MODULE_ICP},
|
|
|
|
|
|
|
|
{"icp_attempts", "icp",
|
|
|
|
"Number of attempts for index push-down condition checks",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ICP_ATTEMPTS},
|
|
|
|
|
|
|
|
{"icp_no_match", "icp", "Index push-down condition does not match",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ICP_NO_MATCH},
|
|
|
|
|
|
|
|
{"icp_out_of_range", "icp", "Index push-down condition out of range",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ICP_OUT_OF_RANGE},
|
|
|
|
|
|
|
|
{"icp_match", "icp", "Index push-down condition matches",
|
|
|
|
MONITOR_NONE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ICP_MATCH},
|
|
|
|
|
|
|
|
/* ========== To turn on/off reset all counters ========== */
|
|
|
|
{"all", "All Counters", "Turn on/off and reset all counters",
|
|
|
|
MONITOR_MODULE,
|
|
|
|
MONITOR_DEFAULT_START, MONITOR_ALL_COUNTER}
|
|
|
|
};
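
/* Editor's illustrative sketch (not part of the original interface):
innodb_counter_info[] is indexed by monitor_id_t, so a name-based lookup
is simply a linear scan of the array. The helper name and its use of
NUM_MONITOR as a "not found" sentinel are hypothetical; only the array
and NUM_MONITOR come from this file and srv0mon.h, and strcmp() is
assumed to be available through the existing includes. */
static monitor_id_t
mon_id_by_name(const char* name)
{
	for (ulint i = 0; i < NUM_MONITOR; i++) {
		if (!strcmp(innodb_counter_info[i].monitor_name, name)) {
			return(static_cast<monitor_id_t>(i));
		}
	}

	return(NUM_MONITOR);	/* hypothetical "not found" sentinel */
}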

/* The "innodb_counter_value" array stores actual counter values */
monitor_value_t	innodb_counter_value[NUM_MONITOR];

/* monitor_set_tbl is used to record and determine whether a monitor
has been turned on/off. */
Atomic_relaxed<ulint>
	monitor_set_tbl[(NUM_MONITOR + NUM_BITS_ULINT - 1) / NUM_BITS_ULINT];
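
/* Editor's illustrative sketch: monitor_set_tbl is a bitmap with one
bit per counter, packed into ulint-sized words. The real MONITOR_ON()/
MONITOR_IS_ON() macros live in srv0mon.h; the helper below only shows
the word/bit arithmetic they rely on, assuming NUM_BITS_ULINT is the
number of bits in ulint and that Atomic_relaxed<ulint> converts to
ulint on read. */
static inline bool
mon_bit_is_set(ulint id)
{
	return((monitor_set_tbl[id / NUM_BITS_ULINT]
		& (ulint(1) << (id % NUM_BITS_ULINT))) != 0);
}
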
/****************************************************************//**
Get a monitor's "monitor_info" by its monitor id (index into the
innodb_counter_info array).
@return pointer to the corresponding monitor_info_t, or NULL if no such
monitor */
monitor_info_t*
srv_mon_get_info(
/*=============*/
	monitor_id_t	monitor_id)	/*!< id indexing into the
					innodb_counter_info array */
{
	ut_a(monitor_id < NUM_MONITOR);

	return((monitor_id < NUM_MONITOR)
	       ? &innodb_counter_info[monitor_id]
	       : NULL);
}

/****************************************************************//**
Get a monitor's name by its monitor id (index into the
innodb_counter_info array).
@return corresponding monitor name, or NULL if no such monitor */
const char*
srv_mon_get_name(
/*=============*/
	monitor_id_t	monitor_id)	/*!< id indexing into the
					innodb_counter_info array */
{
	ut_a(monitor_id < NUM_MONITOR);

	return((monitor_id < NUM_MONITOR)
	       ? innodb_counter_info[monitor_id].monitor_name
	       : NULL);
}
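
/* Editor's usage sketch: fetching the static metadata of one counter.
Only this wrapping caller is illustrative; srv_mon_get_info() and
srv_mon_get_name() are the accessors defined above, and
MONITOR_PURGE_STOP_COUNT is one of the ids registered in
innodb_counter_info[]. */
static void
mon_print_one_counter_example(void)
{
	const monitor_info_t*	info
		= srv_mon_get_info(MONITOR_PURGE_STOP_COUNT);

	ib::info() << "counter "
		<< srv_mon_get_name(MONITOR_PURGE_STOP_COUNT)
		<< " belongs to module " << info->monitor_module;
}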
|
|
|
|
|
|
|
|
/****************************************************************//**
|
|
|
|
Turn on/off, reset monitor counters in a module. If module_id
|
|
|
|
is MONITOR_ALL_COUNTER then turn on all monitor counters.
|
|
|
|
A counter that has already been turned on will not be turned on again. */
|
|
|
|
void
|
|
|
|
srv_mon_set_module_control(
|
|
|
|
/*=======================*/
|
|
|
|
monitor_id_t module_id, /*!< in: Module ID as in
|
|
|
|
monitor_counter_id. If it is
|
|
|
|
set to MONITOR_ALL_COUNTER, this means
|
|
|
|
we shall turn on all the counters */
|
|
|
|
mon_option_t set_option) /*!< in: Turn on/off reset the
|
|
|
|
counter */
|
|
|
|
{
|
2018-04-28 15:49:09 +03:00
|
|
|
lint ix;
|
|
|
|
lint start_id;
|
2012-08-01 17:27:34 +03:00
|
|
|
ibool set_current_module = FALSE;
|
|
|
|
|
|
|
|
ut_a(module_id <= NUM_MONITOR);
|
|
|
|
compile_time_assert(array_elements(innodb_counter_info)
|
|
|
|
== NUM_MONITOR);
|
2012-08-01 17:27:34 +03:00
|
|
|
|
|
|
|
/* The module_id must be an ID of MONITOR_MODULE type */
|
|
|
|
ut_a(innodb_counter_info[module_id].monitor_type & MONITOR_MODULE);
|
|
|
|
|
|
|
|
/* start with the first monitor in the module. If module_id
|
|
|
|
is MONITOR_ALL_COUNTER, this means we need to turn on all
|
|
|
|
monitor counters. */
|
|
|
|
if (module_id == MONITOR_ALL_COUNTER) {
|
|
|
|
start_id = 1;
|
|
|
|
} else if (innodb_counter_info[module_id].monitor_type
|
|
|
|
& MONITOR_GROUP_MODULE) {
|
|
|
|
/* Counters in this module are set as a group together
|
|
|
|
and cannot be turned on/off individually. Need to set
|
|
|
|
the on/off bit in the module counter */
|
|
|
|
start_id = module_id;
|
|
|
|
set_current_module = TRUE;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
start_id = module_id + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (ix = start_id; ix < NUM_MONITOR; ix++) {
|
|
|
|
/* if we hit the next module counter, we will
|
|
|
|
continue if we want to turn on all monitor counters,
|
|
|
|
and break if we are only turning on the counters in the
|
|
|
|
current module. */
|
|
|
|
if (innodb_counter_info[ix].monitor_type & MONITOR_MODULE) {
|
|
|
|
|
|
|
|
if (set_current_module) {
|
|
|
|
/* Continue to set on/off bit on current
|
|
|
|
module */
|
|
|
|
set_current_module = FALSE;
|
|
|
|
} else if (module_id == MONITOR_ALL_COUNTER) {
|
2016-09-06 09:43:16 +03:00
|
|
|
if (!(innodb_counter_info[ix].monitor_type
|
|
|
|
& MONITOR_GROUP_MODULE)) {
|
|
|
|
continue;
|
|
|
|
}
|
2012-08-01 17:27:34 +03:00
|
|
|
} else {
|
|
|
|
/* Hitting the next module, stop */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Cannot turn on a monitor that has already been turned on.
The user should be aware that some counters are already on
before turning them on again (which could reset the counter
value). */
|
|
|
|
if (MONITOR_IS_ON(ix) && (set_option == MONITOR_TURN_ON)) {
|
2016-08-12 11:17:45 +03:00
|
|
|
ib::info() << "Monitor '"
|
|
|
|
<< srv_mon_get_name((monitor_id_t) ix)
|
|
|
|
<< "' is already enabled.";
|
2012-08-01 17:27:34 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For some existing counters (server status variables),
we will get their counter values at the start/stop time
to calculate the actual value over that interval. */
|
|
|
|
if (innodb_counter_info[ix].monitor_type & MONITOR_EXISTING) {
|
|
|
|
srv_mon_process_existing_counter(
|
|
|
|
static_cast<monitor_id_t>(ix), set_option);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Currently support 4 operations on the monitor counters:
|
|
|
|
turn on, turn off, reset and reset all operations. */
|
|
|
|
switch (set_option) {
|
|
|
|
case MONITOR_TURN_ON:
|
|
|
|
MONITOR_ON(ix);
|
|
|
|
MONITOR_INIT(ix);
|
|
|
|
MONITOR_SET_START(ix);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MONITOR_TURN_OFF:
|
|
|
|
MONITOR_OFF(ix);
|
|
|
|
MONITOR_SET_OFF(ix);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MONITOR_RESET_VALUE:
|
|
|
|
srv_mon_reset(static_cast<monitor_id_t>(ix));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MONITOR_RESET_ALL_VALUE:
|
|
|
|
srv_mon_reset_all(static_cast<monitor_id_t>(ix));
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
ut_error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
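
/* Editor's usage sketch for srv_mon_set_module_control(): the module
ids and options used below all appear in this file; only the wrapping
caller is illustrative. */
static void
mon_module_control_example(void)
{
	/* Turn on every monitor counter. */
	srv_mon_set_module_control(MONITOR_ALL_COUNTER, MONITOR_TURN_ON);

	/* Reset only the recovery module ("module_log") counters. */
	srv_mon_set_module_control(MONITOR_MODULE_RECOVERY,
				   MONITOR_RESET_VALUE);

	/* Turn the recovery module counters back off. */
	srv_mon_set_module_control(MONITOR_MODULE_RECOVERY,
				   MONITOR_TURN_OFF);
}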
|
|
|
|
|
|
|
|
/****************************************************************//**
|
|
|
|
Get transaction system's rollback segment size in pages
|
|
|
|
@return size in pages */
|
|
|
|
static
|
|
|
|
ulint
|
|
|
|
srv_mon_get_rseg_size(void)
|
|
|
|
/*=======================*/
|
|
|
|
{
|
|
|
|
ulint i;
|
|
|
|
ulint value = 0;
|
|
|
|
|
|
|
|
/* rseg_array is a static array, so we can go through it without
|
|
|
|
mutex protection. In addition, since this is only an estimate of the
total rollback segment size, we avoid mutex contention by not
acquiring the rseg->mutex. */
|
|
|
|
for (i = 0; i < TRX_SYS_N_RSEGS; ++i) {
|
2017-12-22 16:15:41 +02:00
|
|
|
const trx_rseg_t* rseg = trx_sys.rseg_array[i];
|
2012-08-01 17:27:34 +03:00
|
|
|
|
|
|
|
if (rseg != NULL) {
|
|
|
|
value += rseg->curr_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return(value);
|
|
|
|
}
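
/* Editor's illustrative sketch: counters flagged MONITOR_EXISTING (see
srv_mon_process_existing_counter() below) mirror status variables that
cannot themselves be stopped or reset, so the monitor records a snapshot
when it is turned on and reports the difference on each read. The
parameter names below are hypothetical stand-ins for the bookkeeping
done by the MONITOR_* macros in srv0mon.h. */
static mon_type_t
mon_existing_delta(
	mon_type_t	live_value,	/* current value of the variable */
	mon_type_t	start_snapshot)	/* snapshot at MONITOR_TURN_ON */
{
	return(live_value - start_snapshot);
}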
|
|
|
|
|
|
|
|
/****************************************************************//**
|
|
|
|
This function consolidates some existing server counters used
|
|
|
|
by "system status variables". These existing system variables do not have
|
|
|
|
a mechanism to start/stop and reset the counters, so we simulate these
|
|
|
|
controls by remembering the corresponding counter values when the
|
|
|
|
corresponding monitors are turned on/off/reset, and do appropriate
|
|
|
|
mathematics to deduce the actual value. Please also refer to
|
|
|
|
srv_export_innodb_status() for related global counters used by
|
|
|
|
the existing status variables.*/
|
|
|
|
void
|
|
|
|
srv_mon_process_existing_counter(
|
|
|
|
/*=============================*/
|
|
|
|
monitor_id_t monitor_id, /*!< in: the monitor's ID as in
|
|
|
|
monitor_counter_id */
|
|
|
|
mon_option_t set_option) /*!< in: Turn on/off reset the
|
|
|
|
counter */
|
|
|
|
{
|
2013-03-26 00:03:13 +02:00
|
|
|
mon_type_t value;
|
|
|
|
monitor_info_t* monitor_info;
|
|
|
|
ibool update_min = FALSE;
|
2012-08-01 17:27:34 +03:00
|
|
|
|
|
|
|
monitor_info = srv_mon_get_info(monitor_id);
|
|
|
|
|
|
|
|
ut_a(monitor_info->monitor_type & MONITOR_EXISTING);
|
|
|
|
ut_a(monitor_id < NUM_MONITOR);
|
|
|
|
|
|
|
|
/* Get the value from corresponding global variable */
|
|
|
|
switch (monitor_id) {
|
|
|
|
/* export_vars.innodb_buffer_pool_reads. Num Reads from
|
|
|
|
disk (page not in buffer) */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_READS:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.buf_pool_reads;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_read_requests, the number of logical
|
|
|
|
read requests */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_READ_REQUESTS:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_page_gets;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_write_requests, the number of
|
|
|
|
write requests */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_WRITE_REQUEST:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.buf_pool_write_requests;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_wait_free */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_WAIT_FREE:
|
2021-01-07 11:18:13 +02:00
|
|
|
value = buf_pool.stat.LRU_waits;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_read_ahead */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_READ_AHEAD:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_ra_pages_read;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_read_ahead_evicted */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_ra_pages_evicted;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_pages_total */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_PAGE_TOTAL:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.get_n_pages();
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_pages_misc */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_PAGE_MISC:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.get_n_pages()
|
|
|
|
- UT_LIST_GET_LEN(buf_pool.LRU)
|
|
|
|
- UT_LIST_GET_LEN(buf_pool.free);
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_buffer_pool_pages_data */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_PAGES_DATA:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = UT_LIST_GET_LEN(buf_pool.LRU);
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2013-03-26 00:03:13 +02:00
|
|
|
/* innodb_buffer_pool_bytes_data */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_BYTES_DATA:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.LRU_bytes
|
|
|
|
+ (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
|
2020-02-12 14:45:21 +02:00
|
|
|
<< srv_page_size_shift);
|
2013-03-26 00:03:13 +02:00
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_buffer_pool_pages_dirty */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_PAGES_DIRTY:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = UT_LIST_GET_LEN(buf_pool.flush_list);
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2013-03-26 00:03:13 +02:00
|
|
|
/* innodb_buffer_pool_bytes_dirty */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_BYTES_DIRTY:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.flush_list_bytes;
|
2013-03-26 00:03:13 +02:00
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_buffer_pool_pages_free */
|
|
|
|
case MONITOR_OVLD_BUF_POOL_PAGES_FREE:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = UT_LIST_GET_LEN(buf_pool.free);
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_pages_created, the number of pages created */
|
|
|
|
case MONITOR_OVLD_PAGE_CREATED:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_pages_created;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_pages_written, the number of pages written */
|
|
|
|
case MONITOR_OVLD_PAGES_WRITTEN:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_pages_written;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2014-03-12 14:47:38 +02:00
|
|
|
/* innodb_index_pages_written, the number of index pages written */
|
2013-12-19 14:36:38 +02:00
|
|
|
case MONITOR_OVLD_INDEX_PAGES_WRITTEN:
|
|
|
|
value = srv_stats.index_pages_written;
|
|
|
|
break;
|
|
|
|
|
2014-03-12 14:47:38 +02:00
|
|
|
/* innodb_non_index_pages_written, the number of non-index pages written */
|
|
|
|
case MONITOR_OVLD_NON_INDEX_PAGES_WRITTEN:
|
|
|
|
value = srv_stats.non_index_pages_written;
|
|
|
|
break;
|
|
|
|
|
2021-03-09 08:29:38 +02:00
|
|
|
case MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE:
|
|
|
|
value = buf_lru_flush_page_count;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE:
|
|
|
|
value = buf_lru_freed_page_count;
|
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_pages_read */
|
|
|
|
case MONITOR_OVLD_PAGES_READ:
|
2020-03-18 21:48:00 +02:00
|
|
|
value = buf_pool.stat.n_pages_read;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2014-11-03 11:18:52 +02:00
|
|
|
/* Number of times secondary index lookup triggered cluster lookup */
|
|
|
|
case MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS:
|
|
|
|
value = srv_stats.n_sec_rec_cluster_reads;
|
|
|
|
break;
|
|
|
|
/* Number of times prefix optimization avoided triggering cluster
|
|
|
|
lookup */
|
|
|
|
case MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS_AVOIDED:
|
|
|
|
value = srv_stats.n_sec_rec_cluster_reads_avoided;
|
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_data_read, the amount of data read in bytes */
|
|
|
|
case MONITOR_OVLD_BYTE_READ:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.data_read;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_data_written, the amount of data written in bytes */
|
|
|
|
case MONITOR_OVLD_BYTE_WRITTEN:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.data_written;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_data_reads, the total number of data reads. */
|
|
|
|
case MONITOR_OVLD_OS_FILE_READ:
|
|
|
|
value = os_n_file_reads;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_data_writes, the total number of data writes */
|
|
|
|
case MONITOR_OVLD_OS_FILE_WRITE:
|
|
|
|
value = os_n_file_writes;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_data_fsyncs, number of fsync() operations so far. */
|
|
|
|
case MONITOR_OVLD_OS_FSYNC:
|
|
|
|
value = os_n_fsyncs;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_os_log_written */
|
|
|
|
case MONITOR_OVLD_OS_LOG_WRITTEN:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = (mon_type_t) srv_stats.os_log_written;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_os_log_fsyncs */
|
|
|
|
case MONITOR_OVLD_OS_LOG_FSYNC:
|
2019-11-25 22:32:24 +07:00
|
|
|
value = log_sys.get_flushes();
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_os_log_pending_fsyncs */
|
|
|
|
case MONITOR_OVLD_OS_LOG_PENDING_FSYNC:
|
2019-11-25 22:32:24 +07:00
|
|
|
value = log_sys.get_pending_flushes();
|
2012-08-01 17:27:34 +03:00
|
|
|
update_min = TRUE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_os_log_pending_writes */
|
|
|
|
case MONITOR_OVLD_OS_LOG_PENDING_WRITES:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.os_log_pending_writes;
|
2012-08-01 17:27:34 +03:00
|
|
|
update_min = TRUE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_log_waits */
|
|
|
|
case MONITOR_OVLD_LOG_WAITS:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.log_waits;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_log_write_requests */
|
|
|
|
case MONITOR_OVLD_LOG_WRITE_REQUEST:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.log_write_requests;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_log_writes */
|
|
|
|
case MONITOR_OVLD_LOG_WRITES:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.log_writes;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2016-08-12 11:17:45 +03:00
|
|
|
case MONITOR_OVLD_LOG_PADDED:
|
|
|
|
value = srv_stats.log_padded;
|
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_dblwr_writes */
|
|
|
|
case MONITOR_OVLD_SRV_DBLWR_WRITES:
|
2020-12-04 17:52:23 +02:00
|
|
|
buf_dblwr.lock();
|
|
|
|
value = buf_dblwr.batches();
|
|
|
|
buf_dblwr.unlock();
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_dblwr_pages_written */
|
|
|
|
case MONITOR_OVLD_SRV_DBLWR_PAGES_WRITTEN:
|
2020-12-04 17:52:23 +02:00
|
|
|
buf_dblwr.lock();
|
|
|
|
value = buf_dblwr.written();
|
|
|
|
buf_dblwr.unlock();
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_page_size */
|
|
|
|
case MONITOR_OVLD_SRV_PAGE_SIZE:
|
2018-04-27 13:49:25 +03:00
|
|
|
value = srv_page_size;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case MONITOR_OVLD_BUFFER_POOL_SIZE:
|
|
|
|
value = srv_buf_pool_size;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_rows_read */
|
|
|
|
case MONITOR_OLVD_ROW_READ:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.n_rows_read;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_rows_inserted */
|
|
|
|
case MONITOR_OLVD_ROW_INSERTED:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.n_rows_inserted;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_rows_deleted */
|
|
|
|
case MONITOR_OLVD_ROW_DELETED:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.n_rows_deleted;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_rows_updated */
|
|
|
|
case MONITOR_OLVD_ROW_UPDTATED:
|
2013-03-26 00:03:13 +02:00
|
|
|
value = srv_stats.n_rows_updated;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
2014-10-26 07:22:51 +02:00
|
|
|
/* innodb_system_rows_read */
|
|
|
|
case MONITOR_OLVD_SYSTEM_ROW_READ:
|
|
|
|
value = srv_stats.n_system_rows_read;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_system_rows_inserted */
|
|
|
|
case MONITOR_OLVD_SYSTEM_ROW_INSERTED:
|
|
|
|
value = srv_stats.n_system_rows_inserted;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_system_rows_deleted */
|
|
|
|
case MONITOR_OLVD_SYSTEM_ROW_DELETED:
|
|
|
|
value = srv_stats.n_system_rows_deleted;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_system_rows_updated */
|
|
|
|
case MONITOR_OLVD_SYSTEM_ROW_UPDATED:
|
|
|
|
value = srv_stats.n_system_rows_updated;
|
|
|
|
break;
|
|
|
|
|
2012-08-01 17:27:34 +03:00
|
|
|
/* innodb_row_lock_current_waits */
|
|
|
|
case MONITOR_OVLD_ROW_LOCK_CURRENT_WAIT:
|
|
|
|
// dirty read without lock_sys.wait_mutex
|
|
|
|
value = lock_sys.get_wait_pending();
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_row_lock_time */
|
|
|
|
case MONITOR_OVLD_LOCK_WAIT_TIME:
|
|
|
|
// dirty read without lock_sys.wait_mutex
|
|
|
|
value = lock_sys.get_wait_time_cumulative() / 1000;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_row_lock_time_max */
|
|
|
|
case MONITOR_OVLD_LOCK_MAX_WAIT_TIME:
|
|
|
|
// dirty read without lock_sys.wait_mutex
|
|
|
|
value = lock_sys.get_wait_time_max() / 1000;
|
2012-08-01 17:27:34 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* innodb_row_lock_time_avg */
|
|
|
|
case MONITOR_OVLD_LOCK_AVG_WAIT_TIME:
|
|
|
|
mysql_mutex_lock(&lock_sys.wait_mutex);
|
|
|
|
if (auto count = lock_sys.get_wait_cumulative()) {
|
|
|
|
value = lock_sys.get_wait_time_cumulative() / 1000
|
|
|
|
/ count;
|
2012-08-01 17:27:34 +03:00
|
|
|
} else {
|
|
|
|
value = 0;
|
|
|
|
}
|
|
|
|
		mysql_mutex_unlock(&lock_sys.wait_mutex);
		break;

	/* innodb_row_lock_waits */
	case MONITOR_OVLD_ROW_LOCK_WAIT:
		// dirty read without lock_sys.wait_mutex
		value = lock_sys.get_wait_cumulative();
		break;

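	/* Purge-related status: history list length and
	current rollback segment size */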
	case MONITOR_RSEG_HISTORY_LEN:
		value = trx_sys.rseg_history_len;
		break;

	case MONITOR_RSEG_CUR_SIZE:
		value = srv_mon_get_rseg_size();
		break;

	case MONITOR_OVLD_N_FILE_OPENED:
		value = fil_system.n_open;
		break;

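	/* Change buffer (insert buffer) merge and discard
	statistics, by buffered operation type */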
	case MONITOR_OVLD_IBUF_MERGE_INSERT:
		value = ibuf.n_merged_ops[IBUF_OP_INSERT];
		break;

	case MONITOR_OVLD_IBUF_MERGE_DELETE:
		value = ibuf.n_merged_ops[IBUF_OP_DELETE_MARK];
		break;

	case MONITOR_OVLD_IBUF_MERGE_PURGE:
		value = ibuf.n_merged_ops[IBUF_OP_DELETE];
		break;

	case MONITOR_OVLD_IBUF_MERGE_DISCARD_INSERT:
		value = ibuf.n_discarded_ops[IBUF_OP_INSERT];
		break;

	case MONITOR_OVLD_IBUF_MERGE_DISCARD_DELETE:
		value = ibuf.n_discarded_ops[IBUF_OP_DELETE_MARK];
		break;

	case MONITOR_OVLD_IBUF_MERGE_DISCARD_PURGE:
		value = ibuf.n_discarded_ops[IBUF_OP_DELETE];
		break;

	case MONITOR_OVLD_IBUF_MERGES:
		value = ibuf.n_merges;
		break;

	case MONITOR_OVLD_IBUF_SIZE:
		value = ibuf.size;
		break;

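	/* Server activity count, incremented on transaction
	commits and similar events */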
	case MONITOR_OVLD_SERVER_ACTIVITY:
		value = srv_get_activity_count();
		break;

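	/* Redo log status: flushed and current LSN, pending
	log and checkpoint I/O, and checkpoint age */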
	case MONITOR_OVLD_LSN_FLUSHDISK:
		value = log_sys.get_flushed_lsn();
		break;

	case MONITOR_OVLD_LSN_CURRENT:
		value = log_sys.get_lsn();
		break;

	case MONITOR_PENDING_LOG_FLUSH:
		value = static_cast<mon_type_t>(log_sys.pending_flushes);
		break;

	case MONITOR_PENDING_CHECKPOINT_WRITE:
		mysql_mutex_lock(&log_sys.mutex);
		value = static_cast<mon_type_t>(
			log_sys.n_pending_checkpoint_writes);
		mysql_mutex_unlock(&log_sys.mutex);
		break;

	case MONITOR_LOG_IO:
		mysql_mutex_lock(&log_sys.mutex);
		value = static_cast<mon_type_t>(log_sys.n_log_ios);
		mysql_mutex_unlock(&log_sys.mutex);
		break;

	case MONITOR_LSN_CHECKPOINT_AGE:
		mysql_mutex_lock(&log_sys.mutex);
		value = static_cast<mon_type_t>(log_sys.get_lsn()
						- log_sys.last_checkpoint_lsn);
		mysql_mutex_unlock(&log_sys.mutex);
		break;

	case MONITOR_OVLD_BUF_OLDEST_LSN:
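		/* Oldest modification LSN in buf_pool.flush_list,
		or 0 if no persistent pages are dirty */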
		mysql_mutex_lock(&buf_pool.flush_list_mutex);
		value = (mon_type_t) buf_pool.get_oldest_modification(0);
		mysql_mutex_unlock(&buf_pool.flush_list_mutex);
		break;

	case MONITOR_OVLD_LSN_CHECKPOINT:
		value = (mon_type_t) log_sys.last_checkpoint_lsn;
		break;

	case MONITOR_OVLD_MAX_AGE_ASYNC:
		value = log_sys.max_modified_age_async;
		break;

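	/* Adaptive hash index lookup statistics: searches
	served by the hash index vs. regular B-tree descents */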
#ifdef BTR_CUR_HASH_ADAPT
	case MONITOR_OVLD_ADAPTIVE_HASH_SEARCH:
		value = btr_cur_n_sea;
		break;

	case MONITOR_OVLD_ADAPTIVE_HASH_SEARCH_BTREE:
		value = btr_cur_n_non_sea;
		break;
#endif /* BTR_CUR_HASH_ADAPT */

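	/* Page compression and encryption statistics
	accumulated in srv_stats */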
	case MONITOR_OVLD_PAGE_COMPRESS_SAVED:
		value = srv_stats.page_compression_saved;
		break;
	case MONITOR_OVLD_PAGES_PAGE_COMPRESSED:
		value = srv_stats.pages_page_compressed;
		break;
	case MONITOR_OVLD_PAGE_COMPRESSED_TRIM_OP:
		value = srv_stats.page_compressed_trim_op;
		break;
	case MONITOR_OVLD_PAGES_PAGE_DECOMPRESSED:
		value = srv_stats.pages_page_decompressed;
		break;
	case MONITOR_OVLD_PAGES_PAGE_COMPRESSION_ERROR:
		value = srv_stats.pages_page_compression_error;
		break;
	case MONITOR_OVLD_PAGES_ENCRYPTED:
		value = srv_stats.pages_encrypted;
		break;
	case MONITOR_OVLD_PAGES_DECRYPTED:
		value = srv_stats.pages_decrypted;
		break;

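	/* Lock waits that failed: detected deadlocks and
	lock wait timeouts */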
	case MONITOR_DEADLOCK:
		value = lock_sys.deadlocks;
		break;
	case MONITOR_TIMEOUT:
		value = lock_sys.timeouts;
		break;

	default:
		ut_error;
	}

	switch (set_option) {
	case MONITOR_TURN_ON:
		/* Save the initial counter value in mon_start_value
		field */
		MONITOR_SAVE_START(monitor_id, value);
		return;

	case MONITOR_TURN_OFF:
		/* Save the counter value to mon_last_value when we
		turn off the monitor but do not yet reset it. Note that
		the counter has not yet been set to off in the bitmap
		table for a normal turn-off. We need to check the
		counter status (on/off) to avoid resetting the value
		of an already-off counter. */
		if (MONITOR_IS_ON(monitor_id)) {
			srv_mon_process_existing_counter(monitor_id,
							 MONITOR_GET_VALUE);
			MONITOR_SAVE_LAST(monitor_id);
		}
		return;

	case MONITOR_GET_VALUE:
		if (MONITOR_IS_ON(monitor_id)) {

			/* If the MONITOR_DISPLAY_CURRENT bit is on, we
			only record the current value, rather than
			the incremental value over a period. Most
			counters of this type are resource-related
			counters such as the number of buffer pages. */
			if (monitor_info->monitor_type
			    & MONITOR_DISPLAY_CURRENT) {
				MONITOR_SET(monitor_id, value);
			} else {
				/* Most status counters are monotonically
				increasing, so there is no need to update
				their minimum values. Only do so
				if "update_min" is set to TRUE */
				MONITOR_SET_DIFF(monitor_id, value);

				if (update_min
				    && (MONITOR_VALUE(monitor_id)
					< MONITOR_MIN_VALUE(monitor_id))) {
					MONITOR_MIN_VALUE(monitor_id) =
						MONITOR_VALUE(monitor_id);
				}
			}
		}
		return;

	case MONITOR_RESET_VALUE:
		if (!MONITOR_IS_ON(monitor_id)) {
			MONITOR_LAST_VALUE(monitor_id) = 0;
		}
		return;

	/* Nothing special for the reset-all operation for these
	existing counters */
	case MONITOR_RESET_ALL_VALUE:
		return;
	}
}

/*************************************************************//**
Reset a monitor, create a new base line with the current monitor
value. This baseline is recorded by MONITOR_VALUE_RESET(monitor) */
void
srv_mon_reset(
/*==========*/
	monitor_id_t	monitor)	/*!< in: monitor id */
{
	ibool	monitor_was_on;

	monitor_was_on = MONITOR_IS_ON(monitor);

	if (monitor_was_on) {
		/* Temporarily turn off the counter for the resetting
		operation */
		MONITOR_OFF(monitor);
	}

	/* Before resetting the current monitor value, first
	calculate and set the max/min value since monitor
	start */
	srv_mon_calc_max_since_start(monitor);
	srv_mon_calc_min_since_start(monitor);

	/* Monitors with MONITOR_DISPLAY_CURRENT bit
	are not incremental, no need to remember
	the reset value. */
	if (innodb_counter_info[monitor].monitor_type
	    & MONITOR_DISPLAY_CURRENT) {
		MONITOR_VALUE_RESET(monitor) = 0;
	} else {
		/* Remember the new baseline */
		MONITOR_VALUE_RESET(monitor) = MONITOR_VALUE_RESET(monitor)
					       + MONITOR_VALUE(monitor);
	}

	/* Reset the counter value */
	MONITOR_VALUE(monitor) = 0;
	MONITOR_MAX_VALUE(monitor) = MAX_RESERVED;
	MONITOR_MIN_VALUE(monitor) = MIN_RESERVED;

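	/* Remember when this monitor was last reset */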
	MONITOR_FIELD((monitor), mon_reset_time) = time(NULL);

	if (monitor_was_on) {
		MONITOR_ON(monitor);
	}
}

/*************************************************************//**
Turn on monitor counters that are marked as default ON. */
void
srv_mon_default_on(void)
/*====================*/
{
	ulint	ix;

	for (ix = 0; ix < NUM_MONITOR; ix++) {
		if (innodb_counter_info[ix].monitor_type
		    & MONITOR_DEFAULT_ON) {
			/* Turn on monitor counters that are default on */
			MONITOR_ON(ix);
			MONITOR_INIT(ix);
			MONITOR_SET_START(ix);
		}
	}
}