MDEV-21962 Allocate buf_pool statically

Thanks to MDEV-15058, there is only one InnoDB buffer pool.
Allocating buf_pool statically removes one level of pointer indirection
and makes code more readable, and removes the awkward initialization of
some buf_pool members.

While doing this, we will also declare some buf_pool_t data members
private and replace some functions with member functions. This is
mostly affecting buffer pool resizing.

This is not aiming to be a complete rewrite of buf_pool_t to
a proper class. Most of the buffer pool interface, such as
buf_page_get_gen(), will remain in the C programming style
for now.

buf_pool_t::withdrawing: Replaces buf_pool_withdrawing.
buf_pool_t::withdraw_clock_: Replaces buf_withdraw_clock.

buf_pool_t::create(): Replaces buf_pool_init().
buf_pool_t::close(): Replaces buf_pool_free().

buf_pool_t::will_be_withdrawn(): Replaces buf_block_will_be_withdrawn(),
buf_frame_will_be_withdrawn().

buf_pool_t::clear_hash_index(): Replaces buf_pool_clear_hash_index().
buf_pool_t::get_n_pages(): Replaces buf_pool_get_n_pages().
buf_pool_t::validate(): Replaces buf_validate().
buf_pool_t::print(): Replaces buf_print().
buf_pool_t::block_from_ahi(): Replaces buf_block_from_ahi().
buf_pool_t::is_block_field(): Replaces buf_pointer_is_block_field().
buf_pool_t::is_block_mutex(): Replaces buf_pool_is_block_mutex().
buf_pool_t::is_block_lock(): Replaces buf_pool_is_block_lock().
buf_pool_t::is_obsolete(): Replaces buf_pool_is_obsolete().
buf_pool_t::io_buf: Make default-constructible.
buf_pool_t::io_buf::create(): Delayed 'constructor'
buf_pool_t::io_buf::close(): Early 'destructor'

HazardPointer: Make default-constructible. Define all member functions
inline, also for derived classes.
This commit is contained in:
Marko Mäkelä 2020-03-18 21:48:00 +02:00
commit a786f50de5
29 changed files with 1873 additions and 2168 deletions

View file

@ -126,9 +126,9 @@ struct buf_page_info_t{
built on this page */
#endif /* BTR_CUR_HASH_ADAPT */
unsigned is_old:1; /*!< TRUE if the block is in the old
blocks in buf_pool->LRU_old */
blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31; /*!< the value of
buf_pool->freed_page_clock */
buf_pool.freed_page_clock */
unsigned zip_ssize:PAGE_ZIP_SSIZE_BITS;
/*!< Compressed page size */
unsigned page_state:BUF_PAGE_STATE_BITS; /*!< Page state */
@ -1637,22 +1637,22 @@ i_s_cmpmem_fill_low(
buf_buddy_stat_t buddy_stat_local[BUF_BUDDY_SIZES_MAX + 1];
/* Save buddy stats for buffer pool in local variables. */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
zip_free_len_local[x] = (x < BUF_BUDDY_SIZES) ?
UT_LIST_GET_LEN(buf_pool->zip_free[x]) : 0;
UT_LIST_GET_LEN(buf_pool.zip_free[x]) : 0;
buddy_stat_local[x] = buf_pool->buddy_stat[x];
buddy_stat_local[x] = buf_pool.buddy_stat[x];
if (reset) {
/* This is protected by buf_pool->mutex. */
buf_pool->buddy_stat[x].relocated = 0;
buf_pool->buddy_stat[x].relocated_usec = 0;
/* This is protected by buf_pool.mutex. */
buf_pool.buddy_stat[x].relocated = 0;
buf_pool.buddy_stat[x].relocated_usec = 0;
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
buf_buddy_stat_t* buddy_stat = &buddy_stat_local[x];
@ -4247,7 +4247,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
heap = mem_heap_create(10000);
for (ulint n = 0;
n < ut_min(buf_pool->n_chunks, buf_pool->n_chunks_new); n++) {
n < ut_min(buf_pool.n_chunks, buf_pool.n_chunks_new); n++) {
const buf_block_t* block;
ulint n_blocks;
buf_page_info_t* info_buffer;
@ -4258,8 +4258,8 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
ulint block_id = 0;
/* Get buffer block of the nth chunk */
block = buf_pool->chunks[n].blocks;
chunk_size = buf_pool->chunks[n].size;
block = buf_pool.chunks[n].blocks;
chunk_size = buf_pool.chunks[n].size;
num_page = 0;
while (chunk_size > 0) {
@ -4280,7 +4280,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
buffer pool info printout, we are not required to
preserve the overall consistency, so we can
release mutex periodically */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
/* GO through each block in the chunk */
for (n_blocks = num_to_process; n_blocks--; block++) {
@ -4291,7 +4291,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
num_page++;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
/* Fill in information schema table with information
just collected from the buffer chunk scan */
@ -4615,10 +4615,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
/* Aquire the mutex before allocating info_buffer, since
UT_LIST_GET_LEN(buf_pool->LRU) could change */
mutex_enter(&buf_pool->mutex);
UT_LIST_GET_LEN(buf_pool.LRU) could change */
mutex_enter(&buf_pool.mutex);
lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
/* Print error message if malloc fail */
info_buffer = (buf_page_info_t*) my_malloc(PSI_INSTRUMENT_ME,
@ -4633,7 +4633,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
/* Walk through Pool's LRU list and print the buffer page
information */
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool.LRU);
while (bpage != NULL) {
/* Use the same function that collect buffer info for
@ -4647,10 +4647,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
ut_ad(lru_pos == lru_len);
ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool->LRU));
ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool.LRU));
exit:
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (info_buffer) {
status = i_s_innodb_buf_page_lru_fill(
@ -7209,7 +7209,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
if (buf_pool_is_block_mutex(mutex)) {
if (buf_pool.is_block_mutex(mutex)) {
block_mutex = mutex;
block_mutex_oswait_count += mutex->count_os_wait;
continue;
@ -7257,7 +7257,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
if (buf_pool_is_block_lock(lock)) {
if (buf_pool.is_block_lock(lock)) {
block_lock = lock;
block_lock_oswait_count += lock->count_os_wait;
continue;