MDEV-21962 Allocate buf_pool statically

Thanks to MDEV-15058, there is only one InnoDB buffer pool.
Allocating buf_pool statically removes one level of pointer indirection
and makes code more readable, and removes the awkward initialization of
some buf_pool members.

While doing this, we will also declare some buf_pool_t data members
private and replace some functions with member functions. This is
mostly affecting buffer pool resizing.

This is not aiming to be a complete rewrite of buf_pool_t to
a proper class. Most of the buffer pool interface, such as
buf_page_get_gen(), will remain in the C programming style
for now.

buf_pool_t::withdrawing: Replaces buf_pool_withdrawing.
buf_pool_t::withdraw_clock_: Replaces buf_withdraw_clock.

buf_pool_t::create(): Replaces buf_pool_init().
buf_pool_t::close(): Replaces buf_pool_free().

buf_pool_t::will_be_withdrawn(): Replaces buf_block_will_be_withdrawn(),
buf_frame_will_be_withdrawn().

buf_pool_t::clear_hash_index(): Replaces buf_pool_clear_hash_index().
buf_pool_t::get_n_pages(): Replaces buf_pool_get_n_pages().
buf_pool_t::validate(): Replaces buf_validate().
buf_pool_t::print(): Replaces buf_print().
buf_pool_t::block_from_ahi(): Replaces buf_block_from_ahi().
buf_pool_t::is_block_field(): Replaces buf_pointer_is_block_field().
buf_pool_t::is_block_mutex(): Replaces buf_pool_is_block_mutex().
buf_pool_t::is_block_lock(): Replaces buf_pool_is_block_lock().
buf_pool_t::is_obsolete(): Replaces buf_pool_is_obsolete().
buf_pool_t::io_buf: Make default-constructible.
buf_pool_t::io_buf::create(): Delayed 'constructor'
buf_pool_t::io_buf::close(): Early 'destructor'

HazardPointer: Make default-constructible. Define all member functions
inline, also for derived classes.
This commit is contained in:
Marko Mäkelä 2020-03-18 21:48:00 +02:00
commit a786f50de5
29 changed files with 1873 additions and 2168 deletions

View file

@ -1389,7 +1389,7 @@ btr_cur_search_to_nth_level_func(
#else
info = btr_search_get_info(index);
if (!buf_pool_is_obsolete(info->withdraw_clock)) {
if (!buf_pool.is_obsolete(info->withdraw_clock)) {
guess = info->root_guess;
} else {
guess = NULL;
@ -1461,7 +1461,7 @@ btr_cur_search_to_nth_level_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_pool->n_pend_reads) {
&& buf_pool.n_pend_reads) {
x_latch_index:
mtr_x_lock_index(index, mtr);
} else if (index->is_spatial()
@ -1837,7 +1837,7 @@ retry_page_get:
#ifdef BTR_CUR_ADAPT
if (block != guess) {
info->root_guess = block;
info->withdraw_clock = buf_withdraw_clock;
info->withdraw_clock = buf_pool.withdraw_clock();
}
#endif
}
@ -2590,7 +2590,7 @@ btr_cur_open_at_index_side_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_pool->n_pend_reads) {
&& buf_pool.n_pend_reads) {
mtr_x_lock_index(index, mtr);
} else {
mtr_sx_lock_index(index, mtr);
@ -2917,7 +2917,7 @@ btr_cur_open_at_rnd_pos_func(
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
&& trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_pool->n_pend_reads) {
&& buf_pool.n_pend_reads) {
mtr_x_lock_index(index, mtr);
} else {
mtr_sx_lock_index(index, mtr);
@ -7062,7 +7062,7 @@ btr_blob_free(
mtr_commit(mtr);
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
/* Only free the block if it is still allocated to
the same file page. */
@ -7081,7 +7081,7 @@ btr_blob_free(
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
/** Helper class used while writing blob pages, during insert or update. */

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2016, 2019, MariaDB Corporation.
Copyright (c) 2016, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -202,7 +202,7 @@ before_first:
/* Function try to check if block is S/X latch. */
cursor->modify_clock = buf_block_get_modify_clock(block);
cursor->withdraw_clock = buf_withdraw_clock;
cursor->withdraw_clock = buf_pool.withdraw_clock();
}
/**************************************************************//**
@ -309,7 +309,7 @@ btr_pcur_restore_position_func(
case BTR_MODIFY_PREV:
/* Try optimistic restoration. */
if (!buf_pool_is_obsolete(cursor->withdraw_clock)
if (!buf_pool.is_obsolete(cursor->withdraw_clock)
&& btr_cur_optimistic_latch_leaves(
cursor->block_when_stored, cursor->modify_clock,
&latch_mode, btr_pcur_get_btr_cur(cursor),
@ -416,7 +416,7 @@ btr_pcur_restore_position_func(
cursor->modify_clock = buf_block_get_modify_clock(
cursor->block_when_stored);
cursor->old_stored = true;
cursor->withdraw_clock = buf_withdraw_clock;
cursor->withdraw_clock = buf_pool.withdraw_clock();
mem_heap_free(heap);

View file

@ -394,7 +394,7 @@ void btr_search_disable(bool need_mutex)
}
/* Set all block->index = NULL. */
buf_pool_clear_hash_index();
buf_pool.clear_hash_index();
/* Clear the adaptive hash index. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
@ -408,12 +408,12 @@ void btr_search_disable(bool need_mutex)
/** Enable the adaptive hash search system. */
void btr_search_enable()
{
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
btr_search_x_lock_all();
btr_search_enabled = true;
@ -840,6 +840,82 @@ btr_search_failure(btr_search_t* info, btr_cur_t* cursor)
info->last_hash_succ = FALSE;
}
#ifdef BTR_CUR_HASH_ADAPT
/** Clear the adaptive hash index on all pages in the buffer pool. */
inline void buf_pool_t::clear_hash_index()
{
  ut_ad(btr_search_own_all(RW_LOCK_X));
  ut_ad(!resizing);
  ut_ad(!btr_search_enabled);

  /* Iterate over every chunk. Note the post-decrement in the loop
  condition: it lets the body run for chunks[0] as well (a pre-decrement
  `--chunk != chunks` would terminate before visiting the first chunk,
  leaving stale adaptive hash index pointers behind). */
  for (chunk_t *chunk= chunks + n_chunks; chunk-- != chunks; )
  {
    for (buf_block_t *block= chunk->blocks, * const end= block + chunk->size;
         block != end; block++)
    {
      dict_index_t *index= block->index;
      assert_block_ahi_valid(block);

      /* We can clear block->index and block->n_pointers when
      btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */

      if (!index)
      {
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
        ut_a(!block->n_pointers);
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
        continue;
      }

      ut_d(buf_page_state state= buf_block_get_state(block));
      /* Another thread may have set the state to
      BUF_BLOCK_REMOVE_HASH in buf_LRU_block_remove_hashed().

      The state change in buf_pool_t::realloc() is not observable
      here, because in that case we would have !block->index.

      In the end, the entire adaptive hash index will be removed. */
      ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
      block->n_pointers= 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
      block->index= nullptr;
    }
  }
}
#endif /* BTR_CUR_HASH_ADAPT */
/** Get a buffer block from an adaptive hash index pointer.
This function does not return if the block is not identified.
@param ptr pointer to within a page frame
@return pointer to block, never NULL */
inline buf_block_t* buf_pool_t::block_from_ahi(const byte *ptr) const
{
  chunk_t::map *chunk_map = chunk_t::map_ref;
  ut_ad(chunk_t::map_ref == chunk_t::map_reg);
  ut_ad(!resizing);

  /* upper_bound() returns the first chunk whose base address is
  strictly greater than ptr. Hence the chunk that contains ptr is the
  preceding map entry: (--it)->second, or the last entry
  (rbegin()->second) when ptr lies at or beyond the last chunk's base.
  ptr must not precede the first chunk (the ut_a below). */
  chunk_t::map::const_iterator it= chunk_map->upper_bound(ptr);
  ut_a(it != chunk_map->begin());

  chunk_t *chunk= it == chunk_map->end()
    ? chunk_map->rbegin()->second
    : (--it)->second;

  /* Index of the page frame containing ptr within the chunk. */
  const size_t offs= size_t(ptr - chunk->blocks->frame) >> srv_page_size_shift;
  ut_a(offs < chunk->size);

  buf_block_t *block= &chunk->blocks[offs];

  /* buf_pool_t::chunk_t::init() invokes buf_block_init() so that
  block[n].frame == block->frame + n * srv_page_size.  Check it. */
  ut_ad(block->frame == page_align(ptr));
  /* Read the state of the block without holding a mutex.
  A state transition from BUF_BLOCK_FILE_PAGE to
  BUF_BLOCK_REMOVE_HASH is possible during this execution. */
  ut_d(const buf_page_state state = buf_block_get_state(block));
  ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
  return block;
}
/** Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
@ -944,7 +1020,7 @@ fail:
return(FALSE);
}
buf_block_t* block = buf_block_from_ahi(rec);
buf_block_t* block = buf_pool.block_from_ahi(rec);
if (use_latch) {
mutex_enter(&block->mutex);
@ -983,7 +1059,7 @@ got_no_latch:
}
mtr->memo_push(block, fix_type);
buf_pool->stat.n_page_gets++;
buf_pool.stat.n_page_gets++;
rw_lock_s_unlock(use_latch);
@ -1074,7 +1150,7 @@ got_no_latch:
#endif
/* Increment the page get statistics though we did not really
fix the page: for user info only */
++buf_pool->stat.n_page_gets;
++buf_pool.stat.n_page_gets;
if (!ahi_latch) {
buf_page_make_young_if_needed(&block->page);
@ -1087,7 +1163,7 @@ got_no_latch:
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool->page_hash
has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block)
{
@ -1112,7 +1188,7 @@ retry:
not in the adaptive hash index. */
index = block->index;
/* This debug check uses a dirty read that could theoretically cause
false positives while buf_pool_clear_hash_index() is executing. */
false positives while buf_pool.clear_hash_index() is executing. */
assert_block_ahi_valid(block);
ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));
@ -1990,7 +2066,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
rec_offs_init(offsets_);
btr_search_x_lock_all();
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@ -2000,13 +2076,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@ -2026,7 +2102,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
for (; node != NULL; node = node->next) {
const buf_block_t* block
= buf_block_from_ahi((byte*) node->data);
= buf_pool.block_from_ahi((byte*) node->data);
const buf_block_t* hash_block;
index_id_t page_index_id;
@ -2050,7 +2126,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* When a block is being freed,
buf_LRU_search_and_free_block() first
removes the block from
buf_pool->page_hash by calling
buf_pool.page_hash by calling
buf_LRU_block_remove_hashed_page().
After that, it invokes
btr_search_drop_page_hash_index() to
@ -2112,13 +2188,13 @@ btr_search_hash_table_validate(ulint hash_table_id)
/* We release search latches every once in a while to
give other queries a chance to run. */
if (i != 0) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
@ -2141,7 +2217,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
btr_search_x_unlock_all();
if (UNIV_LIKELY_NULL(heap)) {

View file

@ -184,26 +184,26 @@ struct CheckZipFree {
@param[in] i buddy size to validate */
static void buf_buddy_list_validate(ulint i)
{
ut_list_validate(buf_pool->zip_free[i], CheckZipFree(i));
ut_list_validate(buf_pool.zip_free[i], CheckZipFree(i));
}
/**********************************************************************//**
Debug function to validate that a buffer is indeed free i.e.: in the
zip_free[].
@param[in] buf block to check
@param[in] i index of buf_pool->zip_free[]
@param[in] i index of buf_pool.zip_free[]
@return true if free */
static bool buf_buddy_check_free(const buf_buddy_free_t* buf, ulint i)
{
const ulint size = BUF_BUDDY_LOW << i;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!ut_align_offset(buf, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
buf_buddy_free_t* itr;
for (itr = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
for (itr = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
itr && itr != buf;
itr = UT_LIST_GET_NEXT(list, itr)) {
}
@ -223,7 +223,7 @@ buf_buddy_is_free(
/*==============*/
buf_buddy_free_t* buf, /*!< in: block to check */
ulint i) /*!< in: index of
buf_pool->zip_free[] */
buf_pool.zip_free[] */
{
#ifdef UNIV_DEBUG
const ulint size = BUF_BUDDY_LOW << i;
@ -261,54 +261,54 @@ buf_buddy_is_free(
/** Add a block to the head of the appropriate buddy free list.
@param[in,out] buf block to be freed
@param[in] i index of buf_pool->zip_free[] */
@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_add_to_free(buf_buddy_free_t* buf, ulint i)
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(buf_pool->zip_free[i].start != buf);
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_pool.zip_free[i].start != buf);
buf_buddy_stamp_free(buf, i);
UT_LIST_ADD_FIRST(buf_pool->zip_free[i], buf);
UT_LIST_ADD_FIRST(buf_pool.zip_free[i], buf);
ut_d(buf_buddy_list_validate(i));
}
/** Remove a block from the appropriate buddy free list.
@param[in,out] buf block to be freed
@param[in] i index of buf_pool->zip_free[] */
@param[in] i index of buf_pool.zip_free[] */
UNIV_INLINE
void
buf_buddy_remove_from_free(buf_buddy_free_t* buf, ulint i)
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_buddy_check_free(buf, i));
UT_LIST_REMOVE(buf_pool->zip_free[i], buf);
UT_LIST_REMOVE(buf_pool.zip_free[i], buf);
buf_buddy_stamp_nonfree(buf, i);
}
/** Try to allocate a block from buf_pool->zip_free[].
@param[in] i index of buf_pool->zip_free[]
@return allocated block, or NULL if buf_pool->zip_free[] was empty */
/** Try to allocate a block from buf_pool.zip_free[].
@param[in] i index of buf_pool.zip_free[]
@return allocated block, or NULL if buf_pool.zip_free[] was empty */
static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
{
buf_buddy_free_t* buf;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_a(i < BUF_BUDDY_SIZES);
ut_a(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
ut_d(buf_buddy_list_validate(i));
buf = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
buf = UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
if (buf_pool->curr_size < buf_pool->old_size
&& UT_LIST_GET_LEN(buf_pool->withdraw)
< buf_pool->withdraw_target) {
if (buf_pool.curr_size < buf_pool.old_size
&& UT_LIST_GET_LEN(buf_pool.withdraw)
< buf_pool.withdraw_target) {
while (buf != NULL
&& buf_frame_will_be_withdrawn(
&& buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(buf))) {
/* This should be withdrawn, not to be allocated */
buf = UT_LIST_GET_NEXT(list, buf);
@ -326,7 +326,7 @@ static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
reinterpret_cast<buf_buddy_free_t*>(
buf->stamp.bytes
+ (BUF_BUDDY_LOW << i));
ut_ad(!buf_pool_contains_zip(buddy));
ut_ad(!buf_pool.contains_zip(buddy));
buf_buddy_add_to_free(buddy, i);
}
}
@ -356,11 +356,11 @@ buf_buddy_block_free(void* buf)
buf_page_t* bpage;
buf_block_t* block;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_a(!ut_align_offset(buf, srv_page_size));
HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
HASH_SEARCH(hash, buf_pool.zip_hash, fold, buf_page_t*, bpage,
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
&& bpage->in_zip_hash && !bpage->in_page_hash),
((buf_block_t*) bpage)->frame == buf);
@ -369,7 +369,7 @@ buf_buddy_block_free(void* buf)
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->in_zip_hash);
ut_d(bpage->in_zip_hash = FALSE);
HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
HASH_DELETE(buf_page_t, hash, buf_pool.zip_hash, fold, bpage);
ut_d(memset(buf, 0, srv_page_size));
UNIV_MEM_INVALID(buf, srv_page_size);
@ -379,8 +379,8 @@ buf_buddy_block_free(void* buf)
buf_LRU_block_free_non_file_page(block);
buf_page_mutex_exit(block);
ut_ad(buf_pool->buddy_n_frames > 0);
ut_d(buf_pool->buddy_n_frames--);
ut_ad(buf_pool.buddy_n_frames > 0);
ut_d(buf_pool.buddy_n_frames--);
}
/**********************************************************************//**
@ -392,8 +392,8 @@ buf_buddy_block_register(
buf_block_t* block) /*!< in: buffer frame to allocate */
{
const ulint fold = BUF_POOL_ZIP_FOLD(block);
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);
buf_block_set_state(block, BUF_BLOCK_MEMORY);
@ -404,15 +404,15 @@ buf_buddy_block_register(
ut_ad(!block->page.in_page_hash);
ut_ad(!block->page.in_zip_hash);
ut_d(block->page.in_zip_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
HASH_INSERT(buf_page_t, hash, buf_pool.zip_hash, fold, &block->page);
ut_d(buf_pool->buddy_n_frames++);
ut_d(buf_pool.buddy_n_frames++);
}
/** Allocate a block from a bigger object.
@param[in] buf a block that is free to use
@param[in] i index of buf_pool->zip_free[]
@param[in] j size of buf as an index of buf_pool->zip_free[]
@param[in] i index of buf_pool.zip_free[]
@param[in] j size of buf as an index of buf_pool.zip_free[]
@return allocated block */
static
void*
@ -441,15 +441,15 @@ buf_buddy_alloc_from(void* buf, ulint i, ulint j)
}
/** Allocate a block.
@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
@param[out] lru whether buf_pool->mutex was temporarily released
@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte *buf_buddy_alloc_low(ulint i, bool *lru)
{
buf_block_t* block;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
if (i < BUF_BUDDY_SIZES) {
@ -461,7 +461,7 @@ byte *buf_buddy_alloc_low(ulint i, bool *lru)
}
}
/* Try allocating from the buf_pool->free list. */
/* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block) {
@ -469,9 +469,9 @@ byte *buf_buddy_alloc_low(ulint i, bool *lru)
}
/* Try replacing an uncompressed page in the buffer pool. */
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
block = buf_LRU_get_free_block();
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (lru) {
*lru = true;
}
@ -483,7 +483,7 @@ alloc_big:
block->frame, i, BUF_BUDDY_SIZES);
func_exit:
buf_pool->buddy_stat[i].used++;
buf_pool.buddy_stat[i].used++;
return reinterpret_cast<byte*>(block);
}
@ -491,7 +491,7 @@ func_exit:
function will release and lock it again.
@param[in] src block to relocate
@param[in] dst free block to relocated to
@param[in] i index of buf_pool->zip_free[]
@param[in] i index of buf_pool.zip_free[]
@param[in] force true if we must relocated always
@return true if relocated */
static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
@ -501,8 +501,8 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
ulint space;
ulint offset;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(!ut_align_offset(src, size));
ut_ad(!ut_align_offset(dst, size));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@ -531,7 +531,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
if (!bpage || bpage->zip.data != src) {
/* The block has probably been freshly
allocated by buf_LRU_get_free_block() but not
added to buf_pool->page_hash yet. Obviously,
added to buf_pool.page_hash yet. Obviously,
it cannot be relocated. */
rw_lock_x_unlock(hash_lock);
@ -543,7 +543,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/* It might be just uninitialized page.
We should search from LRU list also. */
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
while (bpage != NULL) {
if (bpage->zip.data == src) {
hash_lock = buf_page_hash_lock_get(bpage->id);
@ -593,7 +593,7 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
buf_buddy_mem_invalid(
reinterpret_cast<buf_buddy_free_t*>(src), i);
buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i];
buf_buddy_stat_t* buddy_stat = &buf_pool.buddy_stat[i];
buddy_stat->relocated++;
buddy_stat->relocated_usec+= (my_interval_timer() - ns) / 1000;
return(true);
@ -608,18 +608,18 @@ static bool buf_buddy_relocate(void* src, void* dst, ulint i, bool force)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i)
{
buf_buddy_free_t* buddy;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
ut_ad(buf_pool->buddy_stat[i].used > 0);
ut_ad(buf_pool.buddy_stat[i].used > 0);
buf_pool->buddy_stat[i].used--;
buf_pool.buddy_stat[i].used--;
recombine:
UNIV_MEM_ALLOC(buf, BUF_BUDDY_LOW << i);
@ -630,13 +630,13 @@ recombine:
ut_ad(i < BUF_BUDDY_SIZES);
ut_ad(buf == ut_align_down(buf, BUF_BUDDY_LOW << i));
ut_ad(!buf_pool_contains_zip(buf));
ut_ad(!buf_pool.contains_zip(buf));
/* Do not recombine blocks if there are few free blocks.
We may waste up to 15360*max_len bytes to free blocks
(1024 + 2048 + 4096 + 8192 = 15360) */
if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16
&& buf_pool->curr_size >= buf_pool->old_size) {
if (UT_LIST_GET_LEN(buf_pool.zip_free[i]) < 16
&& buf_pool.curr_size >= buf_pool.old_size) {
goto func_exit;
}
@ -650,7 +650,7 @@ recombine:
/* The buddy is free: recombine */
buf_buddy_remove_from_free(buddy, i);
buddy_is_free:
ut_ad(!buf_pool_contains_zip(buddy));
ut_ad(!buf_pool.contains_zip(buddy));
i++;
buf = ut_align_down(buf, BUF_BUDDY_LOW << i);
@ -662,7 +662,7 @@ buddy_is_free:
/* The buddy is not free. Is there a free block of
this size? */
if (buf_buddy_free_t* zip_buf =
UT_LIST_GET_FIRST(buf_pool->zip_free[i])) {
UT_LIST_GET_FIRST(buf_pool.zip_free[i])) {
/* Remove the block from the free list, because
a successful buf_buddy_relocate() will overwrite
@ -700,8 +700,8 @@ buf_buddy_realloc(void* buf, ulint size)
buf_block_t* block = NULL;
ulint i = buf_buddy_get_slot(size);
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.zip_mutex));
ut_ad(i <= BUF_BUDDY_SIZES);
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
@ -711,7 +711,7 @@ buf_buddy_realloc(void* buf, ulint size)
}
if (block == NULL) {
/* Try allocating from the buf_pool->free list. */
/* Try allocating from the buf_pool.free list. */
block = buf_LRU_get_free_only();
if (block == NULL) {
@ -725,7 +725,7 @@ buf_buddy_realloc(void* buf, ulint size)
block->frame, i, BUF_BUDDY_SIZES));
}
buf_pool->buddy_stat[i].used++;
buf_pool.buddy_stat[i].used++;
/* Try to relocate the buddy of buf to the free block. */
if (buf_buddy_relocate(buf, block, i, true)) {
@ -742,16 +742,16 @@ buf_buddy_realloc(void* buf, ulint size)
/** Combine all pairs of free buddies. */
void buf_buddy_condense_free()
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(buf_pool->curr_size < buf_pool->old_size);
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_pool.curr_size < buf_pool.old_size);
for (ulint i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) {
for (ulint i = 0; i < UT_ARR_SIZE(buf_pool.zip_free); ++i) {
buf_buddy_free_t* buf =
UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
UT_LIST_GET_FIRST(buf_pool.zip_free[i]);
/* seek to withdraw target */
while (buf != NULL
&& !buf_frame_will_be_withdrawn(
&& !buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(buf))) {
buf = UT_LIST_GET_NEXT(list, buf);
}
@ -769,7 +769,7 @@ void buf_buddy_condense_free()
/* seek to the next withdraw target */
while (true) {
while (next != NULL
&& !buf_frame_will_be_withdrawn(
&& !buf_pool.will_be_withdrawn(
reinterpret_cast<byte*>(next))) {
next = UT_LIST_GET_NEXT(list, next);
}
@ -786,7 +786,7 @@ void buf_buddy_condense_free()
/* Both buf and buddy are free.
Try to combine them. */
buf_buddy_remove_from_free(buf, i);
buf_pool->buddy_stat[i].used++;
buf_pool.buddy_stat[i].used++;
buf_buddy_free_low(buf, i);
}

File diff suppressed because it is too large Load diff

View file

@ -288,13 +288,13 @@ buf_dump(
ulint n_pages;
ulint j;
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
n_pages = UT_LIST_GET_LEN(buf_pool->LRU);
n_pages = UT_LIST_GET_LEN(buf_pool.LRU);
/* skip empty buffer pools */
if (n_pages == 0) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
goto done;
}
@ -303,7 +303,7 @@ buf_dump(
/* limit the number of total pages dumped to X% of the
total number of pages */
t_pages = buf_pool->curr_size * srv_buf_pool_dump_pct / 100;
t_pages = buf_pool.curr_size * srv_buf_pool_dump_pct / 100;
if (n_pages > t_pages) {
buf_dump_status(STATUS_INFO,
"Restricted to " ULINTPF
@ -322,7 +322,7 @@ buf_dump(
n_pages * sizeof(*dump)));
if (dump == NULL) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
fclose(f);
buf_dump_status(STATUS_ERR,
"Cannot allocate " ULINTPF " bytes: %s",
@ -332,7 +332,7 @@ buf_dump(
return;
}
for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU), j = 0;
for (bpage = UT_LIST_GET_FIRST(buf_pool.LRU), j = 0;
bpage != NULL && j < n_pages;
bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
@ -347,7 +347,7 @@ buf_dump(
bpage->id.page_no());
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
ut_a(j <= n_pages);
n_pages = j;
@ -550,7 +550,7 @@ buf_load()
/* If dump is larger than the buffer pool(s), then we ignore the
extra trailing. This could happen if a dump is made, then buffer
pool is shrunk and then load is attempted. */
dump_n = std::min(dump_n, buf_pool_get_n_pages());
dump_n = std::min(dump_n, buf_pool.get_n_pages());
if (dump_n != 0) {
dump = static_cast<buf_dump_t*>(ut_malloc_nokey(

View file

@ -204,9 +204,9 @@ in thrashing. */
static inline void incr_flush_list_size_in_bytes(const buf_block_t* block)
{
/* FIXME: use std::atomic! */
ut_ad(mutex_own(&buf_pool->flush_list_mutex));
buf_pool->stat.flush_list_bytes += block->physical_size();
ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
ut_ad(mutex_own(&buf_pool.flush_list_mutex));
buf_pool.stat.flush_list_bytes += block->physical_size();
ut_ad(buf_pool.stat.flush_list_bytes <= buf_pool.curr_pool_size);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@ -252,14 +252,14 @@ buf_flush_insert_in_flush_rbt(
buf_page_t* prev = NULL;
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
ut_ad(mutex_own(&buf_pool->flush_list_mutex));
ut_ad(mutex_own(&buf_pool.flush_list_mutex));
/* Insert this buffer into the rbt. */
c_node = rbt_insert(buf_pool->flush_rbt, &bpage, &bpage);
c_node = rbt_insert(buf_pool.flush_rbt, &bpage, &bpage);
ut_a(c_node != NULL);
/* Get the predecessor. */
p_node = rbt_prev(buf_pool->flush_rbt, c_node);
p_node = rbt_prev(buf_pool.flush_rbt, c_node);
if (p_node != NULL) {
buf_page_t** value;
@ -279,12 +279,12 @@ buf_flush_delete_from_flush_rbt(
/*============================*/
buf_page_t* bpage) /*!< in: bpage to be removed. */
{
ut_ad(mutex_own(&buf_pool->flush_list_mutex));
ut_ad(mutex_own(&buf_pool.flush_list_mutex));
#ifdef UNIV_DEBUG
ibool ret =
#endif /* UNIV_DEBUG */
rbt_delete(buf_pool->flush_rbt, &bpage);
rbt_delete(buf_pool.flush_rbt, &bpage);
ut_ad(ret);
}
@ -294,7 +294,7 @@ Compare two modified blocks in the buffer pool. The key for comparison
is:
key = <oldest_modification, space, offset>
This comparison is used to maintian ordering of blocks in the
buf_pool->flush_rbt.
buf_pool.flush_rbt.
Note that for the purpose of flush_rbt, we only need to order blocks
on the oldest_modification. The other two fields are used to uniquely
identify the blocks.
@ -313,7 +313,7 @@ buf_flush_block_cmp(
ut_ad(b1 != NULL);
ut_ad(b2 != NULL);
ut_ad(mutex_own(&buf_pool->flush_list_mutex));
ut_ad(mutex_own(&buf_pool.flush_list_mutex));
ut_ad(b1->in_flush_list);
ut_ad(b2->in_flush_list);
@ -339,12 +339,12 @@ void
buf_flush_init_flush_rbt(void)
/*==========================*/
{
mutex_enter(&buf_pool->flush_list_mutex);
ut_ad(buf_pool->flush_rbt == NULL);
mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(buf_pool.flush_rbt == NULL);
/* Create red black tree for speedy insertions in flush list. */
buf_pool->flush_rbt = rbt_create(
buf_pool.flush_rbt = rbt_create(
sizeof(buf_page_t*), buf_flush_block_cmp);
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@ -353,13 +353,13 @@ void
buf_flush_free_flush_rbt(void)
/*==========================*/
{
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
rbt_free(buf_pool->flush_rbt);
buf_pool->flush_rbt = NULL;
mutex_exit(&buf_pool->flush_list_mutex);
rbt_free(buf_pool.flush_rbt);
buf_pool.flush_rbt = NULL;
mutex_exit(&buf_pool.flush_list_mutex);
}
/** Insert a modified block into the flush list.
@ -367,12 +367,12 @@ buf_flush_free_flush_rbt(void)
@param[in] lsn oldest modification */
void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
{
ut_ad(!mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(log_flush_order_mutex_own());
ut_ad(buf_page_mutex_own(block));
ut_ad(lsn);
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(!block->page.in_flush_list);
ut_d(block->page.in_flush_list = TRUE);
ut_ad(!block->page.oldest_modification);
@ -382,9 +382,9 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
block->physical_size());
incr_flush_list_size_in_bytes(block);
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
/* The field in_LRU_list is protected by buf_pool->mutex, which
/* The field in_LRU_list is protected by buf_pool.mutex, which
we are not holding. However, while a block is in the flush
list, it is dirty and cannot be discarded, not from the
page_hash or from the LRU list. At most, the uncompressed
@ -402,18 +402,18 @@ void buf_flush_insert_into_flush_list(buf_block_t* block, lsn_t lsn)
if (buf_page_t* prev_b =
buf_flush_insert_in_flush_rbt(&block->page)) {
UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page);
UT_LIST_INSERT_AFTER(buf_pool.flush_list, prev_b, &block->page);
goto func_exit;
}
}
UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
UT_LIST_ADD_FIRST(buf_pool.flush_list, &block->page);
func_exit:
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_skip();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
}
/********************************************************************//**
@ -426,7 +426,7 @@ buf_flush_ready_for_replace(
buf_page_t* bpage) /*!< in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_LRU_list);
@ -453,7 +453,7 @@ buf_flush_ready_for_flush(
buf_page_in_file(bpage) */
buf_flush_t flush_type)/*!< in: type of flush */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
@ -489,18 +489,18 @@ void buf_flush_remove(buf_page_t* bpage)
INNODB_EXTEND_TIMEOUT_INTERVAL,
"Flush and remove page with tablespace id %u"
", flush list length " ULINTPF,
bpage->space, UT_LIST_GET_LEN(buf_pool->flush_list));
bpage->space, UT_LIST_GET_LEN(buf_pool.flush_list));
}
#endif
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
/* Important that we adjust the hazard pointer before removing
the bpage from flush list. */
buf_pool->flush_hp.adjust(bpage);
buf_pool.flush_hp.adjust(bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_POOL_WATCH:
@ -514,18 +514,18 @@ void buf_flush_remove(buf_page_t* bpage)
return;
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
UT_LIST_REMOVE(buf_pool->flush_list, bpage);
UT_LIST_REMOVE(buf_pool.flush_list, bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
break;
case BUF_BLOCK_FILE_PAGE:
UT_LIST_REMOVE(buf_pool->flush_list, bpage);
UT_LIST_REMOVE(buf_pool.flush_list, bpage);
break;
}
/* If the flush_rbt is active then delete from there as well. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
}
@ -533,7 +533,7 @@ void buf_flush_remove(buf_page_t* bpage)
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
buf_pool->stat.flush_list_bytes -= bpage->physical_size();
buf_pool.stat.flush_list_bytes -= bpage->physical_size();
bpage->oldest_modification = 0;
@ -541,7 +541,7 @@ void buf_flush_remove(buf_page_t* bpage)
buf_flush_validate_skip();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
}
/*******************************************************************//**
@ -564,10 +564,10 @@ buf_flush_relocate_on_flush_list(
buf_page_t* prev;
buf_page_t* prev_b = NULL;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
/* FIXME: At this point we have both buf_pool and flush_list
mutexes. Theoretically removal of a block from flush list is
@ -581,38 +581,38 @@ buf_flush_relocate_on_flush_list(
/* If recovery is active we must swap the control blocks in
the flush_rbt as well. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
prev_b = buf_flush_insert_in_flush_rbt(dpage);
}
/* Important that we adjust the hazard pointer before removing
the bpage from the flush list. */
buf_pool->flush_hp.adjust(bpage);
buf_pool.flush_hp.adjust(bpage);
/* Must be done after we have removed it from the flush_rbt
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
prev = UT_LIST_GET_PREV(list, bpage);
UT_LIST_REMOVE(buf_pool->flush_list, bpage);
UT_LIST_REMOVE(buf_pool.flush_list, bpage);
if (prev) {
ut_ad(prev->in_flush_list);
UT_LIST_INSERT_AFTER( buf_pool->flush_list, prev, dpage);
UT_LIST_INSERT_AFTER( buf_pool.flush_list, prev, dpage);
} else {
UT_LIST_ADD_FIRST(buf_pool->flush_list, dpage);
UT_LIST_ADD_FIRST(buf_pool.flush_list, dpage);
}
/* Just an extra check. Previous in flush_list
should be the same control block as in flush_rbt. */
ut_a(buf_pool->flush_rbt == NULL || prev_b == prev);
ut_a(buf_pool.flush_rbt == NULL || prev_b == prev);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_flush_validate_low();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
}
/** Update the flush system data structures when a write is completed.
@ -625,17 +625,17 @@ void buf_flush_write_complete(buf_page_t* bpage, bool dblwr)
buf_flush_remove(bpage);
const buf_flush_t flush_type = buf_page_get_flush_type(bpage);
buf_pool->n_flush[flush_type]--;
ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX);
buf_pool.n_flush[flush_type]--;
ut_ad(buf_pool.n_flush[flush_type] != ULINT_MAX);
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
if (buf_pool->n_flush[flush_type] == 0
&& buf_pool->init_flush[flush_type] == FALSE) {
if (buf_pool.n_flush[flush_type] == 0
&& buf_pool.init_flush[flush_type] == FALSE) {
/* The running flush batch has ended */
os_event_set(buf_pool->no_flush[flush_type]);
os_event_set(buf_pool.no_flush[flush_type]);
}
if (dblwr) {
@ -958,7 +958,7 @@ static byte* buf_page_encrypt(fil_space_t* space, buf_page_t* bpage, byte* s)
ut_ad(!bpage->zip_size() || !page_compressed);
/* Find free slot from temporary memory array */
buf_tmp_buffer_t *slot= buf_pool->io_buf.reserve();
buf_tmp_buffer_t *slot= buf_pool.io_buf_reserve();
ut_a(slot);
slot->allocate();
slot->out_buf= NULL;
@ -1042,9 +1042,9 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
const bool uncompressed= buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE;
BPageMutex *block_mutex= uncompressed
? &reinterpret_cast<buf_block_t*>(bpage)->mutex
: &buf_pool->zip_mutex;
: &buf_pool.zip_mutex;
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
mutex_enter(block_mutex);
buf_page_set_io_fix(bpage, BUF_IO_NONE);
@ -1055,8 +1055,8 @@ static void buf_flush_freed_page(buf_page_t *bpage, fil_space_t *space)
rw_lock_sx_unlock_gen(&reinterpret_cast<buf_block_t*>(bpage)->lock,
BUF_IO_WRITE);
buf_pool->stat.n_pages_written++;
mutex_exit(&buf_pool->mutex);
buf_pool.stat.n_pages_written++;
mutex_exit(&buf_pool.mutex);
const page_id_t page_id(bpage->id);
const auto zip_size= bpage->zip_size();
mutex_exit(block_mutex);
@ -1110,13 +1110,13 @@ buf_flush_write_block_low(
ut_ad(buf_page_in_file(bpage));
/* We are not holding buf_pool->mutex or block_mutex here.
/* We are not holding buf_pool.mutex or block_mutex here.
Nevertheless, it is safe to access bpage, because it is
io_fixed and oldest_modification != 0. Thus, it cannot be
relocated in the buffer pool or removed from flush_list or
LRU_list. */
ut_ad(!mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool->flush_list_mutex));
ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(!mutex_own(&buf_pool.flush_list_mutex));
ut_ad(!buf_page_get_mutex(bpage)->is_owned());
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
@ -1249,7 +1249,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
BPageMutex* block_mutex;
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
ut_ad(!sync || flush_type == BUF_FLUSH_SINGLE_PAGE);
@ -1260,7 +1260,7 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
bool is_uncompressed = (buf_page_get_state(bpage)
== BUF_BLOCK_FILE_PAGE);
ut_ad(is_uncompressed == (block_mutex != &buf_pool->zip_mutex));
ut_ad(is_uncompressed == (block_mutex != &buf_pool.zip_mutex));
rw_lock_t* rw_lock;
bool no_fix_count = bpage->buf_fix_count == 0;
@ -1290,16 +1290,16 @@ bool buf_flush_page(buf_page_t* bpage, buf_flush_t flush_type, bool sync)
buf_page_set_flush_type(bpage, flush_type);
if (buf_pool->n_flush[flush_type] == 0) {
os_event_reset(buf_pool->no_flush[flush_type]);
if (buf_pool.n_flush[flush_type] == 0) {
os_event_reset(buf_pool.no_flush[flush_type]);
}
++buf_pool->n_flush[flush_type];
ut_ad(buf_pool->n_flush[flush_type] != 0);
++buf_pool.n_flush[flush_type];
ut_ad(buf_pool.n_flush[flush_type] != 0);
mutex_exit(block_mutex);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (flush_type == BUF_FLUSH_LIST
&& is_uncompressed
@ -1335,7 +1335,7 @@ buf_flush_batch() and buf_flush_page().
@return whether the page was flushed and the mutex released */
bool buf_flush_page_try(buf_block_t* block)
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(buf_page_mutex_own(block));
@ -1364,13 +1364,13 @@ buf_flush_check_neighbor(
ut_ad(flush_type == BUF_FLUSH_LRU
|| flush_type == BUF_FLUSH_LIST);
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(page_id);
if (!bpage) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(false);
}
@ -1389,7 +1389,7 @@ buf_flush_check_neighbor(
}
mutex_exit(block_mutex);
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(ret);
}
@ -1419,7 +1419,7 @@ buf_flush_try_neighbors(
return 0;
}
if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN
if (UT_LIST_GET_LEN(buf_pool.LRU) < BUF_LRU_OLD_MIN_LEN
|| !srv_flush_neighbors || !space->is_rotational()) {
/* If there is little space or neighbor flushing is
not enabled then just flush the victim. */
@ -1433,8 +1433,8 @@ buf_flush_try_neighbors(
ulint buf_flush_area;
buf_flush_area = ut_min(
buf_pool->read_ahead_area,
buf_pool->curr_size / 16);
buf_pool.read_ahead_area,
buf_pool.curr_size / 16);
low = (page_id.page_no() / buf_flush_area) * buf_flush_area;
high = (page_id.page_no() / buf_flush_area + 1) * buf_flush_area;
@ -1503,12 +1503,12 @@ buf_flush_try_neighbors(
const page_id_t cur_page_id(page_id.space(), i);
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
bpage = buf_page_hash_get(cur_page_id);
if (bpage == NULL) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
continue;
}
@ -1536,7 +1536,7 @@ buf_flush_try_neighbors(
++count;
} else {
mutex_exit(block_mutex);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
continue;
@ -1544,7 +1544,7 @@ buf_flush_try_neighbors(
mutex_exit(block_mutex);
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
space->release_for_io();
@ -1579,7 +1579,7 @@ buf_flush_page_and_try_neighbors(
ulint n_to_flush,
ulint* count)
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
bool flushed;
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@ -1592,20 +1592,20 @@ buf_flush_page_and_try_neighbors(
const page_id_t page_id = bpage->id;
mutex_exit(block_mutex);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
/* Try to flush also all the neighbors */
*count += buf_flush_try_neighbors(
page_id, flush_type, *count, n_to_flush);
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
flushed = true;
} else {
mutex_exit(block_mutex);
flushed = false;
}
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
return(flushed);
}
@ -1624,35 +1624,35 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
{
ulint scanned = 0;
ulint count = 0;
ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
ulint lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
ulint lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
while (block != NULL
&& count < max
&& free_len < srv_LRU_scan_depth
&& lru_len > UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
&& lru_len > UT_LIST_GET_LEN(buf_pool.LRU) / 10) {
++scanned;
if (buf_LRU_free_page(&block->page, false)) {
/* Block was freed. buf_pool->mutex potentially
/* Block was freed. buf_pool.mutex potentially
released and reacquired */
++count;
block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
} else {
block = UT_LIST_GET_PREV(unzip_LRU, block);
}
free_len = UT_LIST_GET_LEN(buf_pool->free);
lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
free_len = UT_LIST_GET_LEN(buf_pool.free);
lru_len = UT_LIST_GET_LEN(buf_pool.unzip_LRU);
}
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@ -1675,29 +1675,29 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
{
buf_page_t* bpage;
ulint scanned = 0;
ulint free_len = UT_LIST_GET_LEN(buf_pool->free);
ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
ulint free_len = UT_LIST_GET_LEN(buf_pool.free);
ulint lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
ulint withdraw_depth = 0;
n->flushed = 0;
n->evicted = 0;
n->unzip_LRU_evicted = 0;
ut_ad(mutex_own(&buf_pool->mutex));
if (buf_pool->curr_size < buf_pool->old_size
&& buf_pool->withdraw_target > 0) {
withdraw_depth = buf_pool->withdraw_target
- UT_LIST_GET_LEN(buf_pool->withdraw);
ut_ad(mutex_own(&buf_pool.mutex));
if (buf_pool.curr_size < buf_pool.old_size
&& buf_pool.withdraw_target > 0) {
withdraw_depth = buf_pool.withdraw_target
- UT_LIST_GET_LEN(buf_pool.withdraw);
}
for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
for (bpage = UT_LIST_GET_LAST(buf_pool.LRU);
bpage != NULL && n->flushed + n->evicted < max
&& free_len < srv_LRU_scan_depth + withdraw_depth
&& lru_len > BUF_LRU_MIN_LEN;
++scanned,
bpage = buf_pool->lru_hp.get()) {
bpage = buf_pool.lru_hp.get()) {
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
buf_pool->lru_hp.set(prev);
buf_pool.lru_hp.set(prev);
BPageMutex* block_mutex = buf_page_get_mutex(bpage);
@ -1720,25 +1720,25 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t* n)
} else {
/* Can't evict or dispatch this block. Go to
previous. */
ut_ad(buf_pool->lru_hp.is_hp(prev));
ut_ad(buf_pool.lru_hp.is_hp(prev));
mutex_exit(block_mutex);
}
ut_ad(!mutex_own(block_mutex));
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
free_len = UT_LIST_GET_LEN(buf_pool->free);
lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
free_len = UT_LIST_GET_LEN(buf_pool.free);
lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
}
buf_pool->lru_hp.set(NULL);
buf_pool.lru_hp.set(NULL);
/* We keep track of all flushes happening as part of LRU
flush. When estimating the desired rate at which flush_list
should be flushed, we factor in this value. */
buf_lru_flush_page_count += n->flushed;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
if (n->evicted) {
MONITOR_INC_VALUE_CUMULATIVE(
@ -1793,22 +1793,22 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ulint count = 0;
ulint scanned = 0;
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
/* Start from the end of the list looking for a suitable
block to be flushed. */
mutex_enter(&buf_pool->flush_list_mutex);
ulint len = UT_LIST_GET_LEN(buf_pool->flush_list);
mutex_enter(&buf_pool.flush_list_mutex);
ulint len = UT_LIST_GET_LEN(buf_pool.flush_list);
/* In order not to degenerate this scan to O(n*n) we attempt
to preserve pointer of previous block in the flush list. To do
so we declare it a hazard pointer. Any thread working on the
flush list must check the hazard pointer and if it is removing
the same block then it must reset it. */
for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
count < min_n && bpage != NULL && len > 0
&& bpage->oldest_modification < lsn_limit;
bpage = buf_pool->flush_hp.get(),
bpage = buf_pool.flush_hp.get(),
++scanned) {
buf_page_t* prev;
@ -1817,8 +1817,8 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
ut_ad(bpage->in_flush_list);
prev = UT_LIST_GET_PREV(list, bpage);
buf_pool->flush_hp.set(prev);
mutex_exit(&buf_pool->flush_list_mutex);
buf_pool.flush_hp.set(prev);
mutex_exit(&buf_pool.flush_list_mutex);
#ifdef UNIV_DEBUG
bool flushed =
@ -1826,15 +1826,15 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
buf_flush_page_and_try_neighbors(
bpage, BUF_FLUSH_LIST, min_n, &count);
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
ut_ad(flushed || buf_pool->flush_hp.is_hp(prev));
ut_ad(flushed || buf_pool.flush_hp.is_hp(prev));
--len;
}
buf_pool->flush_hp.set(NULL);
mutex_exit(&buf_pool->flush_list_mutex);
buf_pool.flush_hp.set(NULL);
mutex_exit(&buf_pool.flush_list_mutex);
if (scanned) {
MONITOR_INC_VALUE_CUMULATIVE(
@ -1852,7 +1852,7 @@ static ulint buf_do_flush_list_batch(ulint min_n, lsn_t lsn_limit)
count);
}
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
return(count);
}
@ -1883,7 +1883,7 @@ buf_flush_batch(
ut_ad(flush_type == BUF_FLUSH_LRU
|| !sync_check_iterate(dict_sync_check()));
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
/* Note: The buffer pool mutex is released and reacquired within
the flush functions. */
@ -1899,7 +1899,7 @@ buf_flush_batch(
ut_error;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
DBUG_LOG("ib_buf", "flush " << flush_type << " completed");
}
@ -1931,23 +1931,23 @@ bool buf_flush_start(buf_flush_t flush_type)
{
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (buf_pool->n_flush[flush_type] > 0
|| buf_pool->init_flush[flush_type] == TRUE) {
if (buf_pool.n_flush[flush_type] > 0
|| buf_pool.init_flush[flush_type] == TRUE) {
/* There is already a flush batch of the same type running */
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(false);
}
buf_pool->init_flush[flush_type] = TRUE;
buf_pool.init_flush[flush_type] = TRUE;
os_event_reset(buf_pool->no_flush[flush_type]);
os_event_reset(buf_pool.no_flush[flush_type]);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(true);
}
@ -1956,20 +1956,20 @@ bool buf_flush_start(buf_flush_t flush_type)
@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST */
void buf_flush_end(buf_flush_t flush_type)
{
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
buf_pool->init_flush[flush_type] = FALSE;
buf_pool.init_flush[flush_type] = FALSE;
buf_pool->try_LRU_scan = TRUE;
buf_pool.try_LRU_scan = TRUE;
if (buf_pool->n_flush[flush_type] == 0) {
if (buf_pool.n_flush[flush_type] == 0) {
/* The running flush batch has ended */
os_event_set(buf_pool->no_flush[flush_type]);
os_event_set(buf_pool.no_flush[flush_type]);
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (!srv_read_only_mode) {
buf_dblwr_flush_buffered_writes();
@ -1982,7 +1982,7 @@ void buf_flush_wait_batch_end(buf_flush_t type)
{
ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);
thd_wait_begin(NULL, THD_WAIT_DISKIO);
os_event_wait(buf_pool->no_flush[type]);
os_event_wait(buf_pool.no_flush[type]);
thd_wait_end(NULL);
}
@ -2029,7 +2029,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
blocks, because anyway we need fsync to make chekpoint.
So, we don't need to wait for the batch end here. */
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
buf_page_t* bpage;
@ -2037,7 +2037,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
list. We would only need to write out temporary pages if the
page is about to be evicted from the buffer pool, and the page
contents is still needed (the page has not been freed). */
for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
for (bpage = UT_LIST_GET_LAST(buf_pool.flush_list);
bpage && fsp_is_system_temporary(bpage->id.space());
bpage = UT_LIST_GET_PREV(list, bpage)) {
ut_ad(bpage->in_flush_list);
@ -2045,7 +2045,7 @@ void buf_flush_wait_flushed(lsn_t new_oldest)
lsn_t oldest = bpage ? bpage->oldest_modification : 0;
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
if (oldest == 0 || oldest >= new_oldest) {
break;
@ -2101,17 +2101,17 @@ bool buf_flush_single_page_from_LRU()
buf_page_t* bpage;
ibool freed;
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
for (bpage = buf_pool->single_scan_itr.start(), scanned = 0,
for (bpage = buf_pool.single_scan_itr.start(), scanned = 0,
freed = false;
bpage != NULL;
++scanned, bpage = buf_pool->single_scan_itr.get()) {
++scanned, bpage = buf_pool.single_scan_itr.get()) {
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
buf_pool->single_scan_itr.set(prev);
buf_pool.single_scan_itr.set(prev);
BPageMutex* block_mutex;
block_mutex = buf_page_get_mutex(bpage);
@ -2124,7 +2124,7 @@ bool buf_flush_single_page_from_LRU()
mutex_exit(block_mutex);
if (buf_LRU_free_page(bpage, true)) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
freed = true;
break;
}
@ -2156,7 +2156,7 @@ bool buf_flush_single_page_from_LRU()
if (!freed) {
/* Can't find a single flushable page. */
ut_ad(!bpage);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
if (scanned) {
@ -2167,7 +2167,7 @@ bool buf_flush_single_page_from_LRU()
scanned);
}
ut_ad(!mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool.mutex));
return(freed);
}
@ -2187,16 +2187,16 @@ static ulint buf_flush_LRU_list()
/* srv_LRU_scan_depth can be arbitrarily large value.
We cap it with current LRU size. */
mutex_enter(&buf_pool->mutex);
scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
if (buf_pool->curr_size < buf_pool->old_size
&& buf_pool->withdraw_target > 0) {
withdraw_depth = buf_pool->withdraw_target
- UT_LIST_GET_LEN(buf_pool->withdraw);
mutex_enter(&buf_pool.mutex);
scan_depth = UT_LIST_GET_LEN(buf_pool.LRU);
if (buf_pool.curr_size < buf_pool.old_size
&& buf_pool.withdraw_target > 0) {
withdraw_depth = buf_pool.withdraw_target
- UT_LIST_GET_LEN(buf_pool.withdraw);
} else {
withdraw_depth = 0;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (withdraw_depth > srv_LRU_scan_depth) {
scan_depth = ut_min(withdraw_depth, scan_depth);
} else {
@ -2215,10 +2215,10 @@ static ulint buf_flush_LRU_list()
/** Wait for any possible LRU flushes to complete. */
void buf_flush_wait_LRU_batch_end()
{
mutex_enter(&buf_pool->mutex);
bool wait = buf_pool->n_flush[BUF_FLUSH_LRU]
|| buf_pool->init_flush[BUF_FLUSH_LRU];
mutex_exit(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
bool wait = buf_pool.n_flush[BUF_FLUSH_LRU]
|| buf_pool.init_flush[BUF_FLUSH_LRU];
mutex_exit(&buf_pool.mutex);
if (wait) {
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
@ -2232,7 +2232,7 @@ static
ulint
af_get_pct_for_dirty()
{
const ulint dirty = UT_LIST_GET_LEN(buf_pool->flush_list);
const ulint dirty = UT_LIST_GET_LEN(buf_pool.flush_list);
if (!dirty) {
/* No pages modified */
return 0;
@ -2242,8 +2242,8 @@ af_get_pct_for_dirty()
pool (including the flush_list) was emptied while we are
looking at it) */
double dirty_pct = 100 * static_cast<double>(dirty)
/ static_cast<double>(1 + UT_LIST_GET_LEN(buf_pool->LRU)
+ UT_LIST_GET_LEN(buf_pool->free));
/ static_cast<double>(1 + UT_LIST_GET_LEN(buf_pool.LRU)
+ UT_LIST_GET_LEN(buf_pool.free));
ut_a(srv_max_dirty_pages_pct_lwm
<= srv_max_buf_pool_modified_pct);
@ -2457,8 +2457,8 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
+ lsn_avg_rate * buf_flush_lsn_scan_factor;
ulint pages_for_lsn = 0;
mutex_enter(&buf_pool->flush_list_mutex);
for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list);
mutex_enter(&buf_pool.flush_list_mutex);
for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool.flush_list);
b != NULL;
b = UT_LIST_GET_PREV(list, b)) {
if (b->oldest_modification > target_lsn) {
@ -2466,7 +2466,7 @@ page_cleaner_flush_pages_recommendation(ulint last_pages_in)
}
++pages_for_lsn;
}
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
mutex_enter(&page_cleaner.mutex);
ut_ad(page_cleaner.slot.state == PAGE_CLEANER_STATE_NONE);
@ -2834,7 +2834,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* The page_cleaner skips sleep if the server is
idle and there are no pending IOs in the buffer pool
and there is work to do. */
if (!n_flushed || !buf_pool->n_pend_reads
if (!n_flushed || !buf_pool.n_pend_reads
|| srv_check_activity(last_activity)) {
ret_sleep = pc_sleep_if_needed(
@ -3088,7 +3088,7 @@ static os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner)(void*)
/* Some sanity checks */
ut_ad(!srv_any_background_activity());
ut_ad(srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE);
ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == 0);
ut_a(UT_LIST_GET_LEN(buf_pool.flush_list) == 0);
/* We have lived our life. Time to die. */
@ -3171,34 +3171,34 @@ static void buf_flush_validate_low()
buf_page_t* bpage;
const ib_rbt_node_t* rnode = NULL;
ut_ad(mutex_own(&buf_pool->flush_list_mutex));
ut_ad(mutex_own(&buf_pool.flush_list_mutex));
ut_list_validate(buf_pool->flush_list, Check());
ut_list_validate(buf_pool.flush_list, Check());
bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
bpage = UT_LIST_GET_FIRST(buf_pool.flush_list);
/* If we are in recovery mode i.e.: flush_rbt != NULL
then each block in the flush_list must also be present
in the flush_rbt. */
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
rnode = rbt_first(buf_pool->flush_rbt);
if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
rnode = rbt_first(buf_pool.flush_rbt);
}
while (bpage != NULL) {
const lsn_t om = bpage->oldest_modification;
ut_ad(bpage->in_flush_list);
/* A page in buf_pool->flush_list can be in
/* A page in buf_pool.flush_list can be in
BUF_BLOCK_REMOVE_HASH state. This happens when a page
is in the middle of being relocated. In that case the
original descriptor can have this state and still be
in the flush list waiting to acquire the
buf_pool->flush_list_mutex to complete the relocation. */
buf_pool.flush_list_mutex to complete the relocation. */
ut_a(buf_page_in_file(bpage)
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
ut_a(om > 0);
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
if (UNIV_LIKELY_NULL(buf_pool.flush_rbt)) {
buf_page_t** prpage;
ut_a(rnode != NULL);
@ -3206,7 +3206,7 @@ static void buf_flush_validate_low()
ut_a(*prpage != NULL);
ut_a(*prpage == bpage);
rnode = rbt_next(buf_pool->flush_rbt, rnode);
rnode = rbt_next(buf_pool.flush_rbt, rnode);
}
bpage = UT_LIST_GET_NEXT(list, bpage);
@ -3222,9 +3222,9 @@ static void buf_flush_validate_low()
/** Validate the flush list. */
void buf_flush_validate()
{
mutex_enter(&buf_pool->flush_list_mutex);
mutex_enter(&buf_pool.flush_list_mutex);
buf_flush_validate_low();
mutex_exit(&buf_pool->flush_list_mutex);
mutex_exit(&buf_pool.flush_list_mutex);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

File diff suppressed because it is too large Load diff

View file

@ -41,7 +41,7 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
/** If there are buf_pool->curr_size per the number below pending reads, then
/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
i/o-fixed buffer blocks */
#define BUF_READ_AHEAD_PEND_LIMIT 2
@ -60,7 +60,7 @@ buf_read_page_handle_error(
const page_id_t old_page_id = bpage->id;
/* First unfix and release lock on the bpage */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
mutex_enter(buf_page_get_mutex(bpage));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
@ -79,10 +79,10 @@ buf_read_page_handle_error(
/* remove the block from LRU list */
buf_LRU_free_one_page(bpage, old_page_id);
ut_ad(buf_pool->n_pend_reads > 0);
buf_pool->n_pend_reads--;
ut_ad(buf_pool.n_pend_reads > 0);
buf_pool.n_pend_reads--;
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
/** Low-level function which reads a page asynchronously from a file to the
@ -245,7 +245,7 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
}
const ulint buf_read_ahead_random_area
= buf_pool->read_ahead_area;
= buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_random_area)
* buf_read_ahead_random_area;
@ -282,11 +282,11 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
return(0);
}
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&buf_pool->mutex);
if (buf_pool.n_pend_reads
> buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&buf_pool.mutex);
return(0);
}
@ -300,14 +300,14 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
if (buf_page_is_accessed(bpage)
&& buf_page_peek_if_young(bpage)
&& ++recent_blocks
>= 5 + buf_pool->read_ahead_area / 8) {
mutex_exit(&buf_pool->mutex);
>= 5 + buf_pool.read_ahead_area / 8) {
mutex_exit(&buf_pool.mutex);
goto read_ahead;
}
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
/* Do nothing */
return(0);
@ -357,7 +357,7 @@ read_ahead:
LRU policy decision. */
buf_LRU_stat_inc_io();
buf_pool->stat.n_ra_pages_read_rnd += count;
buf_pool.stat.n_ra_pages_read_rnd += count;
srv_stats.buf_pool_reads.add(count);
return(count);
}
@ -501,7 +501,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
}
const ulint buf_read_ahead_linear_area
= buf_pool->read_ahead_area;
= buf_pool.read_ahead_area;
low = (page_id.page_no() / buf_read_ahead_linear_area)
* buf_read_ahead_linear_area;
high = (page_id.page_no() / buf_read_ahead_linear_area + 1)
@ -539,11 +539,11 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
return(0);
}
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&buf_pool->mutex);
if (buf_pool.n_pend_reads
> buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&buf_pool.mutex);
return(0);
}
@ -561,7 +561,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
/* How many out of order accessed pages can we ignore
when working out the access pattern for linear readahead */
threshold = ut_min(static_cast<ulint>(64 - srv_read_ahead_threshold),
buf_pool->read_ahead_area);
buf_pool.read_ahead_area);
fail_count = 0;
@ -592,7 +592,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
if (fail_count > threshold) {
/* Too many failures: return */
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(0);
}
@ -607,7 +607,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
bpage = buf_page_hash_get(page_id);
if (bpage == NULL) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(0);
}
@ -633,7 +633,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if ((page_id.page_no() == low)
&& (succ_offset == page_id.page_no() + 1)) {
@ -715,7 +715,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
LRU policy decision. */
buf_LRU_stat_inc_io();
buf_pool->stat.n_ra_pages_read += count;
buf_pool.stat.n_ra_pages_read += count;
return(count);
}
@ -748,7 +748,7 @@ buf_read_recv_pages(
const page_id_t cur_page_id(space_id, page_nos[i]);
for (ulint count = 0, limit = recv_sys.max_blocks() / 2;
buf_pool->n_pend_reads >= limit; ) {
buf_pool.n_pend_reads >= limit; ) {
os_thread_sleep(10000);
@ -757,7 +757,7 @@ buf_read_recv_pages(
ib::error()
<< "Waited for " << count / 100
<< " seconds for "
<< buf_pool->n_pend_reads
<< buf_pool.n_pend_reads
<< " pending reads";
}
}

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -1275,7 +1275,7 @@ rtr_cur_restore_position(
ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);
if (!buf_pool_is_obsolete(r_cursor->withdraw_clock)
if (!buf_pool.is_obsolete(r_cursor->withdraw_clock)
&& buf_page_optimistic_get(RW_X_LATCH,
r_cursor->block_when_stored,
r_cursor->modify_clock,

View file

@ -72,7 +72,7 @@ ib_create(
if (type == MEM_HEAP_FOR_PAGE_HASH) {
/* We create a hash table protected by rw_locks for
buf_pool->page_hash. */
buf_pool.page_hash. */
hash_create_sync_obj(
table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
} else {

View file

@ -18243,9 +18243,9 @@ innodb_buffer_pool_evict_uncompressed()
{
bool all_evicted = true;
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool.unzip_LRU);
block != NULL; ) {
buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@ -18260,7 +18260,7 @@ innodb_buffer_pool_evict_uncompressed()
block = prev_block;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
return(all_evicted);
}
@ -21431,10 +21431,10 @@ innodb_buffer_pool_size_validate(
#endif /* UNIV_DEBUG */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
if (srv_buf_pool_old_size != srv_buf_pool_size) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
my_printf_error(ER_WRONG_ARGUMENTS,
"Another buffer pool resize is already in progress.", MYF(0));
return(1);
@ -21445,13 +21445,13 @@ innodb_buffer_pool_size_validate(
*static_cast<ulonglong*>(save) = requested_buf_pool_size;
if (srv_buf_pool_size == ulint(intbuf)) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
/* nothing to do */
return(0);
}
if (srv_buf_pool_size == requested_buf_pool_size) {
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_ARGUMENTS,
"innodb_buffer_pool_size must be at least"
@ -21462,7 +21462,7 @@ innodb_buffer_pool_size_validate(
}
srv_buf_pool_size = requested_buf_pool_size;
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (intbuf != static_cast<longlong>(requested_buf_pool_size)) {
char buf[64];

View file

@ -126,9 +126,9 @@ struct buf_page_info_t{
built on this page */
#endif /* BTR_CUR_HASH_ADAPT */
unsigned is_old:1; /*!< TRUE if the block is in the old
blocks in buf_pool->LRU_old */
blocks in buf_pool.LRU_old */
unsigned freed_page_clock:31; /*!< the value of
buf_pool->freed_page_clock */
buf_pool.freed_page_clock */
unsigned zip_ssize:PAGE_ZIP_SSIZE_BITS;
/*!< Compressed page size */
unsigned page_state:BUF_PAGE_STATE_BITS; /*!< Page state */
@ -1637,22 +1637,22 @@ i_s_cmpmem_fill_low(
buf_buddy_stat_t buddy_stat_local[BUF_BUDDY_SIZES_MAX + 1];
/* Save buddy stats for buffer pool in local variables. */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
zip_free_len_local[x] = (x < BUF_BUDDY_SIZES) ?
UT_LIST_GET_LEN(buf_pool->zip_free[x]) : 0;
UT_LIST_GET_LEN(buf_pool.zip_free[x]) : 0;
buddy_stat_local[x] = buf_pool->buddy_stat[x];
buddy_stat_local[x] = buf_pool.buddy_stat[x];
if (reset) {
/* This is protected by buf_pool->mutex. */
buf_pool->buddy_stat[x].relocated = 0;
buf_pool->buddy_stat[x].relocated_usec = 0;
/* This is protected by buf_pool.mutex. */
buf_pool.buddy_stat[x].relocated = 0;
buf_pool.buddy_stat[x].relocated_usec = 0;
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
buf_buddy_stat_t* buddy_stat = &buddy_stat_local[x];
@ -4247,7 +4247,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
heap = mem_heap_create(10000);
for (ulint n = 0;
n < ut_min(buf_pool->n_chunks, buf_pool->n_chunks_new); n++) {
n < ut_min(buf_pool.n_chunks, buf_pool.n_chunks_new); n++) {
const buf_block_t* block;
ulint n_blocks;
buf_page_info_t* info_buffer;
@ -4258,8 +4258,8 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
ulint block_id = 0;
/* Get buffer block of the nth chunk */
block = buf_pool->chunks[n].blocks;
chunk_size = buf_pool->chunks[n].size;
block = buf_pool.chunks[n].blocks;
chunk_size = buf_pool.chunks[n].size;
num_page = 0;
while (chunk_size > 0) {
@ -4280,7 +4280,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
buffer pool info printout, we are not required to
preserve the overall consistency, so we can
release mutex periodically */
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
/* GO through each block in the chunk */
for (n_blocks = num_to_process; n_blocks--; block++) {
@ -4291,7 +4291,7 @@ static int i_s_innodb_buffer_page_fill(THD *thd, TABLE_LIST *tables, Item *)
num_page++;
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
/* Fill in information schema table with information
just collected from the buffer chunk scan */
@ -4615,10 +4615,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
/* Aquire the mutex before allocating info_buffer, since
UT_LIST_GET_LEN(buf_pool->LRU) could change */
mutex_enter(&buf_pool->mutex);
UT_LIST_GET_LEN(buf_pool.LRU) could change */
mutex_enter(&buf_pool.mutex);
lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
lru_len = UT_LIST_GET_LEN(buf_pool.LRU);
/* Print error message if malloc fail */
info_buffer = (buf_page_info_t*) my_malloc(PSI_INSTRUMENT_ME,
@ -4633,7 +4633,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
/* Walk through Pool's LRU list and print the buffer page
information */
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool.LRU);
while (bpage != NULL) {
/* Use the same function that collect buffer info for
@ -4647,10 +4647,10 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
}
ut_ad(lru_pos == lru_len);
ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool->LRU));
ut_ad(lru_pos == UT_LIST_GET_LEN(buf_pool.LRU));
exit:
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
if (info_buffer) {
status = i_s_innodb_buf_page_lru_fill(
@ -7209,7 +7209,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
if (buf_pool_is_block_mutex(mutex)) {
if (buf_pool.is_block_mutex(mutex)) {
block_mutex = mutex;
block_mutex_oswait_count += mutex->count_os_wait;
continue;
@ -7257,7 +7257,7 @@ i_s_innodb_mutexes_fill_table(
continue;
}
if (buf_pool_is_block_lock(lock)) {
if (buf_pool.is_block_lock(lock)) {
block_lock = lock;
block_lock_oswait_count += lock->count_os_wait;
continue;

View file

@ -110,7 +110,7 @@ btr_search_move_or_delete_hash_entries(
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool->page_hash
has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block);

View file

@ -31,7 +31,7 @@ Created December 2006 by Marko Makela
/**
@param[in] block size in bytes
@return index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
@return index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
inline
ulint
buf_buddy_get_slot(ulint size)
@ -50,16 +50,16 @@ buf_buddy_get_slot(ulint size)
}
/** Allocate a ROW_FORMAT=COMPRESSED block.
@param[in] i index of buf_pool->zip_free[] or BUF_BUDDY_SIZES
@param[out] lru whether buf_pool->mutex was temporarily released
@param[in] i index of buf_pool.zip_free[] or BUF_BUDDY_SIZES
@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
byte *buf_buddy_alloc_low(ulint i, bool *lru) MY_ATTRIBUTE((malloc));
/** Allocate a ROW_FORMAT=COMPRESSED block.
The caller must not hold buf_pool->mutex nor buf_pool->zip_mutex nor any
The caller must not hold buf_pool.mutex nor buf_pool.zip_mutex nor any
block->mutex.
@param[in] size compressed page size
@param[out] lru whether buf_pool->mutex was temporarily released
@param[out] lru whether buf_pool.mutex was temporarily released
@return allocated block, never NULL */
inline byte *buf_buddy_alloc(ulint size, bool *lru= nullptr)
{
@ -69,7 +69,7 @@ inline byte *buf_buddy_alloc(ulint size, bool *lru= nullptr)
/** Deallocate a block.
@param[in] buf block to be freed, must not be pointed to
by the buffer pool
@param[in] i index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */
@param[in] i index of buf_pool.zip_free[], or BUF_BUDDY_SIZES */
void buf_buddy_free_low(void* buf, ulint i);
/** Deallocate a block.

File diff suppressed because it is too large Load diff

View file

@ -37,23 +37,6 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0rea.h"
#include "fsp0types.h"
/** A chunk of buffers. The buffer pool is allocated in chunks. */
struct buf_chunk_t{
ulint size; /*!< size of frames[] and blocks[] */
unsigned char* mem; /*!< pointer to the memory area which
was allocated for the frames */
ut_new_pfx_t mem_pfx; /*!< Auxiliary structure, describing
"mem". It is filled by the allocator's
alloc method and later passed to the
deallocate method. */
buf_block_t* blocks; /*!< array of buffer control blocks */
/** Get the size of 'mem' in bytes. */
size_t mem_size() const {
return(mem_pfx.m_size);
}
};
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@ -65,20 +48,6 @@ buf_pool_get_curr_size(void)
return(srv_buf_pool_curr_size);
}
/*********************************************************************//**
Gets the current size of buffer buf_pool in pages.
@return size in pages*/
inline ulint buf_pool_get_n_pages()
{
if (!buf_pool)
return srv_buf_pool_curr_size >> srv_page_size_shift;
ulint chunk_size= 0;
for (uint j= 0; j < buf_pool->n_chunks; j++)
chunk_size+= buf_pool->chunks[j].size;
return chunk_size;
}
/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
@ -88,7 +57,7 @@ buf_page_get_freed_page_clock(
/*==========================*/
const buf_page_t* bpage) /*!< in: block */
{
/* This is sometimes read without holding buf_pool->mutex. */
/* This is sometimes read without holding buf_pool.mutex. */
return(bpage->freed_page_clock);
}
@ -113,10 +82,10 @@ The page must be either buffer-fixed, or its page hash must be locked.
inline bool buf_page_peek_if_young(const buf_page_t *bpage)
{
/* FIXME: bpage->freed_page_clock is 31 bits */
return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
return((buf_pool.freed_page_clock & ((1UL << 31) - 1))
< (bpage->freed_page_clock
+ (buf_pool->curr_size
* (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
+ (buf_pool.curr_size
* (BUF_LRU_OLD_RATIO_DIV - buf_pool.LRU_old_ratio)
/ (BUF_LRU_OLD_RATIO_DIV * 4))));
}
@ -126,7 +95,7 @@ there is danger of dropping from the buffer pool.
@return true if bpage should be made younger */
inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
{
if (buf_pool->freed_page_clock == 0) {
if (buf_pool.freed_page_clock == 0) {
/* If eviction has not started yet, do not update the
statistics or move blocks in the LRU list. This is
either the warm-up phase or an in-memory workload. */
@ -146,7 +115,7 @@ inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
return(TRUE);
}
buf_pool->stat.n_pages_not_made_young++;
buf_pool.stat.n_pages_not_made_young++;
return false;
} else {
return !buf_page_peek_if_young(bpage);
@ -315,7 +284,7 @@ buf_page_get_mutex(
return(NULL);
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
return(&buf_pool->zip_mutex);
return(&buf_pool.zip_mutex);
default:
return(&((buf_block_t*) bpage)->mutex);
}
@ -418,7 +387,7 @@ buf_page_set_io_fix(
buf_page_t* bpage, /*!< in/out: control block */
enum buf_io_fix io_fix) /*!< in: io_fix state */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
bpage->io_fix = io_fix;
@ -439,7 +408,7 @@ buf_block_set_io_fix(
/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
the buf_pool->mutex and the block->mutex:
the buf_pool.mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
@ -452,7 +421,7 @@ buf_page_set_sticky(
/*================*/
buf_page_t* bpage) /*!< in/out: control block */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@ -467,7 +436,7 @@ buf_page_unset_sticky(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);
@ -483,7 +452,7 @@ buf_page_can_relocate(
/*==================*/
const buf_page_t* bpage) /*!< control block being relocated */
{
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
@ -505,7 +474,7 @@ buf_page_is_old(
purposes even if LRU mutex is not being held. Keep the assertion
for not since all the callers hold it. */
ut_ad(mutex_own(buf_page_get_mutex(bpage))
|| mutex_own(&buf_pool->mutex));
|| mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
return(bpage->old);
@ -521,13 +490,13 @@ buf_page_set_old(
bool old) /*!< in: old */
{
ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&buf_pool.mutex));
ut_ad(bpage->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
ut_a((buf_pool.LRU_old_len == 0) == (buf_pool.LRU_old == NULL));
/* If a block is flagged "old", the LRU_old list must exist. */
ut_a(!old || buf_pool->LRU_old);
ut_a(!old || buf_pool.LRU_old);
if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
const buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
@ -536,7 +505,7 @@ buf_page_set_old(
ut_a(prev->old == old);
} else {
ut_a(!prev->old);
ut_a(buf_pool->LRU_old == (old ? bpage : next));
ut_a(buf_pool.LRU_old == (old ? bpage : next));
}
}
#endif /* UNIV_LRU_DEBUG */
@ -566,7 +535,7 @@ buf_page_set_accessed(
/*==================*/
buf_page_t* bpage) /*!< in/out: control block */
{
ut_ad(!mutex_own(&buf_pool->mutex));
ut_ad(!mutex_own(&buf_pool.mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_in_file(bpage));
@ -588,7 +557,7 @@ buf_page_get_block(
{
if (bpage != NULL) {
ut_ad(buf_page_hash_lock_held_s_or_x(bpage)
|| mutex_own(&buf_pool->mutex));
|| mutex_own(&buf_pool.mutex));
ut_ad(buf_page_in_file(bpage));
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
@ -707,7 +676,7 @@ buf_block_free(
/*===========*/
buf_block_t* block) /*!< in, own: block to be freed */
{
mutex_enter(&buf_pool->mutex);
mutex_enter(&buf_pool.mutex);
buf_page_mutex_enter(block);
@ -717,7 +686,7 @@ buf_block_free(
buf_page_mutex_exit(block);
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
}
/********************************************************************//**
@ -732,7 +701,7 @@ buf_block_modify_clock_inc(
{
/* No latch is acquired for the shared temporary tablespace. */
ut_ad(fsp_is_system_temporary(block->page.id.space())
|| (mutex_own(&buf_pool->mutex)
|| (mutex_own(&buf_pool.mutex)
&& block->page.buf_fix_count == 0)
|| rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
@ -819,14 +788,14 @@ inline buf_page_t *buf_page_hash_get_low(page_id_t page_id)
#ifdef UNIV_DEBUG
rw_lock_t* hash_lock;
hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)
|| rw_lock_own(hash_lock, RW_LOCK_S));
#endif /* UNIV_DEBUG */
/* Look for the page in the hash table */
HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*,
HASH_SEARCH(hash, buf_pool.page_hash, page_id.fold(), buf_page_t*,
bpage,
ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
&& buf_page_in_file(bpage)),
@ -874,7 +843,7 @@ buf_page_hash_get_locked(
mode = lock_mode;
}
hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
hash_lock = hash_get_lock(buf_pool.page_hash, page_id.fold());
ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
&& !rw_lock_own(hash_lock, RW_LOCK_S));
@ -884,12 +853,12 @@ buf_page_hash_get_locked(
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_s_confirm(
hash_lock, buf_pool->page_hash, page_id.fold());
hash_lock, buf_pool.page_hash, page_id.fold());
} else {
rw_lock_x_lock(hash_lock);
/* If not own buf_pool_mutex, page_hash can be changed. */
hash_lock = hash_lock_x_confirm(
hash_lock, buf_pool->page_hash, page_id.fold());
hash_lock, buf_pool.page_hash, page_id.fold());
}
bpage = buf_page_hash_get_low(page_id);
@ -1078,18 +1047,6 @@ buf_page_get_frame(
}
}
/** Verify the possibility that a stored page is not in buffer pool.
@param[in] withdraw_clock withdraw clock when stored the page
@retval true if the page might be relocated */
UNIV_INLINE
bool
buf_pool_is_obsolete(
ulint withdraw_clock)
{
return(UNIV_UNLIKELY(buf_pool_withdrawing
|| buf_withdraw_clock != withdraw_clock));
}
/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in] size size in bytes

View file

@ -64,7 +64,7 @@ void buf_LRU_flush_or_remove_pages(ulint id, bool flush, ulint first = 0);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
Insert a compressed block into buf_pool.zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
@ -76,10 +76,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns true, it will temporarily
release buf_pool->mutex. Furthermore, the page frame will no longer be
release buf_pool.mutex. Furthermore, the page frame will no longer be
accessible via bpage.
The caller must hold buf_pool->mutex and must not hold any
The caller must hold buf_pool.mutex and must not hold any
buf_page_get_mutex() when calling this function.
@return true if freed, false otherwise. */
bool
@ -96,7 +96,7 @@ buf_LRU_free_page(
@return true if found and freed */
bool buf_LRU_scan_and_free_block(bool scan_all);
/** @return a buffer block from the buf_pool->free list
/** @return a buffer block from the buf_pool.free list
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only();
@ -109,7 +109,7 @@ the free list. Even when we flush a page or find a page in LRU scan
we put it to free list to be used.
* iteration 0:
* get a block from free list, success:done
* if buf_pool->try_LRU_scan is set
* if buf_pool.try_LRU_scan is set
* scan LRU up to srv_LRU_scan_depth to find a clean block
* the above will put the block on free list
* success:retry the free list
@ -119,7 +119,7 @@ we put it to free list to be used.
* iteration 1:
* same as iteration 0 except:
* scan whole LRU list
* scan LRU list even if buf_pool->try_LRU_scan is not set
* scan LRU list even if buf_pool.try_LRU_scan is not set
* iteration > 1:
* same as iteration 1 but sleep 10ms
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
@ -159,11 +159,11 @@ Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(buf_page_t* bpage);
/** Update buf_pool->LRU_old_ratio.
/** Update buf_pool.LRU_old_ratio.
@param[in] old_pct Reserve this percentage of
the buffer pool for "old" blocks
@param[in] adjust true=adjust the LRU list;
false=just assign buf_pool->LRU_old_ratio
false=just assign buf_pool.LRU_old_ratio
during the initialization of InnoDB
@return updated old_pct */
uint buf_LRU_old_ratio_update(uint old_pct, bool adjust);
@ -195,15 +195,15 @@ void buf_LRU_print();
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
/** @name Heuristics for detecting index scan @{ */
/** The denominator of buf_pool->LRU_old_ratio. */
/** The denominator of buf_pool.LRU_old_ratio. */
#define BUF_LRU_OLD_RATIO_DIV 1024
/** Maximum value of buf_pool->LRU_old_ratio.
/** Maximum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
@see buf_pool->LRU_old_ratio_update */
@see buf_pool.LRU_old_ratio_update */
#define BUF_LRU_OLD_RATIO_MAX BUF_LRU_OLD_RATIO_DIV
/** Minimum value of buf_pool->LRU_old_ratio.
/** Minimum value of buf_pool.LRU_old_ratio.
@see buf_LRU_old_adjust_len
@see buf_pool->LRU_old_ratio_update
@see buf_pool.LRU_old_ratio_update
The minimum must exceed
(BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
#define BUF_LRU_OLD_RATIO_MIN 51
@ -224,7 +224,7 @@ extern uint buf_LRU_old_threshold_ms;
These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
and page_zip_decompress() operations. Based on the statistics we decide
if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
if we want to evict from buf_pool.unzip_LRU or buf_pool.LRU. */
struct buf_LRU_stat_t
{
ulint io; /**< Counter of buffer pool I/O operations. */
@ -236,7 +236,7 @@ Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t buf_LRU_stat_cur;
/** Running sum of past values of buf_LRU_stat_cur.
Updated by buf_LRU_stat_update(). Protected by buf_pool->mutex. */
Updated by buf_LRU_stat_update(). Protected by buf_pool.mutex. */
extern buf_LRU_stat_t buf_LRU_stat_sum;
/********************************************************************//**

View file

@ -34,10 +34,6 @@ Created 11/17/1995 Heikki Tuuri
class buf_page_t;
/** Buffer block for which an uncompressed page exists */
struct buf_block_t;
/** Buffer pool chunk comprising buf_block_t */
struct buf_chunk_t;
/** Buffer pool comprising buf_chunk_t */
struct buf_pool_t;
/** Buffer pool statistics struct */
struct buf_pool_stat_t;
/** Buffer pool buddy statistics struct */

View file

@ -352,7 +352,7 @@ extern const ulint srv_buf_pool_min_size;
extern const ulint srv_buf_pool_def_size;
/** Requested buffer pool chunk size */
extern ulong srv_buf_pool_chunk_unit;
/** Number of locks to protect buf_pool->page_hash */
/** Number of locks to protect buf_pool.page_hash */
extern ulong srv_n_page_hash_locks;
/** Scan depth for LRU flush batch i.e.: number of blocks scanned*/
extern ulong srv_LRU_scan_depth;
@ -829,7 +829,7 @@ struct export_var_t{
ulint innodb_buffer_pool_pages_made_not_young;
ulint innodb_buffer_pool_pages_made_young;
ulint innodb_buffer_pool_pages_old;
ulint innodb_buffer_pool_read_requests; /*!< buf_pool->stat.n_page_gets */
ulint innodb_buffer_pool_read_requests; /*!< buf_pool.stat.n_page_gets */
ulint innodb_buffer_pool_reads; /*!< srv_buf_pool_reads */
ulint innodb_buffer_pool_wait_free; /*!< srv_buf_pool_wait_free */
ulint innodb_buffer_pool_pages_flushed; /*!< srv_buf_pool_flushed */
@ -866,9 +866,9 @@ struct export_var_t{
ulint innodb_os_log_fsyncs; /*!< n_log_flushes */
ulint innodb_os_log_pending_writes; /*!< srv_os_log_pending_writes */
ulint innodb_os_log_pending_fsyncs; /*!< n_pending_log_flushes */
ulint innodb_pages_created; /*!< buf_pool->stat.n_pages_created */
ulint innodb_pages_read; /*!< buf_pool->stat.n_pages_read*/
ulint innodb_pages_written; /*!< buf_pool->stat.n_pages_written */
ulint innodb_pages_created; /*!< buf_pool.stat.n_pages_created */
ulint innodb_pages_read; /*!< buf_pool.stat.n_pages_read*/
ulint innodb_pages_written; /*!< buf_pool.stat.n_pages_written */
ulint innodb_row_lock_waits; /*!< srv_n_lock_wait_count */
ulint innodb_row_lock_current_waits; /*!< srv_n_lock_wait_current_count */
int64_t innodb_row_lock_time; /*!< srv_n_lock_wait_time

View file

@ -516,8 +516,8 @@ void lock_sys_t::resize(ulint n_cells)
hash_table_free(old_hash);
/* need to update block->lock_hash_val */
mutex_enter(&buf_pool->mutex);
for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
mutex_enter(&buf_pool.mutex);
for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool.LRU);
bpage; bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
buf_block_t* block = reinterpret_cast<buf_block_t*>(
@ -527,7 +527,7 @@ void lock_sys_t::resize(ulint n_cells)
bpage->id.space(), bpage->id.page_no());
}
}
mutex_exit(&buf_pool->mutex);
mutex_exit(&buf_pool.mutex);
mutex_exit(&mutex);
}

View file

@ -1680,7 +1680,7 @@ wait_suspend_loop:
}
}
if (!buf_pool) {
if (!buf_pool.is_initialised()) {
ut_ad(!srv_was_started);
} else if (ulint pending_io = buf_pool_check_no_pending_io()) {
if (srv_print_verbose_log && count > 600) {
@ -1754,7 +1754,7 @@ wait_suspend_loop:
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"Free innodb buffer pool");
ut_d(buf_assert_all_freed());
ut_d(buf_pool.assert_all_freed());
ut_a(lsn == log_sys.get_lsn()
|| srv_force_recovery == SRV_FORCE_NO_LOG_REDO);

View file

@ -1008,7 +1008,13 @@ void recv_sys_t::create()
apply_log_recs = false;
apply_batch_on = false;
max_log_blocks = buf_pool_get_n_pages() / 3;
if (buf_pool.is_initialised()) {
max_log_blocks = buf_pool.get_n_pages() / 3;
} else {
ut_ad(srv_operation == SRV_OPERATION_BACKUP
|| srv_operation == SRV_OPERATION_RESTORE_DELTA);
max_log_blocks = 0;
}
buf = static_cast<byte*>(ut_malloc_dontdump(RECV_PARSING_BUF_SIZE, PSI_INSTRUMENT_ME));
len = 0;
parse_start_lsn = 0;
@ -1118,17 +1124,12 @@ inline void recv_sys_t::free(const void *data)
data= page_align(data);
ut_ad(mutex_own(&mutex));
#ifdef UNIV_DEBUG
/* MDEV-14481 FIXME: To prevent race condition with buf_pool_resize(),
/* MDEV-14481 FIXME: To prevent race condition with buf_pool.resize(),
we must acquire and hold the buffer pool mutex here. */
extern volatile bool buf_pool_resizing;
extern volatile bool buf_pool_withdrawing;
ut_ad(!buf_pool_resizing);
ut_ad(!buf_pool_withdrawing);
#endif
ut_ad(!buf_pool.resize_in_progress());
buf_chunk_t *chunk= buf_pool->chunks;
for (auto i= buf_pool->n_chunks; i--; chunk++)
auto *chunk= buf_pool.chunks;
for (auto i= buf_pool.n_chunks; i--; chunk++)
{
if (data < chunk->blocks->frame)
continue;
@ -3270,10 +3271,10 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
ut_ad(srv_operation == SRV_OPERATION_NORMAL
|| srv_operation == SRV_OPERATION_RESTORE
|| srv_operation == SRV_OPERATION_RESTORE_EXPORT);
ut_d(mutex_enter(&buf_pool->flush_list_mutex));
ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
ut_d(mutex_exit(&buf_pool->flush_list_mutex));
ut_d(mutex_enter(&buf_pool.flush_list_mutex));
ut_ad(UT_LIST_GET_LEN(buf_pool.LRU) == 0);
ut_ad(UT_LIST_GET_LEN(buf_pool.unzip_LRU) == 0);
ut_d(mutex_exit(&buf_pool.flush_list_mutex));
/* Initialize red-black tree for fast insertions into the
flush_list during recovery process. */

View file

@ -1631,7 +1631,7 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_requests, the number of logical
read requests */
case MONITOR_OVLD_BUF_POOL_READ_REQUESTS:
value = buf_pool->stat.n_page_gets;
value = buf_pool.stat.n_page_gets;
break;
/* innodb_buffer_pool_write_requests, the number of
@ -1647,61 +1647,61 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_read_ahead */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD:
value = buf_pool->stat.n_ra_pages_read;
value = buf_pool.stat.n_ra_pages_read;
break;
/* innodb_buffer_pool_read_ahead_evicted */
case MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED:
value = buf_pool->stat.n_ra_pages_evicted;
value = buf_pool.stat.n_ra_pages_evicted;
break;
/* innodb_buffer_pool_pages_total */
case MONITOR_OVLD_BUF_POOL_PAGE_TOTAL:
value = buf_pool_get_n_pages();
value = buf_pool.get_n_pages();
break;
/* innodb_buffer_pool_pages_misc */
case MONITOR_OVLD_BUF_POOL_PAGE_MISC:
value = buf_pool_get_n_pages()
- UT_LIST_GET_LEN(buf_pool->LRU)
- UT_LIST_GET_LEN(buf_pool->free);
value = buf_pool.get_n_pages()
- UT_LIST_GET_LEN(buf_pool.LRU)
- UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_buffer_pool_pages_data */
case MONITOR_OVLD_BUF_POOL_PAGES_DATA:
value = UT_LIST_GET_LEN(buf_pool->LRU);
value = UT_LIST_GET_LEN(buf_pool.LRU);
break;
/* innodb_buffer_pool_bytes_data */
case MONITOR_OVLD_BUF_POOL_BYTES_DATA:
value = buf_pool->stat.LRU_bytes
+ (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
value = buf_pool.stat.LRU_bytes
+ (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
break;
/* innodb_buffer_pool_pages_dirty */
case MONITOR_OVLD_BUF_POOL_PAGES_DIRTY:
value = UT_LIST_GET_LEN(buf_pool->flush_list);
value = UT_LIST_GET_LEN(buf_pool.flush_list);
break;
/* innodb_buffer_pool_bytes_dirty */
case MONITOR_OVLD_BUF_POOL_BYTES_DIRTY:
value = buf_pool->stat.flush_list_bytes;
value = buf_pool.stat.flush_list_bytes;
break;
/* innodb_buffer_pool_pages_free */
case MONITOR_OVLD_BUF_POOL_PAGES_FREE:
value = UT_LIST_GET_LEN(buf_pool->free);
value = UT_LIST_GET_LEN(buf_pool.free);
break;
/* innodb_pages_created, the number of pages created */
case MONITOR_OVLD_PAGE_CREATED:
value = buf_pool->stat.n_pages_created;
value = buf_pool.stat.n_pages_created;
break;
/* innodb_pages_written, the number of page written */
case MONITOR_OVLD_PAGES_WRITTEN:
value = buf_pool->stat.n_pages_written;
value = buf_pool.stat.n_pages_written;
break;
/* innodb_index_pages_written, the number of index pages written */
@ -1716,7 +1716,7 @@ srv_mon_process_existing_counter(
/* innodb_pages_read */
case MONITOR_OVLD_PAGES_READ:
value = buf_pool->stat.n_pages_read;
value = buf_pool.stat.n_pages_read;
break;
/* Number of times secondary index lookup triggered cluster lookup */

View file

@ -207,7 +207,7 @@ const ulint srv_buf_pool_def_size = 128 * 1024 * 1024;
/** Requested buffer pool chunk size */
ulong srv_buf_pool_chunk_unit;
/** innodb_page_hash_locks (a debug-only parameter);
number of locks to protect buf_pool->page_hash */
number of locks to protect buf_pool.page_hash */
ulong srv_n_page_hash_locks = 16;
/** innodb_lru_scan_depth; number of blocks scanned in LRU flush batch */
ulong srv_LRU_scan_depth;
@ -972,7 +972,7 @@ srv_printf_innodb_monitor(
const hash_table_t* table = btr_search_sys->hash_tables[i];
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
/* this is only used for buf_pool->page_hash */
/* this is only used for buf_pool.page_hash */
ut_ad(!table->heaps);
/* this is used for the adaptive hash index */
ut_ad(table->heap);
@ -1173,7 +1173,7 @@ srv_export_innodb_status(void)
export_vars.innodb_data_written = srv_stats.data_written;
export_vars.innodb_buffer_pool_read_requests
= buf_pool->stat.n_page_gets;
= buf_pool.stat.n_page_gets;
export_vars.innodb_buffer_pool_write_requests =
srv_stats.buf_pool_write_requests;
@ -1187,48 +1187,48 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_reads = srv_stats.buf_pool_reads;
export_vars.innodb_buffer_pool_read_ahead_rnd =
buf_pool->stat.n_ra_pages_read_rnd;
buf_pool.stat.n_ra_pages_read_rnd;
export_vars.innodb_buffer_pool_read_ahead =
buf_pool->stat.n_ra_pages_read;
buf_pool.stat.n_ra_pages_read;
export_vars.innodb_buffer_pool_read_ahead_evicted =
buf_pool->stat.n_ra_pages_evicted;
buf_pool.stat.n_ra_pages_evicted;
export_vars.innodb_buffer_pool_pages_data =
UT_LIST_GET_LEN(buf_pool->LRU);
UT_LIST_GET_LEN(buf_pool.LRU);
export_vars.innodb_buffer_pool_bytes_data =
buf_pool->stat.LRU_bytes
+ (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
buf_pool.stat.LRU_bytes
+ (UT_LIST_GET_LEN(buf_pool.unzip_LRU)
<< srv_page_size_shift);
export_vars.innodb_buffer_pool_pages_dirty =
UT_LIST_GET_LEN(buf_pool->flush_list);
UT_LIST_GET_LEN(buf_pool.flush_list);
export_vars.innodb_buffer_pool_pages_made_young
= buf_pool->stat.n_pages_made_young;
= buf_pool.stat.n_pages_made_young;
export_vars.innodb_buffer_pool_pages_made_not_young
= buf_pool->stat.n_pages_not_made_young;
= buf_pool.stat.n_pages_not_made_young;
export_vars.innodb_buffer_pool_pages_old = buf_pool->LRU_old_len;
export_vars.innodb_buffer_pool_pages_old = buf_pool.LRU_old_len;
export_vars.innodb_buffer_pool_bytes_dirty =
buf_pool->stat.flush_list_bytes;
buf_pool.stat.flush_list_bytes;
export_vars.innodb_buffer_pool_pages_free =
UT_LIST_GET_LEN(buf_pool->free);
UT_LIST_GET_LEN(buf_pool.free);
#ifdef UNIV_DEBUG
export_vars.innodb_buffer_pool_pages_latched =
buf_get_latched_pages_number();
#endif /* UNIV_DEBUG */
export_vars.innodb_buffer_pool_pages_total = buf_pool_get_n_pages();
export_vars.innodb_buffer_pool_pages_total = buf_pool.get_n_pages();
export_vars.innodb_buffer_pool_pages_misc =
buf_pool_get_n_pages()
- UT_LIST_GET_LEN(buf_pool->LRU)
- UT_LIST_GET_LEN(buf_pool->free);
buf_pool.get_n_pages()
- UT_LIST_GET_LEN(buf_pool.LRU)
- UT_LIST_GET_LEN(buf_pool.free);
export_vars.innodb_max_trx_id = trx_sys.get_max_trx_id();
export_vars.innodb_history_list_length = trx_sys.rseg_history_len;
@ -1254,11 +1254,11 @@ srv_export_innodb_status(void)
export_vars.innodb_dblwr_writes = srv_stats.dblwr_writes;
export_vars.innodb_pages_created = buf_pool->stat.n_pages_created;
export_vars.innodb_pages_created = buf_pool.stat.n_pages_created;
export_vars.innodb_pages_read = buf_pool->stat.n_pages_read;
export_vars.innodb_pages_read = buf_pool.stat.n_pages_read;
export_vars.innodb_pages_written = buf_pool->stat.n_pages_written;
export_vars.innodb_pages_written = buf_pool.stat.n_pages_written;
export_vars.innodb_row_lock_waits = srv_stats.n_lock_wait_count;

View file

@ -1308,7 +1308,7 @@ dberr_t srv_start(bool create_new_db)
<< srv_buf_pool_size
<< ", chunk size = " << srv_buf_pool_chunk_unit;
if (buf_pool_init()) {
if (buf_pool.create()) {
ib::error() << "Cannot allocate memory for the buffer pool";
return(srv_init_abort(DB_ERROR));
@ -2172,11 +2172,8 @@ void innodb_shutdown()
pars_lexer_close();
recv_sys.close();
ut_ad(buf_pool || !srv_was_started);
if (buf_pool) {
buf_pool_free();
}
ut_ad(buf_pool.is_initialised() || !srv_was_started);
buf_pool.close();
sync_check_close();
if (srv_was_started && srv_print_verbose_log) {

View file

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@ -828,16 +828,16 @@ LatchDebug::check_order(
case SYNC_BUF_PAGE_HASH:
/* Multiple page_hash locks are only allowed during
buf_validate and that is where buf_pool mutex is already
buf_pool.validate() and that is where buf_pool mutex is already
held. */
/* Fall through */
case SYNC_BUF_BLOCK:
/* Either the thread must own the (buffer pool) buf_pool->mutex
/* Either the thread must own the (buffer pool) buf_pool.mutex
or it is allowed to latch only ONE of (buffer block)
block->mutex or buf_pool->zip_mutex. */
block->mutex or buf_pool.zip_mutex. */
if (less(latches, level) != NULL) {
basic_check(latches, level, level - 1);

View file

@ -1897,7 +1897,8 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
if (uint16_t offset = trx_undo_page_report_rename(
trx, table, block, &mtr)) {
undo->withdraw_clock = buf_withdraw_clock;
undo->withdraw_clock
= buf_pool.withdraw_clock();
undo->top_page_no = undo->last_page_no;
undo->top_offset = offset;
undo->top_undo_no = trx->undo_no++;
@ -2046,7 +2047,7 @@ trx_undo_report_row_operation(
mtr_commit(&mtr);
} else {
/* Success */
undo->withdraw_clock = buf_withdraw_clock;
undo->withdraw_clock = buf_pool.withdraw_clock();
mtr_commit(&mtr);
undo->top_page_no = undo_block->page.id.page_no();

View file

@ -1154,7 +1154,7 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
return buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
0, RW_X_LATCH,
buf_pool_is_obsolete(undo->withdraw_clock)
buf_pool.is_obsolete(undo->withdraw_clock)
? NULL : undo->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}
@ -1210,7 +1210,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
return buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
0, RW_X_LATCH,
buf_pool_is_obsolete((*undo)->withdraw_clock)
buf_pool.is_obsolete((*undo)->withdraw_clock)
? NULL : (*undo)->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}