MDEV-35049: Improve btr_search_drop_page_hash_index()

btr_search_drop_page_hash_index(): Replace the Boolean parameter
with const dict_index_t *not_garbage. If buf_block_t::index points
to that index, there is no need to acquire btr_sea::partition::latch.

The old parameter bool garbage_collect=false is equivalent to the
parameter not_garbage=nullptr. The parameter garbage_collect=true
will be replaced either with the actual index that is associated
with the buffer page, or with a bogus pointer not_garbage=-1 to
indicate that any lazily remaining entries for a freed index need to be
removed.
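
The mapping from the old Boolean argument to the new pointer argument can be
sketched as follows. This is a condensed, self-contained model of the
early-exit logic, based on the btr0sea.cc hunks further below; the types are
simplified stand-ins rather than the real InnoDB declarations, and the actual
function re-checks block->index after acquiring btr_sea::partition::latch.

/* Simplified stand-ins; the real types are declared in dict0mem.h and
buf0buf.h. */
struct dict_index_t { bool is_freed; bool freed() const noexcept { return is_freed; } };
struct buf_block_t { dict_index_t *index; };

/* Bogus pointer meaning "remove entries for any freed index", as passed at
the fil_crypt_* call sites in the diff below. */
static dict_index_t *const ANY_FREED_INDEX=
  reinterpret_cast<dict_index_t*>(-1);

/* Old call                     New call
   garbage_collect=false    ->  not_garbage=nullptr
   garbage_collect=true     ->  not_garbage=index (known to the caller)
                                or not_garbage=ANY_FREED_INDEX */
static bool ahi_drop_may_be_needed(const buf_block_t *block,
                                   const dict_index_t *not_garbage) noexcept
{
  const dict_index_t *index= block->index;
  if (!index || index == not_garbage)
    /* No hash entries, or they belong to the index that the caller is
    about to access: the latch need not be acquired at all. */
    return false;
  if (not_garbage && !index->freed())
    /* Garbage collection was requested, but the entries belong to an
    index that is still in use: nothing needs to be removed. */
    return false;
  return true; /* fall through to the latched drop */
}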

buf_page_get_low(), buf_page_get_gen(), mtr_t::page_lock(),
mtr_t::upgrade_buffer_fix(): Do not invoke
btr_search_drop_page_hash_index(). Our caller will have to do it
when appropriate.
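
As an illustration of that caller responsibility, the following condensed
fragment is modeled on the btr_block_get() hunk shown below; the argument
list of buf_page_get_gen() is abbreviated here and the error handling is
omitted.

buf_block_t *block= buf_page_get_gen(page_id, zip_size, RW_S_LATCH,
                                     nullptr, BUF_GET, mtr);
if (UNIV_LIKELY(block != nullptr))
{
  /* The page may be a B-tree leaf page of "index": drop any adaptive hash
  index entries here, because buf_page_get_gen() no longer does it.
  Passing &index lets the call return without acquiring
  btr_sea::partition::latch whenever block->index == &index, while entries
  left behind for a dropped index are still removed. */
  btr_search_drop_page_hash_index(block, &index);
  /* ... page validation as in btr_block_get() ... */
}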

buf_page_create_low(): Keep invoking btr_search_drop_page_hash_index().
This is the normal way of lazily dropping the adaptive hash index
after a DDL operation such as DROP INDEX.

btr_block_get(), btr_root_block_get(), btr_root_adjust_on_import(),
btr_read_autoinc_with_fallback(), btr_cur_instant_init_low(),
btr_cur_t::search_leaf(), btr_cur_t::pessimistic_search_leaf(),
btr_pcur_optimistic_latch_leaves(), dict_stats_analyze_index_below_cur():
Invoke btr_search_drop_page_hash_index(block, index) for pages that
may be leaf pages. No adaptive hash index may have been created on
anything other than a B-tree leaf page.

btr_cur_search_to_nth_level(): Do not invoke
btr_search_drop_page_hash_index(), because we are only accessing
non-leaf pages and the adaptive hash index may only have been created
on leaf pages.

btr_page_alloc_for_ibuf() and many other callers of buf_page_get_gen()
or similar functions do not invoke btr_search_drop_page_hash_index(),
because the adaptive hash index is never created on such pages.
If a page in the tablespace was freed as part of a DDL operation and
reused for something else, then buf_page_create_low() will take care
of dropping the adaptive hash index before the freed page is modified.

It is notable that while the flst_ functions may access pages that are
related to allocating B-tree index pages (the BTR_SEG_TOP and BTR_SEG_LEAF
file segments linked from the index root page), those pages themselves can never be
stored in the adaptive hash index. Therefore, it is not necessary to
invoke btr_search_drop_page_hash_index() on them.

Reviewed by: Vladislav Lesin
Author: Marko Mäkelä  2025-01-10 16:40:34 +02:00
parent c942b31340
commit 5f7b2a3ced
12 changed files with 100 additions and 79 deletions

View file

@ -238,6 +238,7 @@ buf_block_t *btr_block_get(const dict_index_t &index, uint32_t page,
if (UNIV_LIKELY(block != nullptr))
{
btr_search_drop_page_hash_index(block, &index);
if (!!page_is_comp(block->page.frame) != index.table->not_redundant() ||
btr_page_get_index_id(block->page.frame) != index.id ||
!fil_page_index_page_check(block->page.frame) ||
@ -291,6 +292,7 @@ btr_root_block_get(
if (UNIV_LIKELY(block != nullptr))
{
btr_search_drop_page_hash_index(block, index);
if (!!page_is_comp(block->page.frame) !=
index->table->not_redundant() ||
btr_page_get_index_id(block->page.frame) != index->id ||
@ -397,6 +399,7 @@ btr_root_adjust_on_import(
goto func_exit;
}
btr_search_drop_page_hash_index(block, index);
page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
@ -865,7 +868,7 @@ static void btr_free_root(buf_block_t *block, const fil_space_t &space,
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->is_named_space(&space));
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
if (btr_root_fseg_validate(PAGE_HEADER + PAGE_BTR_SEG_TOP, *block, space))
{
@ -893,15 +896,18 @@ buf_block_t *btr_free_root_check(const page_id_t page_id, ulint zip_size,
buf_block_t *block= buf_page_get_gen(page_id, zip_size, RW_X_LATCH,
nullptr, BUF_GET_POSSIBLY_FREED, mtr);
if (!block);
else if (fil_page_index_page_check(block->page.frame) &&
index_id == btr_page_get_index_id(block->page.frame))
/* This should be a root page. It should not be possible to
reassign the same index_id for some other index in the
tablespace. */
ut_ad(!page_has_siblings(block->page.frame));
else
block= nullptr;
if (block)
{
btr_search_drop_page_hash_index(block,reinterpret_cast<dict_index_t*>(-1));
if (fil_page_index_page_check(block->page.frame) &&
index_id == btr_page_get_index_id(block->page.frame))
/* This should be a root page. It should not be possible to
reassign the same index_id for some other index in the
tablespace. */
ut_ad(!page_has_siblings(block->page.frame));
else
block= nullptr;
}
return block;
}
@ -1098,7 +1104,7 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
,any_ahi_pages()
#endif
);
btr_search_drop_page_hash_index(root_block, false);
btr_search_drop_page_hash_index(root_block, nullptr);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!any_ahi_pages());
#endif
@ -1211,12 +1217,13 @@ uint64_t btr_read_autoinc_with_fallback(const dict_table_t *table,
uint64_t autoinc= 0;
mtr_t mtr;
mtr.start();
const dict_index_t *const first_index= dict_table_get_first_index(table);
if (buf_block_t *block=
buf_page_get(page_id_t(table->space_id,
dict_table_get_first_index(table)->page),
buf_page_get(page_id_t(table->space_id, first_index->page),
table->space->zip_size(), RW_SX_LATCH, &mtr))
{
btr_search_drop_page_hash_index(block, first_index);
autoinc= page_get_autoinc(block->page.frame);
if (autoinc > 0 && autoinc <= max && mysql_version >= 100210);
@ -1269,6 +1276,9 @@ btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset)
if (buf_block_t *root= buf_page_get(page_id_t(space->id, index->page),
space->zip_size(), RW_SX_LATCH, &mtr))
{
#ifdef BTR_CUR_HASH_ADAPT
ut_d(if (dict_index_t *ri= root->index)) ut_ad(ri == index);
#endif /* BTR_CUR_HASH_ADAPT */
buf_page_make_young_if_needed(&root->page);
mtr.set_named_space(space);
page_set_autoinc(root, autoinc, &mtr, reset);
@ -1299,7 +1309,7 @@ static dberr_t btr_page_reorganize_low(page_cur_t *cursor, mtr_t *mtr)
if (UNIV_UNLIKELY(pos == ULINT_UNDEFINED))
return DB_CORRUPTION;
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
buf_block_t *old= buf_block_alloc();
/* Copy the old page to temporary space */
@ -1613,7 +1623,7 @@ btr_page_empty(
|| page_zip_validate(page_zip, block->page.frame, index));
#endif /* UNIV_ZIP_DEBUG */
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
/* Recreate the page: note that global data on page (possible
segment headers, next page-field, etc.) is preserved intact */
@ -3384,7 +3394,7 @@ parent_corrupted:
mem_heap_free(heap);
}
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
/* Make the father empty */
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
@ -3702,7 +3712,7 @@ cannot_merge:
goto err_exit;
}
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
/* Remove the page from the level list */
err = btr_level_list_remove(*block, *index, mtr);
@ -3805,7 +3815,7 @@ cannot_merge:
goto err_exit;
}
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
if (merge_page_zip && left_page_no == FIL_NULL) {
@ -3967,7 +3977,7 @@ btr_discard_only_page_on_level(
ut_ad(fil_page_index_page_check(page));
ut_ad(block->page.id().space() == index->table->space->id);
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
cursor.page_cur.index = index;
cursor.page_cur.block = block;
@ -4163,7 +4173,7 @@ btr_discard_page(
return DB_CORRUPTION;
}
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
if (dict_index_is_spatial(index)) {
rtr_node_ptr_delete(&parent_cursor, mtr);

View file

@ -358,6 +358,8 @@ incompatible:
goto incompatible;
}
btr_search_drop_page_hash_index(block, index);
if (fil_page_get_type(block->page.frame) != FIL_PAGE_TYPE_BLOB
|| mach_read_from_4(&block->page.frame
[FIL_PAGE_DATA
@ -1232,6 +1234,8 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
goto func_exit;
}
btr_search_drop_page_hash_index(block, index());
if (!!page_is_comp(block->page.frame) != index()->table->not_redundant() ||
btr_page_get_index_id(block->page.frame) != index()->id ||
fil_page_get_type(block->page.frame) == FIL_PAGE_RTREE ||
@ -1344,9 +1348,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
mtr->rollback_to_savepoint(savepoint, savepoint + 1);
reached_index_root_and_leaf:
ut_ad(rw_latch == RW_X_LATCH);
#ifdef BTR_CUR_HASH_ADAPT
btr_search_drop_page_hash_index(block, true);
#endif
btr_search_drop_page_hash_index(block, index());
if (page_cur_search_with_match(tuple, mode, &up_match, &low_match,
&page_cur, nullptr))
goto corrupted;
@ -1631,6 +1633,7 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
const page_cur_mode_t page_mode{btr_cur_nonleaf_mode(mode)};
mtr->page_lock(block, RW_X_LATCH);
btr_search_drop_page_hash_index(block, index());
up_match= 0;
up_bytes= 0;
@ -1700,6 +1703,8 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
goto func_exit;
}
btr_search_drop_page_hash_index(block, index());
if (!!page_is_comp(block->page.frame) != index()->table->not_redundant() ||
btr_page_get_index_id(block->page.frame) != index()->id ||
fil_page_get_type(block->page.frame) == FIL_PAGE_RTREE ||
@ -1808,7 +1813,10 @@ search_loop:
goto func_exit;
}
else
{
btr_search_drop_page_hash_index(block, index);
btr_cur_nonleaf_make_young(&block->page);
}
#ifdef UNIV_ZIP_DEBUG
if (const page_zip_des_t *page_zip= buf_block_get_page_zip(block))
@ -3335,6 +3343,9 @@ static void btr_cur_trim_alter_metadata(dtuple_t* entry,
mtr.commit();
return;
}
btr_search_drop_page_hash_index(block, index);
ut_ad(fil_page_get_type(block->page.frame) == FIL_PAGE_TYPE_BLOB);
ut_ad(mach_read_from_4(&block->page.frame
[FIL_PAGE_DATA + BTR_BLOB_HDR_NEXT_PAGE_NO])

View file

@ -26,8 +26,8 @@ Created 2/23/1996 Heikki Tuuri
#include "btr0pcur.h"
#include "buf0rea.h"
#include "btr0sea.h"
#include "rem0cmp.h"
#include "trx0trx.h"
#include "ibuf0ibuf.h"
/**************************************************************//**
@ -259,11 +259,13 @@ static bool btr_pcur_optimistic_latch_leaves(btr_pcur_t *pcur,
memcmp_aligned<2>(block->page.frame + PAGE_HEADER + PAGE_INDEX_ID,
prev->page.frame + PAGE_HEADER + PAGE_INDEX_ID, 8))
goto fail;
btr_search_drop_page_hash_index(prev, pcur->index());
}
else
prev= nullptr;
mtr->upgrade_buffer_fix(savepoint, RW_S_LATCH);
btr_search_drop_page_hash_index(block, pcur->index());
if (UNIV_UNLIKELY(block->modify_clock != modify_clock) ||
UNIV_UNLIKELY(block->page.is_freed()) ||

View file

@ -633,7 +633,7 @@ func_exit:
else if (UNIV_UNLIKELY(block_index != index))
{
ut_ad(block_index->id == index->id);
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
}
else if (cursor.flag == BTR_CUR_HASH_FAIL)
btr_search_update_hash_ref(cursor, block, left_bytes_fields);
@ -1220,15 +1220,15 @@ static constexpr size_t REC_FOLD_IN_STACK= 128;
/** Drop any adaptive hash index entries that point to an index page.
@param block latched block containing index page, or a buffer-unfixed
index page or a block in state BUF_BLOCK_REMOVE_HASH
@param garbage_collect drop ahi only if the index is marked as freed
@param not_garbage drop only if the index is set and NOT this
@param folds work area for REC_FOLD_IN_STACK rec_fold() values */
static void btr_search_drop_page_hash_index(buf_block_t *block,
bool garbage_collect,
const dict_index_t *not_garbage,
uint32_t *folds) noexcept
{
retry:
dict_index_t *index= block->index;
if (!index)
if (!index || index == not_garbage)
return;
ut_d(const auto state= block->page.state());
@ -1268,8 +1268,12 @@ retry:
goto retry;
}
}
else if (garbage_collect)
else if (not_garbage != nullptr)
{
ut_ad(!index || index == not_garbage ||
not_garbage == reinterpret_cast<dict_index_t*>(-1));
goto unlock_and_return;
}
assert_block_ahi_valid(block);
@ -1394,10 +1398,10 @@ cleanup:
}
void btr_search_drop_page_hash_index(buf_block_t *block,
bool garbage_collect) noexcept
const dict_index_t *not_garbage) noexcept
{
uint32_t folds[REC_FOLD_IN_STACK];
btr_search_drop_page_hash_index(block, garbage_collect, folds);
btr_search_drop_page_hash_index(block, not_garbage, folds);
}
void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept
@ -1411,14 +1415,11 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept
if (buf_block_t *block= buf_page_get_gen(page_id, 0, RW_X_LATCH, nullptr,
BUF_PEEK_IF_IN_POOL, &mtr))
{
if (IF_DBUG(dict_index_t *index=,) block->index)
{
/* In all our callers, the table handle should be open, or we
should be in the process of dropping the table (preventing
eviction). */
DBUG_ASSERT(index->table->get_ref_count() || dict_sys.locked());
btr_search_drop_page_hash_index(block, false);
}
/* In all our callers, the table handle should be open, or we
should be in the process of dropping the table (preventing eviction). */
ut_d(if (dict_index_t *i= block->index))
ut_ad(i->table->get_ref_count() || dict_sys.locked());
btr_search_drop_page_hash_index(block, nullptr);
}
mtr.commit();
@ -1465,7 +1466,7 @@ static void btr_search_build_page_hash_index(dict_index_t *index,
struct{uint32_t fold;uint32_t offset;} fr[REC_FOLD_IN_STACK / 2];
if (rebuild)
btr_search_drop_page_hash_index(block, false, &fr[0].fold);
btr_search_drop_page_hash_index(block, nullptr, &fr[0].fold);
const uint32_t n_bytes_fields{left_bytes_fields & ~buf_block_t::LEFT_SIDE};
@ -1613,7 +1614,7 @@ void btr_search_move_or_delete_hash_entries(buf_block_t *new_block,
{
ut_ad(!index || index == new_block_index);
drop_exit:
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
return;
}
@ -1660,7 +1661,7 @@ void btr_search_update_hash_on_delete(btr_cur_t *cursor) noexcept
if (UNIV_UNLIKELY(index != cursor->index()))
{
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
return;
}
@ -1715,7 +1716,7 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor, bool reorg) noexcept
{
ut_ad(index->id == cursor->index()->id);
drop:
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
return;
}

View file

@ -2355,7 +2355,7 @@ void buf_page_free(fil_space_t *space, uint32_t page, mtr_t *mtr)
block->page.lock.x_lock();
#ifdef BTR_CUR_HASH_ADAPT
if (block->index)
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
#endif /* BTR_CUR_HASH_ADAPT */
block->page.set_freed(block->page.state());
mtr->memo_push(block, MTR_MEMO_PAGE_X_MODIFY);
@ -2992,8 +2992,7 @@ wait_for_unzip:
if (block->page.lock.x_lock_upgraded()) {
ut_ad(block->page.id() == page_id);
block->unfix();
mtr->page_lock_upgrade(*block);
return block;
return mtr->page_lock_upgrade(*block);
}
}
@ -3006,11 +3005,6 @@ wait_for_unzip:
}
ut_ad(state < buf_page_t::READ_FIX || state > buf_page_t::WRITE_FIX);
#ifdef BTR_CUR_HASH_ADAPT
btr_search_drop_page_hash_index(block, true);
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(page_id_t(page_get_space_id(block->page.frame),
page_get_page_no(block->page.frame)) == page_id);
@ -3065,7 +3059,7 @@ buf_block_t *buf_page_optimistic_get(buf_block_t *block,
{
block->page.lock.u_x_upgrade();
block->page.unfix();
mtr->page_lock_upgrade(*block);
block= mtr->page_lock_upgrade(*block);
ut_ad(modify_clock == block->modify_clock);
}
else if (!block->page.lock.x_lock_try())
@ -3271,7 +3265,7 @@ retry:
#ifdef BTR_CUR_HASH_ADAPT
if (drop_hash_entry)
btr_search_drop_page_hash_index(reinterpret_cast<buf_block_t*>(bpage),
false);
nullptr);
#endif /* BTR_CUR_HASH_ADAPT */
return reinterpret_cast<buf_block_t*>(bpage);

View file

@ -955,7 +955,7 @@ func_exit:
order to avoid bogus Valgrind or MSAN warnings.*/
MEM_MAKE_DEFINED(block->page.frame, srv_page_size);
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
MEM_UNDEFINED(block->page.frame, srv_page_size);
mysql_mutex_lock(&buf_pool.mutex);
}

View file

@ -33,6 +33,7 @@ Created Jan 06, 2010 Vasil Dimov
#include <mysql_com.h>
#include "log.h"
#include "btr0btr.h"
#include "btr0sea.h"
#include "que0que.h"
#include "scope.h"
#include "debug_sync.h"
@ -1900,6 +1901,7 @@ dict_stats_analyze_index_below_cur(
goto func_exit;
}
btr_search_drop_page_hash_index(block, index);
page = block->page.frame;
if (page_is_leaf(page)) {

View file

@ -32,6 +32,7 @@ Modified Jan Lindström jan.lindstrom@mariadb.com
#else
#include "buf0flu.h"
#include "buf0dblwr.h"
#include "btr0sea.h"
#include "srv0srv.h"
#include "srv0start.h"
#include "mtr0mtr.h"
@ -1674,6 +1675,8 @@ fil_crypt_get_page_throttle(
BUF_PEEK_IF_IN_POOL, mtr);
if (block != NULL) {
/* page was in buffer pool */
btr_search_drop_page_hash_index(
block, reinterpret_cast<dict_index_t*>(-1));
state->crypt_stat.pages_read_from_cache++;
return block;
}
@ -1757,6 +1760,8 @@ fil_crypt_rotate_page(
if (buf_block_t* block = fil_crypt_get_page_throttle(state,
offset, &mtr,
&sleeptime_ms)) {
btr_search_drop_page_hash_index(
block, reinterpret_cast<dict_index_t*>(-1));
bool modified = false;
byte* frame = buf_block_get_frame(block);
const lsn_t block_lsn = mach_read_from_8(FIL_PAGE_LSN + frame);

View file

@ -69,9 +69,9 @@ void btr_search_move_or_delete_hash_entries(buf_block_t *new_block,
/** Drop any adaptive hash index entries that point to an index page.
@param block latched block containing index page, or a buffer-unfixed
index page or a block in state BUF_BLOCK_REMOVE_HASH
@param garbage_collect drop ahi only if the index is marked as freed */
void btr_search_drop_page_hash_index(buf_block_t* block,
bool garbage_collect) noexcept;
@param not_garbage drop only if the index is set and NOT this */
void btr_search_drop_page_hash_index(buf_block_t *block,
const dict_index_t *not_garbage) noexcept;
/** Drop possible adaptive hash index entries when a page is evicted
from the buffer pool or freed in a file, or the index is being dropped.
@ -202,7 +202,7 @@ extern ulint btr_search_n_hash_fail;
#else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create()
# define btr_search_sys_free()
# define btr_search_drop_page_hash_index(block, garbage_collect)
# define btr_search_drop_page_hash_index(block, not_garbage)
# define btr_search_move_or_delete_hash_entries(new_block, block)
# define btr_search_update_hash_on_insert(cursor, ahi_latch)
# define btr_search_update_hash_on_delete(cursor)

View file

@ -314,13 +314,14 @@ public:
/** Latch a buffer pool block.
@param block block to be latched
@param rw_latch RW_S_LATCH, RW_SX_LATCH, RW_X_LATCH, RW_NO_LATCH */
void page_lock(buf_block_t *block, ulint rw_latch);
@param rw_latch RW_S_LATCH, RW_SX_LATCH, RW_X_LATCH, RW_NO_LATCH
@return block */
buf_block_t *page_lock(buf_block_t *block, ulint rw_latch) noexcept;
/** Acquire a latch on a buffer-fixed buffer pool block.
@param savepoint savepoint location of the buffer-fixed block
@param rw_latch latch to acquire */
void upgrade_buffer_fix(ulint savepoint, rw_lock_type_t rw_latch);
void upgrade_buffer_fix(ulint savepoint, rw_lock_type_t rw_latch) noexcept;
/** Register a change to the page latch state. */
void lock_register(ulint savepoint, mtr_memo_type_t type)
@ -331,8 +332,10 @@ public:
slot.type= type;
}
/** Upgrade U locks on a block to X */
void page_lock_upgrade(const buf_block_t &block);
/** Upgrade U locks on a block to X
@param block block on which to upgrade
@return &block */
buf_block_t *page_lock_upgrade(const buf_block_t &block) noexcept;
/** Upgrade index U lock to X */
ATTRIBUTE_COLD void index_lock_upgrade();

View file

@ -1441,7 +1441,7 @@ bool mtr_t::memo_contains(const fil_space_t& space) const
return false;
}
void mtr_t::page_lock_upgrade(const buf_block_t &block)
buf_block_t *mtr_t::page_lock_upgrade(const buf_block_t &block) noexcept
{
ut_ad(block.page.lock.have_x());
@ -1454,12 +1454,10 @@ void mtr_t::page_lock_upgrade(const buf_block_t &block)
ut_d(if (dict_index_t *index= block.index))
ut_ad(!index->freed());
#endif /* BTR_CUR_HASH_ADAPT */
return const_cast<buf_block_t*>(&block);
}
/** Latch a buffer pool block.
@param block block to be latched
@param rw_latch RW_S_LATCH, RW_SX_LATCH, RW_X_LATCH, RW_NO_LATCH */
void mtr_t::page_lock(buf_block_t *block, ulint rw_latch)
buf_block_t *mtr_t::page_lock(buf_block_t *block, ulint rw_latch) noexcept
{
mtr_memo_type_t fix_type;
ut_d(const auto state= block->page.state());
@ -1485,23 +1483,21 @@ void mtr_t::page_lock(buf_block_t *block, ulint rw_latch)
{
block->unfix();
page_lock_upgrade(*block);
return;
return block;
}
ut_ad(!block->page.is_io_fixed());
}
#ifdef BTR_CUR_HASH_ADAPT
btr_search_drop_page_hash_index(block, true);
#endif
done:
ut_ad(state < buf_page_t::UNFIXED ||
page_id_t(page_get_space_id(block->page.frame),
page_get_page_no(block->page.frame)) == block->page.id());
memo_push(block, fix_type);
return block;
}
void mtr_t::upgrade_buffer_fix(ulint savepoint, rw_lock_type_t rw_latch)
noexcept
{
ut_ad(is_active());
mtr_memo_slot_t &slot= m_memo[savepoint];
@ -1531,9 +1527,6 @@ void mtr_t::upgrade_buffer_fix(ulint savepoint, rw_lock_type_t rw_latch)
ut_ad(!block->page.is_io_fixed());
}
#ifdef BTR_CUR_HASH_ADAPT
btr_search_drop_page_hash_index(block, true);
#endif
ut_ad(page_id_t(page_get_space_id(block->page.frame),
page_get_page_no(block->page.frame)) == block->page.id());
}
@ -1763,7 +1756,7 @@ void mtr_t::free(const fil_space_t &space, uint32_t offset)
}
}
else if (slot.type & (MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX) &&
block->page.id() == id)
block->page.id() == id)
{
ut_ad(!block->page.is_freed());
ut_ad(!freed);
@ -1786,7 +1779,7 @@ void mtr_t::free(const fil_space_t &space, uint32_t offset)
}
#ifdef BTR_CUR_HASH_ADAPT
if (block->index)
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
#endif /* BTR_CUR_HASH_ADAPT */
block->page.set_freed(block->page.state());
}

View file

@ -4396,7 +4396,7 @@ page_zip_reorganize(
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
temp_block = buf_block_alloc();
btr_search_drop_page_hash_index(block, false);
btr_search_drop_page_hash_index(block, nullptr);
temp_page = temp_block->page.frame;
/* Copy the old page to temporary space */