MDEV-35049: btr_search_check_free_space_in_heap() is a bottleneck

Let us implement a simple fixed-size allocator for the adaptive hash
index, instead of complicating mem_heap_t or mem_block_info_t.

MEM_HEAP_BTR_SEARCH: Remove.

mem_block_info_t::free_block(), mem_heap_free_block_free(): Remove.

mem_heap_free_top(), mem_heap_get_top(): Remove.

btr_sea::partition::spare: Replaces mem_block_info_t::free_block.
This keeps one spare block per adaptive hash index partition, ready
for processing an insert.

We must not wait for buf_pool.mutex while holding
any btr_sea::partition::latch. That is why we cache one block for
future allocations. The cached block is protected by a new
btr_sea::partition::blocks_mutex, in order to relieve pressure on
btr_sea::partition::latch.

btr_sea::partition::prepare_insert(): Replaces
btr_search_check_free_space_in_heap().
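
A minimal sketch of the allocation discipline, assuming hypothetical
buf_block_alloc()/buf_block_free() helpers for getting a block from
and returning it to the buffer pool (the real function may differ in
detail):

void btr_sea::partition::prepare_insert() noexcept
{
  /* Dirty read: a spare block may already be cached. */
  if (spare)
    return;

  /* Allocate before acquiring any AHI latch: this may have to
  wait for buf_pool.mutex, which we must never do while holding
  a btr_sea::partition::latch. */
  buf_block_t *block= buf_block_alloc();

  blocks_mutex.wr_lock();
  if (!spare)
  {
    spare= block;
    block= nullptr;
  }
  blocks_mutex.wr_unlock();

  if (block)
    buf_block_free(block); /* a racing thread cached a block first */
}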

btr_sea::partition::erase(): Replaces ha_search_and_delete_if_found().

btr_sea::partition::cleanup_after_erase(): Replaces most of
ha_delete_hash_node(). Unlike the previous implementation, we will
retain a spare block for prepare_insert().
This should reduce some contention on buf_pool.mutex.
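
The spare-retention idea, as a hedged sketch; retain_one() is a
hypothetical helper standing in for the tail end of
cleanup_after_erase(), and the compaction of the fixed-size
allocator is omitted:

/* Called with blocks_mutex held, when "emptied" no longer contains
any ahi_node. Returns the block that the caller should free outside
all AHI latches, or nullptr. */
buf_block_t *btr_sea::partition::retain_one(buf_block_t *emptied) noexcept
{
  if (!spare)
  {
    /* Keep the block for a future prepare_insert() instead of
    returning it to buf_pool, which would require buf_pool.mutex. */
    spare= emptied;
    return nullptr;
  }
  return emptied;
}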

btr_search.n_parts: Replaces btr_ahi_parts.

btr_search.enabled: Replaces btr_search_enabled. This must hold
whenever buf_block_t::index is set while a thread is holding a
btr_sea::partition::latch.
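
In sketch form, writers first perform a dirty read of the flag and
then recheck it under the partition latch (the surrounding code is
abridged):

if (!btr_search.enabled)            /* dirty read, no latch held */
  return;
part.latch.wr_lock(SRW_LOCK_CALL);
if (btr_search.enabled)             /* recheck; now stable */
{
  /* ... it is now safe to set or follow buf_block_t::index ... */
}
part.latch.wr_unlock();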

dict_index_t::search_info: Remove pointer indirection, and use
Atomic_relaxed or Atomic_counter for most fields.
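
A sketch of the embedded member; the exact field list is abridged
and partly assumed, based on the old btr_search_t:

struct dict_index_t
{
  /* ... */
  struct
  {
    /** the root page when it was last fetched; no latch */
    buf_block_t *root_guess;
    /** number of searches since the last hash analysis reset */
    Atomic_relaxed<uint32_t> hash_analysis;
    /** number of blocks whose buf_block_t::index points here */
    Atomic_counter<uint32_t> ref_count;
  } search_info;
};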

btr_search_guess_on_hash(): Let the caller ensure that latch_mode is
BTR_MODIFY_LEAF or BTR_SEARCH_LEAF. Release btr_sea::partition::latch
before buffer-fixing the block. The page latch that we already acquired
is preventing buffer pool eviction. We must validate both
block->index and block->page.state while holding part.latch
in order to avoid race conditions with buffer page relocation
or buf_pool_t::resize().
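
The resulting order of operations, as a simplified sketch; error
handling is omitted, and lookup() and block_from_rec() are
hypothetical helpers, not the exact MariaDB code:

part.latch.rd_lock(SRW_LOCK_CALL);
const rec_t *rec= lookup(part.table, fold);              /* assumed */
buf_block_t *block= rec ? block_from_rec(rec) : nullptr; /* assumed */
/* Validate under part.latch, to avoid races with buffer page
relocation and buf_pool_t::resize(). */
if (block && block->index == index && block->page.in_file())
{
  block->page.lock.s_lock();   /* the page latch blocks eviction */
  part.latch.rd_unlock();
  block->page.fix();           /* buffer-fix without holding part.latch */
  /* ... position the cursor; verify with check_mismatch() ... */
}
else
  part.latch.rd_unlock();      /* fall back to a normal search */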

btr_search_check_guess(): Remove the constant parameter
can_only_compare_to_cursor_rec=false.

ahi_node: Replaces ha_node_t.
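
The node layout is roughly as follows (a sketch; the field order and
the exact types are assumptions, mirroring the old ha_node_t):

struct ahi_node
{
  /** pointer to the next node in the hash chain, or nullptr */
  ahi_node *next;
  /** CRC-32C of the record prefix */
  uint32_t fold;
  /** pointer to a record in a buf_page_t::frame */
  const rec_t *rec;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
  /** block containing rec, for debug checks */
  buf_block_t *block;
#endif
};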

This has been tested by running the regression test suite
with the adaptive hash index enabled:
./mtr --mysqld=--loose-innodb-adaptive-hash-index=ON

Reviewed by: Vladislav Lesin
Marko Mäkelä 2025-01-10 16:12:34 +02:00
parent 221aa5e08f
commit a338b3581f
58 changed files with 2959 additions and 4156 deletions

View file

@ -1843,8 +1843,8 @@ struct my_option xb_server_options[] =
#ifdef BTR_CUR_HASH_ADAPT
{"innodb_adaptive_hash_index", OPT_INNODB_ADAPTIVE_HASH_INDEX,
"Enable InnoDB adaptive hash index (disabled by default).",
&btr_search_enabled,
&btr_search_enabled,
&btr_search.enabled,
&btr_search.enabled,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif /* BTR_CUR_HASH_ADAPT */
{"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT,
@ -2409,7 +2409,7 @@ static bool innodb_init_param()
srv_page_size = 0;
srv_page_size_shift = 0;
#ifdef BTR_CUR_HASH_ADAPT
btr_ahi_parts = 1;
btr_search.n_parts = 1;
#endif /* BTR_CUR_HASH_ADAPT */
if (innobase_page_size != (1LL << 14)) {

View file

@ -196,7 +196,6 @@ SET(INNOBASE_SOURCES
include/btr0pcur.h
include/btr0pcur.inl
include/btr0sea.h
include/btr0sea.inl
include/btr0types.h
include/buf0buddy.h
include/buf0buf.h
@ -263,8 +262,6 @@ SET(INNOBASE_SOURCES
include/gis0rtree.inl
include/gis0type.h
include/ha_prototypes.h
include/ha0ha.h
include/ha0ha.inl
include/ha0storage.h
include/ha0storage.inl
include/handler0alter.h
@ -370,7 +367,6 @@ SET(INNOBASE_SOURCES
include/ut0sort.h
include/ut0stage.h
include/ut0ut.h
include/ut0ut.inl
include/ut0vec.h
include/ut0vec.inl
include/ut0wqueue.h

View file

@ -284,7 +284,7 @@ btr_root_block_get(
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *&guess= btr_search_get_info(index)->root_guess;
buf_block_t *&guess= index->search_info.root_guess;
guess=
#endif
block=
@ -597,8 +597,8 @@ buf_block_t *btr_root_block_sx(dict_index_t *index, mtr_t *mtr, dberr_t *err)
return root;
}
#ifdef BTR_CUR_HASH_ADAPT
else
ut_ad(!root->index || !root->index->freed());
ut_d(else if (dict_index_t *index= root->index))
ut_ad(!index->freed());
#endif
return root;
}
@ -863,7 +863,7 @@ static rec_offs *btr_page_get_parent(rec_offs *offsets, mem_heap_t *heap,
{
ut_ad(block->page.lock.have_u_or_x() ||
(!block->page.lock.have_s() && index->lock.have_x()));
ulint up_match= 0, low_match= 0;
uint16_t up_match= 0, low_match= 0;
cursor->page_cur.block= block;
if (page_cur_search_with_match(tuple, PAGE_CUR_LE, &up_match,
&low_match, &cursor->page_cur,
@ -1221,7 +1221,7 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *&guess= btr_search_get_info(this)->root_guess;
buf_block_t *&guess= search_info.root_guess;
guess=
#endif
root_block= buf_page_get_gen({table->space_id, page},
@ -1231,14 +1231,12 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
{
btr_free_but_not_root(root_block, mtr.get_log_mode()
#ifdef BTR_CUR_HASH_ADAPT
,n_ahi_pages() != 0
,any_ahi_pages()
#endif
);
btr_search_drop_page_hash_index(root_block, false);
#ifdef BTR_CUR_HASH_ADAPT
if (root_block->index)
btr_search_drop_page_hash_index(root_block, false);
ut_ad(n_ahi_pages() == 0);
ut_ad(!any_ahi_pages());
#endif
mtr.memset(root_block, PAGE_HEADER + PAGE_BTR_SEG_LEAF,
FSEG_HEADER_SIZE, 0);
@ -1283,7 +1281,7 @@ void btr_drop_temporary_table(const dict_table_t &table)
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *guess= index->search_info->root_guess;
buf_block_t *guess= index->search_info.root_guess;
#endif
if (buf_block_t *block= buf_page_get_low({SRV_TMP_SPACE_ID, index->page},
0, RW_X_LATCH, guess, BUF_GET,
@ -2151,7 +2149,7 @@ btr_root_raise_and_insert(
ut_ad(dtuple_check_typed(tuple));
/* Reposition the cursor to the child node */
ulint low_match = 0, up_match = 0;
uint16_t low_match = 0, up_match = 0;
if (page_cur_search_with_match(tuple, PAGE_CUR_LE,
&up_match, &low_match,
@ -2837,7 +2835,7 @@ btr_insert_into_right_sibling(
return nullptr;
}
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
if (page_cur_search_with_match(tuple,
PAGE_CUR_LE, &up_match, &low_match,
@ -3369,7 +3367,7 @@ insert_empty:
page_cursor = btr_cur_get_page_cur(cursor);
page_cursor->block = insert_block;
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
if (page_cur_search_with_match(tuple,
PAGE_CUR_LE, &up_match, &low_match,

View file

@ -1114,8 +1114,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
MEM_UNDEFINED(&up_bytes, sizeof up_bytes);
MEM_UNDEFINED(&low_match, sizeof low_match);
MEM_UNDEFINED(&low_bytes, sizeof low_bytes);
ut_d(up_match= ULINT_UNDEFINED);
ut_d(low_match= ULINT_UNDEFINED);
ut_d(up_match= low_match= uint16_t(~0u));
ut_ad(!(latch_mode & BTR_ALREADY_S_LATCHED) ||
mtr->memo_contains_flagged(&index()->lock,
@ -1164,24 +1163,25 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
#ifndef BTR_CUR_ADAPT
guess= nullptr;
#else
btr_search_t *info= btr_search_get_info(index());
auto info= &index()->search_info;
guess= info->root_guess;
# ifdef BTR_CUR_HASH_ADAPT
# ifdef UNIV_SEARCH_PERF_STAT
info->n_searches++;
# endif
bool ahi_enabled= btr_search_enabled && !index()->is_ibuf();
/* We do a dirty read of btr_search_enabled below,
and btr_search_guess_on_hash() will have to check it again. */
if (!ahi_enabled);
else if (btr_search_guess_on_hash(index(), info, tuple, mode,
if (latch_mode > BTR_MODIFY_LEAF)
/* The adaptive hash index cannot be useful for these searches. */;
/* We do a dirty read of btr_search.enabled below,
and btr_search_guess_on_hash() will have to check it again. */
else if (!btr_search.enabled);
else if (btr_search_guess_on_hash(index(), tuple, mode,
latch_mode, this, mtr))
{
/* Search using the hash index succeeded */
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_GE);
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_GE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
ut_ad(low_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
++btr_cur_n_sea;
return DB_SUCCESS;
@ -1456,9 +1456,9 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
if (page_cur_search_with_match(tuple, mode, &up_match, &low_match,
&page_cur, nullptr))
goto corrupted;
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_GE);
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_GE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
ut_ad(low_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
goto func_exit;
}
@ -1508,22 +1508,22 @@ release_tree:
reached_latched_leaf:
#ifdef BTR_CUR_HASH_ADAPT
if (ahi_enabled && !(tuple->info_bits & REC_INFO_MIN_REC_FLAG))
if (!(tuple->info_bits & REC_INFO_MIN_REC_FLAG) && !index()->is_ibuf() &&
btr_search.enabled)
{
if (page_cur_search_with_match_bytes(tuple, mode,
&up_match, &up_bytes,
&low_match, &low_bytes, &page_cur))
if (page_cur_search_with_match_bytes(*tuple, mode, &up_match, &low_match,
&page_cur, &up_bytes, &low_bytes))
goto corrupted;
}
else
#endif /* BTR_CUR_HASH_ADAPT */
if (page_cur_search_with_match(tuple, mode, &up_match, &low_match,
&page_cur, nullptr))
goto corrupted;
if (page_cur_search_with_match(tuple, mode, &up_match, &low_match,
&page_cur, nullptr))
goto corrupted;
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_GE);
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_GE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
ut_ad(low_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
if (latch_mode == BTR_MODIFY_TREE &&
btr_cur_need_opposite_intention(block->page, index()->is_clust(),
@ -1533,18 +1533,18 @@ release_tree:
goto need_opposite_intention;
#ifdef BTR_CUR_HASH_ADAPT
/* We do a dirty read of btr_search_enabled here. We will
properly check btr_search_enabled again in
/* We do a dirty read of btr_search.enabled here. We will recheck in
btr_search_build_page_hash_index() before building a page hash
index, while holding search latch. */
if (!btr_search_enabled);
if (!btr_search.enabled);
else if (tuple->info_bits & REC_INFO_MIN_REC_FLAG)
/* This may be a search tuple for btr_pcur_t::restore_position(). */
ut_ad(tuple->is_metadata() ||
(tuple->is_metadata(tuple->info_bits ^ REC_STATUS_INSTANT)));
else if (index()->table->is_temporary());
else if (!rec_is_metadata(page_cur.rec, *index()))
btr_search_info_update(index(), this);
else if (!rec_is_metadata(page_cur.rec, *index()) &&
index()->search_info.hash_analysis_useful())
search_info_update();
#endif /* BTR_CUR_HASH_ADAPT */
goto func_exit;
@ -1775,23 +1775,23 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
err= DB_CORRUPTION;
else
{
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_GE);
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_GE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
ut_ad(low_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
#ifdef BTR_CUR_HASH_ADAPT
/* We do a dirty read of btr_search_enabled here. We will
properly check btr_search_enabled again in
/* We do a dirty read of btr_search.enabled here. We will recheck in
btr_search_build_page_hash_index() before building a page hash
index, while holding search latch. */
if (!btr_search_enabled);
if (!btr_search.enabled);
else if (tuple->info_bits & REC_INFO_MIN_REC_FLAG)
/* This may be a search tuple for btr_pcur_t::restore_position(). */
ut_ad(tuple->is_metadata() ||
(tuple->is_metadata(tuple->info_bits ^ REC_STATUS_INSTANT)));
else if (index()->table->is_temporary());
else if (!rec_is_metadata(page_cur.rec, *index()))
btr_search_info_update(index(), this);
else if (!rec_is_metadata(page_cur.rec, *index()) &&
index()->search_info.hash_analysis_useful())
search_info_update();
#endif /* BTR_CUR_HASH_ADAPT */
err= DB_SUCCESS;
}
@ -1894,8 +1894,7 @@ dberr_t btr_cur_search_to_nth_level(ulint level,
#ifndef BTR_CUR_ADAPT
buf_block_t *block= nullptr;
#else
btr_search_t *info= btr_search_get_info(index);
buf_block_t *block= info->root_guess;
buf_block_t *block= index->search_info.root_guess;
#endif /* BTR_CUR_ADAPT */
ut_ad(mtr->memo_contains_flagged(&index->lock,
@ -2650,15 +2649,8 @@ fail_err:
ut_ad(entry->is_metadata());
ut_ad(index->is_instant());
ut_ad(flags == BTR_NO_LOCKING_FLAG);
} else if (index->table->is_temporary()) {
} else {
srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
if (!reorg && cursor->flag == BTR_CUR_HASH) {
btr_search_update_hash_node_on_insert(
cursor, ahi_latch);
} else {
btr_search_update_hash_on_insert(cursor, ahi_latch);
}
} else if (!index->table->is_temporary()) {
btr_search_update_hash_on_insert(cursor, reorg);
}
#endif /* BTR_CUR_HASH_ADAPT */
@ -2848,10 +2840,8 @@ btr_cur_pessimistic_insert(
ut_ad(index->is_instant());
ut_ad(flags & BTR_NO_LOCKING_FLAG);
ut_ad(!(flags & BTR_CREATE_FLAG));
} else if (index->table->is_temporary()) {
} else {
btr_search_update_hash_on_insert(
cursor, btr_search_sys.get_latch(*index));
} else if (!index->table->is_temporary()) {
btr_search_update_hash_on_insert(cursor, false);
}
#endif /* BTR_CUR_HASH_ADAPT */
if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
@ -3435,9 +3425,9 @@ btr_cur_update_in_place(
#ifdef BTR_CUR_HASH_ADAPT
{
srw_spin_lock* ahi_latch = block->index
? btr_search_sys.get_latch(*index) : NULL;
if (ahi_latch) {
auto part = block->index
? &btr_search.get_part(*index) : nullptr;
if (part) {
/* TO DO: Can we skip this if none of the fields
index->search_info->curr_n_fields
are being updated? */
@ -3455,7 +3445,7 @@ btr_cur_update_in_place(
btr_search_update_hash_on_delete(cursor);
}
ahi_latch->wr_lock(SRW_LOCK_CALL);
part->latch.wr_lock(SRW_LOCK_CALL);
}
assert_block_ahi_valid(block);
@ -3465,8 +3455,8 @@ btr_cur_update_in_place(
mtr);
#ifdef BTR_CUR_HASH_ADAPT
if (ahi_latch) {
ahi_latch->wr_unlock();
if (part) {
part->latch.wr_unlock();
}
}
#endif /* BTR_CUR_HASH_ADAPT */
@ -3526,7 +3516,7 @@ static void btr_cur_trim_alter_metadata(dtuple_t* entry,
if (n_fields != index->n_uniq) {
ut_ad(n_fields
>= index->n_core_fields);
entry->n_fields = n_fields;
entry->n_fields = uint16_t(n_fields);
return;
}
@ -3562,7 +3552,7 @@ static void btr_cur_trim_alter_metadata(dtuple_t* entry,
ut_ad(n_fields >= index->n_core_fields);
mtr.commit();
entry->n_fields = n_fields + 1;
entry->n_fields = uint16_t(n_fields + 1);
}
/** Trim an update tuple due to instant ADD COLUMN, if needed.
@ -3618,7 +3608,7 @@ btr_cur_trim(
ulint n_fields = upd_get_nth_field(update, 0)
->field_no;
ut_ad(n_fields + 1 >= entry->n_fields);
entry->n_fields = n_fields;
entry->n_fields = uint16_t(n_fields);
}
} else {
entry->trim(*index);
@ -5130,10 +5120,10 @@ class btr_est_cur_t
/** Matched fields and bytes which are used for on-page search, see
btr_cur_t::(up|low)_(match|bytes) comments for details */
ulint m_up_match= 0;
ulint m_up_bytes= 0;
ulint m_low_match= 0;
ulint m_low_bytes= 0;
uint16_t m_up_match= 0;
uint16_t m_up_bytes= 0;
uint16_t m_low_match= 0;
uint16_t m_low_bytes= 0;
public:
btr_est_cur_t(dict_index_t *index, const dtuple_t &tuple,
@ -5157,12 +5147,7 @@ public:
m_page_mode= PAGE_CUR_LE;
break;
default:
#ifdef PAGE_CUR_LE_OR_EXTENDS
ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE ||
mode == PAGE_CUR_LE_OR_EXTENDS);
#else /* PAGE_CUR_LE_OR_EXTENDS */
ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE);
#endif /* PAGE_CUR_LE_OR_EXTENDS */
m_page_mode= mode;
break;
}

View file

@ -450,7 +450,7 @@ btr_pcur_t::restore_position(btr_latch_mode restore_latch_mode, mtr_t *mtr)
rec_offs_init(offsets);
restore_status ret_val= restore_status::NOT_SAME;
if (rel_pos == BTR_PCUR_ON && btr_pcur_is_on_user_rec(this)) {
ulint n_matched_fields= 0;
uint16_t n_matched_fields= 0;
if (!cmp_dtuple_rec_with_match(
tuple, btr_pcur_get_rec(this), index,
rec_get_offsets(btr_pcur_get_rec(this), index, offsets,

File diff suppressed because it is too large

View file

@ -1040,6 +1040,26 @@ void buf_page_print(const byte *read_buf, ulint zip_size) noexcept
#endif
}
#ifdef BTR_CUR_HASH_ADAPT
/** Ensure that some adaptive hash index fields are initialized */
static void buf_block_init_low(buf_block_t *block) noexcept
{
/* No adaptive hash index entries may point to a previously unused
(and now freshly allocated) block. */
MEM_MAKE_DEFINED(&block->index, sizeof block->index);
MEM_MAKE_DEFINED(&block->n_pointers, sizeof block->n_pointers);
MEM_MAKE_DEFINED(&block->n_hash_helps, sizeof block->n_hash_helps);
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(!block->index);
ut_a(!block->n_pointers);
ut_a(!block->n_hash_helps);
# endif
}
#else /* BTR_CUR_HASH_ADAPT */
inline void buf_block_init_low(buf_block_t*) {}
#endif /* BTR_CUR_HASH_ADAPT */
/** Initialize a buffer page descriptor.
@param[in,out] block buffer page descriptor
@param[in] frame buffer page frame */
@ -1049,8 +1069,7 @@ buf_block_init(buf_block_t* block, byte* frame)
{
/* This function should only be executed at database startup or by
buf_pool.resize(). Either way, adaptive hash index must not exist. */
assert_block_ahi_empty_on_init(block);
buf_block_init_low(block);
block->page.frame = frame;
MEM_MAKE_DEFINED(&block->modify_clock, sizeof block->modify_clock);
@ -1058,10 +1077,6 @@ buf_block_init(buf_block_t* block, byte* frame)
MEM_MAKE_DEFINED(&block->page.lock, sizeof block->page.lock);
block->page.lock.init();
block->page.init(buf_page_t::NOT_USED, page_id_t(~0ULL));
#ifdef BTR_CUR_HASH_ADAPT
MEM_MAKE_DEFINED(&block->index, sizeof block->index);
ut_ad(!block->index);
#endif /* BTR_CUR_HASH_ADAPT */
ut_d(block->in_unzip_LRU_list = false);
ut_d(block->in_withdraw_list = false);
@ -1491,19 +1506,7 @@ inline bool buf_pool_t::realloc(buf_block_t *block) noexcept
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
block->page.set_corrupt_id();
/* set other flags of buf_block_t */
#ifdef BTR_CUR_HASH_ADAPT
/* This code should only be executed by resize(),
while the adaptive hash index is disabled. */
assert_block_ahi_empty(block);
assert_block_ahi_empty_on_init(new_block);
ut_ad(!block->index);
new_block->index = NULL;
new_block->n_hash_helps = 0;
new_block->n_fields = 1;
new_block->left_side = TRUE;
#endif /* BTR_CUR_HASH_ADAPT */
buf_block_init_low(new_block);
ut_d(block->page.set_state(buf_page_t::MEMORY));
/* free block */
new_block = block;
@ -1794,11 +1797,9 @@ inline void buf_pool_t::resize()
/* disable AHI if needed */
buf_resize_status("Disabling adaptive hash index.");
btr_search_s_lock_all();
const bool btr_search_disabled = btr_search_enabled;
btr_search_s_unlock_all();
const bool btr_search_disabled = btr_search.enabled;
btr_search_disable();
btr_search.disable();
if (btr_search_disabled) {
ib::info() << "disabled adaptive hash index.";
@ -2100,7 +2101,7 @@ calc_buf_pool_size:
#ifdef BTR_CUR_HASH_ADAPT
/* enable AHI if needed */
if (btr_search_disabled) {
btr_search_enable(true);
btr_search.enable(true);
ib::info() << "Re-enabled adaptive hash index.";
}
#endif /* BTR_CUR_HASH_ADAPT */
@ -2587,27 +2588,6 @@ buf_page_t *buf_page_get_zip(const page_id_t page_id) noexcept
return bpage;
}
/********************************************************************//**
Initialize some fields of a control block. */
UNIV_INLINE
void
buf_block_init_low(
/*===============*/
buf_block_t* block) /*!< in: block to init */
{
#ifdef BTR_CUR_HASH_ADAPT
/* No adaptive hash index entries may point to a previously
unused (and now freshly allocated) block. */
assert_block_ahi_empty_on_init(block);
block->index = NULL;
block->n_hash_helps = 0;
block->n_fields = 1;
block->n_bytes = 0;
block->left_side = TRUE;
#endif /* BTR_CUR_HASH_ADAPT */
}
bool buf_zip_decompress(buf_block_t *block, bool check) noexcept
{
const byte* frame = block->page.zip.data;

View file

@ -273,6 +273,10 @@ buf_block_t* buf_LRU_get_free_only()
while (block != NULL) {
ut_ad(block->page.in_free_list);
ut_d(block->page.in_free_list = FALSE);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->n_pointers);
ut_ad(!block->index);
#endif
ut_ad(!block->page.oldest_modification());
ut_ad(!block->page.in_LRU_list);
ut_a(!block->page.in_file());
@ -282,10 +286,6 @@ buf_block_t* buf_LRU_get_free_only()
|| UT_LIST_GET_LEN(buf_pool.withdraw)
>= buf_pool.withdraw_target
|| !buf_pool.will_be_withdrawn(block->page)) {
/* No adaptive hash index entries may point to
a free block. */
assert_block_ahi_empty(block);
block->page.set_state(buf_page_t::MEMORY);
block->page.set_os_used();
break;
@ -975,7 +975,10 @@ buf_LRU_block_free_non_file_page(
void* data;
ut_ad(block->page.state() == buf_page_t::MEMORY);
#ifdef BTR_CUR_HASH_ADAPT
assert_block_ahi_empty(block);
block->n_hash_helps = 0;
#endif
ut_ad(!block->page.in_free_list);
ut_ad(!block->page.oldest_modification());
ut_ad(!block->page.in_LRU_list);

View file

@ -76,20 +76,7 @@ void dtuple_t::trim(const dict_index_t& index)
}
}
n_fields = i;
}
/*********************************************************************//**
Sets number of fields used in a tuple. Normally this is set in
dtuple_create, but if you want later to set it smaller, you can use this. */
void
dtuple_set_n_fields(
/*================*/
dtuple_t* tuple, /*!< in: tuple */
ulint n_fields) /*!< in: number of fields */
{
tuple->n_fields = n_fields;
tuple->n_fields_cmp = n_fields;
n_fields = uint16_t(i);
}
/**********************************************************//**
@ -587,8 +574,8 @@ dtuple_convert_big_rec(
ulint longest;
const bool mblob = entry->is_alter_metadata();
ut_ad(entry->n_fields - mblob >= index->first_user_field());
ut_ad(entry->n_fields - mblob <= index->n_fields);
ut_ad(unsigned(entry->n_fields - mblob) >= index->first_user_field());
ut_ad(unsigned(entry->n_fields - mblob) <= index->n_fields);
if (mblob) {
longest_i = index->first_user_field();

View file

@ -1283,7 +1283,7 @@ dict_create_index_step(
}
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!node->index->search_info->ref_count);
ut_ad(!node->index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(table, node->index);
node->index = NULL;

View file

@ -1232,7 +1232,7 @@ static bool dict_table_can_be_evicted(dict_table_t *table)
for (const dict_index_t* index
= dict_table_get_first_index(table);
index; index = dict_table_get_next_index(index)) {
if (index->n_ahi_pages()) {
if (index->any_ahi_pages()) {
return false;
}
}
@ -1259,9 +1259,6 @@ dict_index_t *dict_index_t::clone() const
ut_ad(!rtr_track);
const size_t size= sizeof *this + n_fields * sizeof(*fields) +
#ifdef BTR_CUR_ADAPT
sizeof *search_info +
#endif
1 + strlen(name) +
n_uniq * (sizeof *stat_n_diff_key_vals +
sizeof *stat_n_sample_sizes +
@ -1276,9 +1273,6 @@ dict_index_t *dict_index_t::clone() const
index->name= mem_heap_strdup(heap, name);
index->fields= static_cast<dict_field_t*>
(mem_heap_dup(heap, fields, n_fields * sizeof *fields));
#ifdef BTR_CUR_ADAPT
index->search_info= btr_search_info_create(index->heap);
#endif /* BTR_CUR_ADAPT */
index->stat_n_diff_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_diff_key_vals));
index->stat_n_sample_sizes= static_cast<ib_uint64_t*>
@ -1293,7 +1287,7 @@ dict_index_t *dict_index_t::clone() const
@return this or a clone */
dict_index_t *dict_index_t::clone_if_needed()
{
if (!search_info->ref_count)
if (!search_info.ref_count)
return this;
dict_index_t *prev= UT_LIST_GET_PREV(indexes, this);
@ -2050,9 +2044,6 @@ dict_index_add_to_cache(
/* Add the new index as the last index for the table */
UT_LIST_ADD_LAST(new_index->table->indexes, new_index);
#ifdef BTR_CUR_ADAPT
new_index->search_info = btr_search_info_create(new_index->heap);
#endif /* BTR_CUR_ADAPT */
new_index->page = unsigned(page_no);
new_index->lock.SRW_LOCK_INIT(index_tree_rw_lock_key);
@ -2118,7 +2109,7 @@ dict_index_remove_from_cache_low(
only free the dict_index_t struct when this count drops to
zero. See also: dict_table_can_be_evicted() */
if (index->n_ahi_pages()) {
if (index->any_ahi_pages()) {
table->autoinc_mutex.wr_lock();
index->set_freed();
UT_LIST_ADD_LAST(table->freed_indexes, index);
@ -2371,8 +2362,8 @@ dict_table_copy_v_types(
/* tuple could have more virtual columns than existing table,
if we are calling this for creating index along with adding
virtual columns */
ulint n_fields = ut_min(dtuple_get_n_v_fields(tuple),
static_cast<ulint>(table->n_v_def));
ulint n_fields = std::min<ulint>(dtuple_get_n_v_fields(tuple),
table->n_v_def);
for (ulint i = 0; i < n_fields; i++) {
@ -3727,7 +3718,7 @@ dict_index_build_node_ptr(
dtuple_t* tuple;
dfield_t* field;
byte* buf;
ulint n_unique;
uint16_t n_unique;
if (dict_index_is_ibuf(index)) {
/* In a universal index tree, we take the whole record as
@ -3795,7 +3786,7 @@ dict_index_build_data_tuple(
{
ut_ad(!index->is_clust());
dtuple_t* tuple = dtuple_create(heap, n_fields);
dtuple_t* tuple = dtuple_create(heap, uint16_t(n_fields));
dict_index_copy_types(tuple, index, n_fields);

View file

@ -1334,7 +1334,7 @@ static dberr_t dict_load_columns(dict_table_t *table, unsigned use_uncommitted,
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -1461,7 +1461,7 @@ dict_load_virtual_col(dict_table_t *table, bool uncommitted, ulint nth_v_col)
dfield_t dfield[2];
dtuple_t tuple{
0,2,2,dfield,0,nullptr
0,2,2,0,dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -1705,7 +1705,7 @@ static dberr_t dict_load_fields(dict_index_t *index, bool uncommitted,
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -1964,7 +1964,7 @@ dberr_t dict_load_indexes(dict_table_t *table, bool uncommitted,
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -2353,7 +2353,7 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -2607,7 +2607,7 @@ dict_load_table_on_id(
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -2716,7 +2716,7 @@ static dberr_t dict_load_foreign_cols(dict_foreign_t *foreign, trx_id_t trx_id)
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -2892,7 +2892,7 @@ dict_load_foreign(
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -3103,7 +3103,7 @@ dict_load_foreigns(
bool check_recursive = !trx_id;
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif

View file

@ -3410,7 +3410,7 @@ fts_add_doc_by_id(
/* Search based on Doc ID. Here, we'll need to consider the case
when there is no primary index on Doc ID */
const ulint n_uniq = table->fts_n_uniq();
const auto n_uniq = table->fts_n_uniq();
tuple = dtuple_create(heap, n_uniq);
dfield = dtuple_get_nth_field(tuple, 0);
dfield->type.mtype = DATA_INT;
@ -3461,9 +3461,7 @@ fts_add_doc_by_id(
doc_pcur = &pcur;
} else {
dtuple_t* clust_ref;
ulint n_fields;
n_fields = dict_index_get_n_unique(clust_index);
auto n_fields = dict_index_get_n_unique(clust_index);
clust_ref = dtuple_create(heap, n_fields);
dict_index_copy_types(clust_ref, clust_index, n_fields);

View file

@ -141,12 +141,11 @@ rtr_index_build_node_ptr(
dtuple_t* tuple;
dfield_t* field;
byte* buf;
ulint n_unique;
ulint info_bits;
ut_ad(dict_index_is_spatial(index));
n_unique = DICT_INDEX_SPATIAL_NODEPTR_SIZE;
uint16_t n_unique = DICT_INDEX_SPATIAL_NODEPTR_SIZE;
tuple = dtuple_create(heap, n_unique + 1);
@ -211,8 +210,8 @@ rtr_update_mbr_field(
big_rec_t* dummy_big_rec;
buf_block_t* block;
rec_t* child_rec;
ulint up_match = 0;
ulint low_match = 0;
uint16_t up_match = 0;
uint16_t low_match = 0;
ulint child;
ulint rec_info;
bool ins_suc = true;
@ -607,7 +606,7 @@ rtr_adjust_upper_level(
/* Insert the node for the new page. */
node_ptr_upper = rtr_index_build_node_ptr(
sea_cur->index(), new_mbr, first, new_page_no, heap);
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
err = page_cur_search_with_match(node_ptr_upper,
PAGE_CUR_LE,
&up_match, &low_match,
@ -1109,7 +1108,7 @@ corrupted:
page_cursor->block = cur_split_node->n_node != first_rec_group
? new_block : block;
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
if (page_cur_search_with_match(tuple,
PAGE_CUR_LE, &up_match, &low_match,

View file

@ -338,7 +338,7 @@ rtr_pcur_getnext_from_path(
if (mode == PAGE_CUR_RTREE_LOCATE) {
if (target_level == 0 && level == 0) {
ulint low_match = 0, up_match = 0;
uint16_t low_match = 0, up_match = 0;
found = false;
@ -565,8 +565,8 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
MEM_UNDEFINED(&cur->up_bytes, sizeof cur->up_bytes);
MEM_UNDEFINED(&cur->low_match, sizeof cur->low_match);
MEM_UNDEFINED(&cur->low_bytes, sizeof cur->low_bytes);
ut_d(cur->up_match= ULINT_UNDEFINED);
ut_d(cur->low_match= ULINT_UNDEFINED);
ut_d(cur->up_match= uint16_t(~0U));
ut_d(cur->low_match= uint16_t(~0U));
const bool latch_by_caller= latch_mode & BTR_ALREADY_S_LATCHED;
@ -583,8 +583,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
#ifndef BTR_CUR_ADAPT
buf_block_t *guess= nullptr;
#else
btr_search_t *const info= btr_search_get_info(index);
buf_block_t *guess= info->root_guess;
buf_block_t *&guess= index->search_info.root_guess;
#endif
/* Store the position of the tree latch we push to mtr so that we
@ -620,7 +619,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
/* Start with the root page. */
page_id_t page_id(index->table->space_id, index->page);
ulint up_match= 0, up_bytes= 0, low_match= 0, low_bytes= 0;
uint16_t up_match= 0, up_bytes= 0, low_match= 0, low_bytes= 0;
ulint height= ULINT_UNDEFINED;
/* We use these modified search modes on non-leaf levels of the
@ -635,14 +634,8 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
page_mode= PAGE_CUR_LE;
break;
default:
#ifdef PAGE_CUR_LE_OR_EXTENDS
ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE
|| RTREE_SEARCH_MODE(mode)
|| mode == PAGE_CUR_LE_OR_EXTENDS);
#else /* PAGE_CUR_LE_OR_EXTENDS */
ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE
|| RTREE_SEARCH_MODE(mode));
#endif /* PAGE_CUR_LE_OR_EXTENDS */
ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE ||
RTREE_SEARCH_MODE(mode));
page_mode= mode;
break;
}
@ -733,7 +726,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
rtr_get_mbr_from_tuple(tuple, &cur->rtr_info->mbr);
#ifdef BTR_CUR_ADAPT
info->root_guess= block;
guess= block;
#endif
}
@ -976,9 +969,9 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
cur->up_match= up_match;
cur->up_bytes= up_bytes;
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_GE);
ut_ad(up_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_GE);
ut_ad(up_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
ut_ad(low_match != uint16_t(~0U) || mode != PAGE_CUR_LE);
}
goto func_exit;
@ -1676,7 +1669,7 @@ rtr_cur_restore_position(
ut_ad(r_cursor == node->cursor);
search_again:
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
page_cursor->block = buf_page_get_gen(
page_id_t(index->table->space_id, page_no),

View file

@ -1482,7 +1482,7 @@ static void innodb_drop_database(handlerton*, char *path)
dfield_t dfield;
dtuple_t tuple{
0,1,1,&dfield,0,nullptr
0,1,1,0,&dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -17588,9 +17588,9 @@ innodb_adaptive_hash_index_update(THD*, st_mysql_sys_var*, void*,
{
mysql_mutex_unlock(&LOCK_global_system_variables);
if (*(my_bool*) save) {
btr_search_enable();
btr_search.enable();
} else {
btr_search_disable();
btr_search.disable();
}
mysql_mutex_lock(&LOCK_global_system_variables);
}
@ -19168,18 +19168,15 @@ static MYSQL_SYSVAR_BOOL(stats_traditional, srv_stats_sample_traditional,
NULL, NULL, TRUE);
#ifdef BTR_CUR_HASH_ADAPT
static MYSQL_SYSVAR_BOOL(adaptive_hash_index, btr_search_enabled,
static MYSQL_SYSVAR_BOOL(adaptive_hash_index, *(my_bool*) &btr_search.enabled,
PLUGIN_VAR_OPCMDARG,
"Enable InnoDB adaptive hash index (disabled by default).",
NULL, innodb_adaptive_hash_index_update, false);
/** Number of distinct partitions of AHI.
Each partition is protected by its own latch and so we have parts number
of latches protecting complete search system. */
static MYSQL_SYSVAR_ULONG(adaptive_hash_index_parts, btr_ahi_parts,
static MYSQL_SYSVAR_ULONG(adaptive_hash_index_parts, btr_search.n_parts,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Number of InnoDB Adaptive Hash Index Partitions (default 8)",
NULL, NULL, 8, 1, 512, 0);
NULL, NULL, 8, 1, array_elements(btr_search.parts), 0);
#endif /* BTR_CUR_HASH_ADAPT */
static MYSQL_SYSVAR_UINT(compression_level, page_zip_level,

View file

@ -3625,9 +3625,9 @@ innobase_row_to_mysql(
/* The InnoDB row may contain an extra FTS_DOC_ID column at the end. */
ut_ad(row->n_fields == dict_table_get_n_cols(itab));
ut_ad(n_fields == row->n_fields - DATA_N_SYS_COLS
+ dict_table_get_n_v_cols(itab)
- !!(DICT_TF2_FLAG_IS_SET(itab, DICT_TF2_FTS_HAS_DOC_ID)));
ut_ad(row->n_fields == n_fields + DATA_N_SYS_COLS
- dict_table_get_n_v_cols(itab)
+ !!(DICT_TF2_FLAG_IS_SET(itab, DICT_TF2_FTS_HAS_DOC_ID)));
for (uint i = 0; i < n_fields; i++) {
Field* field = table->field[i];
@ -4695,11 +4695,9 @@ innobase_build_col_map(
DBUG_ENTER("innobase_build_col_map");
DBUG_ASSERT(altered_table != table);
DBUG_ASSERT(new_table != old_table);
DBUG_ASSERT(dict_table_get_n_cols(new_table)
+ dict_table_get_n_v_cols(new_table)
DBUG_ASSERT(unsigned(new_table->n_cols + new_table->n_v_cols)
>= altered_table->s->fields + DATA_N_SYS_COLS);
DBUG_ASSERT(dict_table_get_n_cols(old_table)
+ dict_table_get_n_v_cols(old_table)
DBUG_ASSERT(unsigned(old_table->n_cols + old_table->n_v_cols)
>= table->s->fields + DATA_N_SYS_COLS
|| ha_innobase::omits_virtual_cols(*table->s));
DBUG_ASSERT(!!defaults == !!(ha_alter_info->handler_flags
@ -5984,12 +5982,12 @@ static bool innobase_instant_try(
#ifdef BTR_CUR_HASH_ADAPT
/* Acquire the ahi latch to avoid a race condition
between ahi access and instant alter table */
srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
ahi_latch->wr_lock(SRW_LOCK_CALL);
btr_sea::partition& part = btr_search.get_part(*index);
part.latch.wr_lock(SRW_LOCK_CALL);
#endif /* BTR_CUR_HASH_ADAPT */
const bool metadata_changed = ctx->instant_column();
#ifdef BTR_CUR_HASH_ADAPT
ahi_latch->wr_unlock();
part.latch.wr_unlock();
#endif /* BTR_CUR_HASH_ADAPT */
DBUG_ASSERT(index->n_fields >= n_old_fields);

View file

@ -471,9 +471,6 @@ err_exit:
ibuf.index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID;
ibuf.index->n_uniq = REC_MAX_N_FIELDS;
ibuf.index->lock.SRW_LOCK_INIT(index_tree_rw_lock_key);
#ifdef BTR_CUR_ADAPT
ibuf.index->search_info = btr_search_info_create(ibuf.index->heap);
#endif /* BTR_CUR_ADAPT */
ibuf.index->page = FSP_IBUF_TREE_ROOT_PAGE_NO;
ut_d(ibuf.index->cached = TRUE);
@ -1359,7 +1356,7 @@ ibuf_build_entry_from_ibuf_rec_func(
n_fields = rec_get_n_fields_old(ibuf_rec) - IBUF_REC_FIELD_USER;
tuple = dtuple_create(heap, n_fields);
tuple = dtuple_create(heap, uint16_t(n_fields));
types = rec_get_nth_field_old(ibuf_rec, IBUF_REC_FIELD_METADATA, &len);
@ -1567,7 +1564,7 @@ ibuf_entry_build(
n_fields = dtuple_get_n_fields(entry);
tuple = dtuple_create(heap, n_fields + IBUF_REC_FIELD_USER);
tuple = dtuple_create(heap, uint16_t(n_fields + IBUF_REC_FIELD_USER));
/* 1) Space Id */
@ -2268,7 +2265,7 @@ static void ibuf_delete_recs(const page_id_t page_id)
return;
dfield_t dfield[IBUF_REC_FIELD_METADATA];
dtuple_t tuple {0,IBUF_REC_FIELD_METADATA,IBUF_REC_FIELD_METADATA,
dfield,0,nullptr
0,dfield,nullptr
#ifdef UNIV_DEBUG
,DATA_TUPLE_MAGIC_N
#endif
@ -2464,7 +2461,7 @@ ibuf_merge_space(
dfield_t dfield[IBUF_REC_FIELD_METADATA];
dtuple_t tuple {0, IBUF_REC_FIELD_METADATA,
IBUF_REC_FIELD_METADATA,dfield,0,nullptr
IBUF_REC_FIELD_METADATA,0,dfield,nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif
@ -3623,7 +3620,7 @@ ibuf_insert_to_index_page(
return DB_CORRUPTION;
}
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
page_cur.index = index;
page_cur.block = block;
@ -3747,7 +3744,7 @@ ibuf_set_del_mark(
page_cur_t page_cur;
page_cur.block = block;
page_cur.index = index;
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
@ -3806,7 +3803,7 @@ ibuf_delete(
page_cur_t page_cur;
page_cur.block = block;
page_cur.index = index;
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
@ -4370,7 +4367,7 @@ void ibuf_delete_for_discarded_space(uint32_t space)
dfield_t dfield[IBUF_REC_FIELD_METADATA];
dtuple_t search_tuple {0,IBUF_REC_FIELD_METADATA,
IBUF_REC_FIELD_METADATA,dfield,0
IBUF_REC_FIELD_METADATA,0,dfield
,nullptr
#ifdef UNIV_DEBUG
,DATA_TUPLE_MAGIC_N

View file

@ -33,9 +33,6 @@ Created 10/16/1994 Heikki Tuuri
#include "rem0types.h"
#include "gis0type.h"
#include "my_base.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "srw_lock.h"
#endif
/** Mode flags for btr_cur operations; these can be ORed */
enum {
@ -658,14 +655,13 @@ struct btr_path_t {
/** Values for the flag documenting the used search method */
enum btr_cur_method {
BTR_CUR_HASH = 1, /*!< successful shortcut using
BTR_CUR_BINARY, /*!< success using the binary search */
#ifdef BTR_CUR_HASH_ADAPT
BTR_CUR_HASH, /*!< successful shortcut using
the hash index */
BTR_CUR_HASH_FAIL, /*!< failure using hash, success using
binary search: the misleading hash
reference is stored in the field
hash_node, and might be necessary to
update */
BTR_CUR_BINARY, /*!< success using the binary search */
binary search */
#endif
BTR_CUR_INSERT_TO_IBUF, /*!< performed the intended insert to
the insert buffer */
BTR_CUR_DEL_MARK_IBUF, /*!< performed the intended delete
@ -695,7 +691,7 @@ struct btr_cur_t {
ulint tree_height; /*!< Tree height if the search is done
for a pessimistic insert or update
operation */
ulint up_match; /*!< If the search mode was PAGE_CUR_LE,
uint16_t up_match; /*!< If the search mode was PAGE_CUR_LE,
the number of matched fields to the
the first user record to the right of
the cursor record after search_leaf();
@ -708,27 +704,26 @@ struct btr_cur_t {
record if that record is on a
different leaf page! (See the note in
row_ins_duplicate_error_in_clust.) */
ulint up_bytes; /*!< number of matched bytes to the
uint16_t up_bytes; /*!< number of matched bytes to the
right at the time cursor positioned;
only used internally in searches: not
defined after the search */
ulint low_match; /*!< if search mode was PAGE_CUR_LE,
uint16_t low_match; /*!< if search mode was PAGE_CUR_LE,
the number of matched fields to the
first user record AT THE CURSOR or
to the left of it after search_leaf();
NOT defined for PAGE_CUR_GE or any
other search modes; see also the NOTE
in up_match! */
ulint low_bytes; /*!< number of matched bytes to the
uint16_t low_bytes; /*!< number of matched bytes to the
left at the time cursor positioned;
only used internally in searches: not
defined after the search */
ulint n_fields; /*!< prefix length used in a hash
search if hash_node != NULL */
ulint n_bytes; /*!< hash prefix bytes if hash_node !=
NULL */
ulint fold; /*!< fold value used in the search if
#ifdef BTR_CUR_HASH_ADAPT
uint32_t n_bytes_fields; /*!< prefix used in a hash search */
uint32_t fold; /*!< fold value used in the search if
flag is BTR_CUR_HASH */
#endif
/* @} */
rtr_info_t* rtr_info; /*!< rtree search info */
btr_cur_t() { memset((void*) this, 0, sizeof *this); }
@ -770,6 +765,20 @@ struct btr_cur_t {
@return error code */
inline dberr_t open_random_leaf(rec_offs *&offsets, mem_heap_t *& heap,
mtr_t &mtr);
#ifdef BTR_CUR_HASH_ADAPT
void search_info_update() const noexcept;
/** Check if a guessed position for a tree cursor is correct.
@param tuple search key
@param mode PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G, PAGE_CUR_GE
@param comp nonzero if ROW_FORMAT=REDUNDANT is not being used
@retval true on mismatch or corruption
@retval false on a match; if mode=PAGE_CUR_LE, then up_match,low_match
will be set correctly. */
bool check_mismatch(const dtuple_t& tuple, page_cur_mode_t mode, ulint comp)
noexcept;
#endif
};
/** Modify the delete-mark flag of a record.

View file

@ -59,7 +59,7 @@ btr_pcur_get_up_match(
btr_cursor = btr_pcur_get_btr_cur(cursor);
ut_ad(btr_cursor->up_match != ULINT_UNDEFINED);
ut_ad(btr_cursor->up_match != uint16_t(~0U));
return(btr_cursor->up_match);
}
@ -80,7 +80,7 @@ btr_pcur_get_low_match(
|| (cursor->pos_state == BTR_PCUR_IS_POSITIONED));
btr_cursor = btr_pcur_get_btr_cur(cursor);
ut_ad(btr_cursor->low_match != ULINT_UNDEFINED);
ut_ad(btr_cursor->low_match != uint16_t(~0U));
return(btr_cursor->low_match);
}

View file

@ -24,43 +24,24 @@ The index tree adaptive search
Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#ifndef btr0sea_h
#define btr0sea_h
#pragma once
#include "dict0dict.h"
#ifdef BTR_CUR_HASH_ADAPT
#include "ha0ha.h"
#include "srw_lock.h"
# include "buf0buf.h"
#ifdef UNIV_PFS_RWLOCK
# ifdef UNIV_PFS_RWLOCK
extern mysql_pfs_key_t btr_search_latch_key;
#endif /* UNIV_PFS_RWLOCK */
# endif /* UNIV_PFS_RWLOCK */
#define btr_search_sys_create() btr_search_sys.create()
#define btr_search_sys_free() btr_search_sys.free()
/** Disable the adaptive hash search system and empty the index. */
void btr_search_disable();
/** Enable the adaptive hash search system.
@param resize whether buf_pool_t::resize() is the caller */
void btr_search_enable(bool resize= false);
/*********************************************************************//**
Updates the search info. */
UNIV_INLINE
void
btr_search_info_update(
/*===================*/
dict_index_t* index, /*!< in: index of the cursor */
btr_cur_t* cursor);/*!< in: cursor which was just positioned */
# define btr_search_sys_create() btr_search.create()
# define btr_search_sys_free() btr_search.free()
/** Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@param[in,out] index index
@param[in,out] info index search info
@param[in] tuple logical record
@param[in] mode PAGE_CUR_L, ....
@param[in] latch_mode BTR_SEARCH_LEAF, ...
@ -70,90 +51,158 @@ both have sensible values.
bool
btr_search_guess_on_hash(
dict_index_t* index,
btr_search_t* info,
const dtuple_t* tuple,
ulint mode,
ulint latch_mode,
page_cur_mode_t mode,
btr_latch_mode latch_mode,
btr_cur_t* cursor,
mtr_t* mtr);
mtr_t* mtr) noexcept;
/** Move or delete hash entries for moved records, usually in a page split.
If new_block is already hashed, then any hash index for block is dropped.
If new_block is not hashed, and block is hashed, then a new hash index is
built to new_block with the same parameters as block.
@param[in,out] new_block destination page
@param[in,out] block source page (subject to deletion later) */
void
btr_search_move_or_delete_hash_entries(
buf_block_t* new_block,
buf_block_t* block);
@param new_block destination page
@param block source page (subject to deletion later) */
void btr_search_move_or_delete_hash_entries(buf_block_t *new_block,
buf_block_t *block) noexcept;
/** Drop any adaptive hash index entries that point to an index page.
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH
@param[in] garbage_collect drop ahi only if the index is marked
as freed */
@param block latched block containing index page, or a buffer-unfixed
index page or a block in state BUF_BLOCK_REMOVE_HASH
@param garbage_collect drop ahi only if the index is marked as freed */
void btr_search_drop_page_hash_index(buf_block_t* block,
bool garbage_collect);
bool garbage_collect) noexcept;
/** Drop possible adaptive hash index entries when a page is evicted
from the buffer pool or freed in a file, or the index is being dropped.
@param[in] page_id page id */
void btr_search_drop_page_hash_when_freed(const page_id_t page_id);
@param page_id page identifier of the being-dropped page */
void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept;
/** Updates the page hash index when a single record is inserted on a page.
@param[in] cursor cursor which was positioned to the place to insert
using btr_cur_search_, and the new record has been
inserted next to the cursor.
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
srw_spin_lock *ahi_latch);
/** Update the page hash index after a single record is inserted on a page.
@param cursor cursor which was positioned before the inserted record
@param reorg whether the page was reorganized */
void btr_search_update_hash_on_insert(btr_cur_t *cursor, bool reorg) noexcept;
/** Updates the page hash index when a single record is inserted on a page.
@param[in,out] cursor cursor which was positioned to the
place to insert using btr_cur_search_...,
and the new record has been inserted next
to the cursor
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_on_insert(btr_cur_t *cursor,
srw_spin_lock *ahi_latch);
/** Updates the page hash index when a single record is deleted from a page.
@param[in] cursor cursor which was positioned on the record to delete
using btr_cur_search_, the record is not yet deleted.*/
void btr_search_update_hash_on_delete(btr_cur_t *cursor);
/** Updates the page hash index before a single record is deleted from a page.
@param cursor cursor positioned on the to-be-deleted record */
void btr_search_update_hash_on_delete(btr_cur_t *cursor) noexcept;
/** Validates the search system.
@param thd connection, for checking if CHECK TABLE has been killed
@return true if ok */
bool btr_search_validate(THD *thd);
/** Lock all search latches in exclusive mode. */
static inline void btr_search_x_lock_all();
/** Unlock all search latches from exclusive mode. */
static inline void btr_search_x_unlock_all();
/** Lock all search latches in shared mode. */
static inline void btr_search_s_lock_all();
/** Unlock all search latches from shared mode. */
static inline void btr_search_s_unlock_all();
bool btr_search_validate(THD *thd) noexcept;
# ifdef UNIV_DEBUG
/** @return if the index is marked as freed */
bool btr_search_check_marked_free_index(const buf_block_t *block);
bool btr_search_check_marked_free_index(const buf_block_t *block) noexcept;
# endif /* UNIV_DEBUG */
struct ahi_node;
/** The hash index system */
struct btr_sea
{
/** the actual value of innodb_adaptive_hash_index, protected by
all partition::latch. Note that if buf_block_t::index is not nullptr
while a thread is holding a partition::latch, then also this must hold. */
Atomic_relaxed<bool> enabled;
/** Disable the adaptive hash search system and empty the index. */
void disable() noexcept;
/** Enable the adaptive hash search system.
@param resize whether buf_pool_t::resize() is the caller */
void enable(bool resize= false) noexcept;
/** Partition of the hash table */
struct partition
{
/** latch protecting table */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
/** map of CRC-32C of rec prefix to rec_t* in buf_page_t::frame */
hash_table_t table;
/** latch protecting blocks, spare; may be acquired while holding latch */
srw_mutex blocks_mutex;
/** allocated blocks */
UT_LIST_BASE_NODE_T(buf_page_t) blocks;
/** a cached block to extend blocks */
Atomic_relaxed<buf_block_t*> spare;
inline void init() noexcept;
inline void alloc(ulint hash_size) noexcept;
inline void clear() noexcept;
inline void free() noexcept;
/** Ensure that there is a spare block for a future insert() */
void prepare_insert() noexcept;
/** Clean up after erasing an AHI node
@param erase node being erased
@return buffer block to be freed
@retval nullptr if no buffer block was freed */
buf_block_t *cleanup_after_erase(ahi_node *erase) noexcept;
__attribute__((nonnull))
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Insert or replace an entry into the hash table.
@param fold CRC-32C of rec prefix
@param rec B-tree leaf page record
@param block the buffer block that contains rec */
void insert(uint32_t fold, const rec_t *rec, buf_block_t *block) noexcept;
# else
/** Insert or replace an entry into the hash table.
@param fold CRC-32C of rec prefix
@param rec B-tree leaf page record */
void insert(uint32_t fold, const rec_t *rec) noexcept;
# endif
/** Delete a pointer to a record if it exists.
@param fold CRC-32C of rec prefix
@param rec B-tree leaf page record
@return whether a record existed and was removed */
inline bool erase(uint32_t fold, const rec_t *rec) noexcept;
};
/** innodb_adaptive_hash_index_parts */
ulong n_parts;
/** Partitions of the adaptive hash index */
partition parts[512];
/** Get an adaptive hash index partition */
partition &get_part(index_id_t id) noexcept { return parts[id % n_parts]; }
/** Get an adaptive hash index partition */
partition &get_part(const dict_index_t &index) noexcept
{ return get_part(index.id); }
/** Create and initialize at startup */
void create() noexcept;
void alloc(ulint hash_size) noexcept;
/** Clear when disabling the adaptive hash index */
inline void clear() noexcept;
/** Free at shutdown */
void free() noexcept;
};
/** The adaptive hash index */
extern btr_sea btr_search;
# ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
extern ulint btr_search_n_succ;
/** Number of failed adaptive hash index lookups */
extern ulint btr_search_n_hash_fail;
# endif /* UNIV_SEARCH_PERF_STAT */
#else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create()
# define btr_search_sys_free()
# define btr_search_drop_page_hash_index(block, garbage_collect)
# define btr_search_s_lock_all(index)
# define btr_search_s_unlock_all(index)
# define btr_search_info_update(index, cursor)
# define btr_search_move_or_delete_hash_entries(new_block, block)
# define btr_search_update_hash_on_insert(cursor, ahi_latch)
# define btr_search_update_hash_on_delete(cursor)
@ -161,243 +210,3 @@ bool btr_search_check_marked_free_index(const buf_block_t *block);
# define btr_search_check_marked_free_index(block)
# endif /* UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef BTR_CUR_ADAPT
/** Create and initialize search info.
@param[in,out] heap heap where created
@return own: search info struct */
static inline btr_search_t* btr_search_info_create(mem_heap_t* heap)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/** @return the search info of an index */
static inline btr_search_t* btr_search_get_info(dict_index_t* index)
{
return(index->search_info);
}
#endif /* BTR_CUR_ADAPT */
/** The search info struct in an index */
struct btr_search_t{
/* @{ The following fields are not protected by any latch.
Unfortunately, this means that they must be aligned to
the machine word, i.e., they cannot be turned into bit-fields. */
buf_block_t* root_guess;/*!< the root page frame when it was last time
fetched, or NULL */
#ifdef BTR_CUR_HASH_ADAPT
ulint hash_analysis; /*!< when this exceeds
BTR_SEARCH_HASH_ANALYSIS, the hash
analysis starts; this is reset if no
success noticed */
ibool last_hash_succ; /*!< TRUE if the last search would have
succeeded, or did succeed, using the hash
index; NOTE that the value here is not exact:
it is not calculated for every search, and the
calculation itself is not always accurate! */
ulint n_hash_potential;
/*!< number of consecutive searches
which would have succeeded, or did succeed,
using the hash index;
the range is 0 .. BTR_SEARCH_BUILD_LIMIT + 5 */
/* @} */
ulint ref_count; /*!< Number of blocks in this index tree
that have search index built
i.e. block->index points to this index.
Protected by search latch except
when during initialization in
btr_search_info_create(). */
/*---------------------- @{ */
uint16_t n_fields; /*!< recommended prefix length for hash search:
number of full fields */
uint16_t n_bytes; /*!< recommended prefix: number of bytes in
an incomplete field
@see BTR_PAGE_MAX_REC_SIZE */
bool left_side; /*!< true or false, depending on whether
the leftmost record of several records with
the same prefix should be indexed in the
hash index */
/*---------------------- @} */
#ifdef UNIV_SEARCH_PERF_STAT
ulint n_hash_succ; /*!< number of successful hash searches thus
far */
ulint n_hash_fail; /*!< number of failed hash searches */
ulint n_patt_succ; /*!< number of successful pattern searches thus
far */
ulint n_searches; /*!< number of searches */
#endif /* UNIV_SEARCH_PERF_STAT */
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef UNIV_DEBUG
ulint magic_n; /*!< magic number @see BTR_SEARCH_MAGIC_N */
/** value of btr_search_t::magic_n, used in assertions */
# define BTR_SEARCH_MAGIC_N 1112765
#endif /* UNIV_DEBUG */
};
#ifdef BTR_CUR_HASH_ADAPT
/** The hash index system */
struct btr_search_sys_t
{
/** Partition of the hash table */
struct partition
{
/** latches protecting hash_table */
srw_spin_lock latch;
/** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
hash_table_t table;
/** memory heap for table */
mem_heap_t *heap;
#ifdef _MSC_VER
#pragma warning(push)
// nonstandard extension - zero sized array, if perfschema is not compiled
#pragma warning(disable : 4200)
#endif
char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
sizeof table - sizeof heap) &
(CPU_LEVEL1_DCACHE_LINESIZE - 1)];
#ifdef _MSC_VER
#pragma warning(pop)
#endif
void init()
{
memset((void*) this, 0, sizeof *this);
latch.SRW_LOCK_INIT(btr_search_latch_key);
}
void alloc(ulint hash_size)
{
table.create(hash_size);
heap= mem_heap_create_typed(std::min<ulong>(4096,
MEM_MAX_ALLOC_IN_BUF / 2
- MEM_BLOCK_HEADER_SIZE
- MEM_SPACE_NEEDED(0)),
MEM_HEAP_FOR_BTR_SEARCH);
}
void clear()
{
mem_heap_free(heap);
heap= nullptr;
ut_free(table.array);
}
void free()
{
latch.destroy();
if (heap)
clear();
}
};
/** Partitions of the adaptive hash index */
partition *parts;
/** Get an adaptive hash index partition */
partition *get_part(index_id_t id, ulint space_id) const
{
return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
}
/** Get an adaptive hash index partition */
partition *get_part(const dict_index_t &index) const
{
ut_ad(!index.table->space ||
index.table->space->id == index.table->space_id);
return get_part(ulint(index.id), index.table->space_id);
}
/** Get the search latch for the adaptive hash index partition */
srw_spin_lock *get_latch(const dict_index_t &index) const
{ return &get_part(index)->latch; }
/** Create and initialize at startup */
void create()
{
parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
mem_key_ahi));
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].init();
if (btr_search_enabled)
btr_search_enable();
}
void alloc(ulint hash_size)
{
hash_size/= btr_ahi_parts;
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].alloc(hash_size);
}
/** Clear when disabling the adaptive hash index */
void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
/** Free at shutdown */
void free()
{
if (parts)
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].free();
ut_free(parts);
parts= nullptr;
}
}
};
/** The adaptive hash index */
extern btr_search_sys_t btr_search_sys;
/** @return number of leaf pages pointed to by the adaptive hash index */
TRANSACTIONAL_INLINE inline ulint dict_index_t::n_ahi_pages() const
{
if (!btr_search_enabled)
return 0;
srw_spin_lock *latch= &btr_search_sys.get_part(*this)->latch;
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
if (xbegin())
{
if (latch->is_locked())
xabort();
ulint ref_count= search_info->ref_count;
xend();
return ref_count;
}
#endif
latch->rd_lock(SRW_LOCK_CALL);
ulint ref_count= search_info->ref_count;
latch->rd_unlock();
return ref_count;
}
#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
extern ulint btr_search_n_succ;
/** Number of failed adaptive hash index lookups */
extern ulint btr_search_n_hash_fail;
#endif /* UNIV_SEARCH_PERF_STAT */
/** After a change in n_fields or n_bytes in info, this many rounds are waited
before starting the hash analysis again: this saves CPU time when there is
no hope of building a hash index. */
#define BTR_SEARCH_HASH_ANALYSIS 17
/** Limit of consecutive searches for trying a search shortcut on the search
pattern */
#define BTR_SEARCH_ON_PATTERN_LIMIT 3
/** Limit of consecutive searches for trying a search shortcut using
the hash index */
#define BTR_SEARCH_ON_HASH_LIMIT 3
/** We do this many searches before trying to keep the search latch
over calls from MySQL. If we notice someone waiting for the latch, we
set this timeout again, to reduce contention. */
#define BTR_SEA_TIMEOUT 10000
#endif /* BTR_CUR_HASH_ADAPT */
#include "btr0sea.inl"
#endif

View file

@ -1,117 +0,0 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/********************************************************************//**
@file include/btr0sea.ic
The index tree adaptive search
Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#include "dict0mem.h"
#include "btr0cur.h"
#include "buf0buf.h"
/** Create and initialize search info.
@param[in,out] heap heap where created
@return own: search info struct */
static inline btr_search_t* btr_search_info_create(mem_heap_t* heap)
{
btr_search_t* info = static_cast<btr_search_t*>(
mem_heap_zalloc(heap, sizeof(btr_search_t)));
ut_d(info->magic_n = BTR_SEARCH_MAGIC_N);
#ifdef BTR_CUR_HASH_ADAPT
info->n_fields = 1;
info->left_side = TRUE;
#endif /* BTR_CUR_HASH_ADAPT */
return(info);
}
#ifdef BTR_CUR_HASH_ADAPT
/** Updates the search info.
@param[in,out] info search info
@param[in,out] cursor cursor which was just positioned */
void btr_search_info_update_slow(btr_search_t *info, btr_cur_t *cursor);
/*********************************************************************//**
Updates the search info. */
static inline
void
btr_search_info_update(
/*===================*/
dict_index_t* index, /*!< in: index of the cursor */
btr_cur_t* cursor) /*!< in: cursor which was just positioned */
{
ut_ad(!index->is_spatial());
ut_ad(!index->table->is_temporary());
if (!btr_search_enabled) {
return;
}
btr_search_t* info;
info = btr_search_get_info(index);
info->hash_analysis++;
if (info->hash_analysis < BTR_SEARCH_HASH_ANALYSIS) {
/* Do nothing */
return;
}
ut_ad(cursor->flag != BTR_CUR_HASH);
btr_search_info_update_slow(info, cursor);
}
/** Lock all search latches in exclusive mode. */
static inline void btr_search_x_lock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.wr_lock(SRW_LOCK_CALL);
}
}
/** Unlock all search latches from exclusive mode. */
static inline void btr_search_x_unlock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.wr_unlock();
}
}
/** Lock all search latches in shared mode. */
static inline void btr_search_s_lock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.rd_lock(SRW_LOCK_CALL);
}
}
/** Unlock all search latches from shared mode. */
static inline void btr_search_s_unlock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.rd_unlock();
}
}
#endif /* BTR_CUR_HASH_ADAPT */
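These lock-all helpers depend on every thread acquiring the partition latches in the same array order, so two threads that both need global exclusion cannot deadlock against each other. A minimal sketch of the idiom, assuming std::shared_mutex in place of srw_spin_lock:

#include <array>
#include <shared_mutex>

static std::array<std::shared_mutex, 8> part_latch;  // one per partition

void x_lock_all()                  // cf. btr_search_x_lock_all()
{
  for (auto &l : part_latch)
    l.lock();                      // always in ascending index order
}

void x_unlock_all()                // cf. btr_search_x_unlock_all()
{
  for (auto &l : part_latch)
    l.unlock();                    // unlock order does not affect deadlock
}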

View file

@ -33,17 +33,6 @@ Created 2/17/1996 Heikki Tuuri
struct btr_pcur_t;
/** B-tree cursor */
struct btr_cur_t;
/** B-tree search information for the adaptive hash index */
struct btr_search_t;
#ifdef BTR_CUR_HASH_ADAPT
/** Is search system enabled.
Search system is protected by array of latches. */
extern char btr_search_enabled;
/** Number of adaptive hash index partition. */
extern ulong btr_ahi_parts;
#endif /* BTR_CUR_HASH_ADAPT */
/** The size of a reference to data stored on a different page.
The reference is stored at the end of the prefix of the field

View file

@ -919,91 +919,37 @@ struct buf_block_t{
x-latch on the block */
/* @} */
#ifdef BTR_CUR_HASH_ADAPT
/** @name Hash search fields (unprotected)
NOTE that these fields are NOT protected by any semaphore! */
/* @{ */
/** @name Hash search fields */
/* @{ */
/** flag: whether the first (true) or last (false) record with
an identical prefix is included in the hash index */
static constexpr uint32_t LEFT_SIDE= 1U << 31;
volatile uint16_t n_bytes; /*!< recommended prefix length for hash
search: number of bytes in
an incomplete last field */
volatile uint16_t n_fields; /*!< recommended prefix length for hash
search: number of full fields */
uint16_t n_hash_helps; /*!< counter which controls building
of a new hash index for the page */
volatile bool left_side; /*!< true or false, depending on
whether the leftmost record of several
records with the same prefix should be
indexed in the hash index */
/* @} */
/** @name Hash search fields
These 5 fields may only be modified when:
we are holding the appropriate x-latch in btr_search_latches[], and
one of the following holds:
(1) in_file(), and we are holding lock in any mode, or
(2) !is_read_fixed()&&(state()>=UNFIXED||state()==REMOVE_HASH).
An exception to this is when we init or create a page
in the buffer pool in buf0buf.cc.
Another exception for buf_pool_t::clear_hash_index() is that
assigning block->index = NULL (and block->n_pointers = 0)
is allowed whenever all AHI latches are exclusively locked.
Another exception is that ha_insert_for_fold() may
decrement n_pointers without holding the appropriate latch
in btr_search_latches[]. Thus, n_pointers must be
protected by atomic memory access.
This implies that the fields may be read without race
condition whenever any of the following hold:
- the btr_search_sys.partition[].latch is being held, or
- state() == NOT_USED || state() == MEMORY,
and holding some latch prevents the state from changing to that.
Some use of assert_block_ahi_empty() or assert_block_ahi_valid()
is prone to race conditions while buf_pool_t::clear_hash_index() is
executing (the adaptive hash index is being disabled). Such use
is explicitly commented. */
/* @{ */
/** AHI parameters: LEFT_SIDE | prefix_bytes << 16 | prefix_fields.
Protected by the btr_sea::partition::latch and
(1) in_file(), and we are holding lock in any mode, or
(2) !is_read_fixed()&&(state()>=UNFIXED||state()==REMOVE_HASH). */
Atomic_relaxed<uint32_t> ahi_left_bytes_fields;
/** counter which controls building of a new hash index for the page;
may be nonzero even if !index */
Atomic_relaxed<uint16_t> n_hash_helps;
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
Atomic_counter<ulint>
n_pointers; /*!< used in debugging: the number of
pointers in the adaptive hash index
pointing to this frame */
# define assert_block_ahi_empty(block) \
ut_a((block)->n_pointers == 0)
# define assert_block_ahi_empty_on_init(block) do { \
MEM_MAKE_DEFINED(&(block)->n_pointers, sizeof (block)->n_pointers); \
assert_block_ahi_empty(block); \
} while (0)
# define assert_block_ahi_valid(block) \
ut_a((block)->index || (block)->n_pointers == 0)
/** number of pointers from the btr_sea::partition::table;
!n_pointers == !index */
Atomic_counter<uint16_t> n_pointers;
# define assert_block_ahi_empty(block) ut_a(!(block)->n_pointers)
# define assert_block_ahi_valid(b) ut_a((b)->index || !(b)->n_pointers)
# else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
# define assert_block_ahi_empty(block) /* nothing */
# define assert_block_ahi_empty_on_init(block) /* nothing */
# define assert_block_ahi_valid(block) /* nothing */
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
unsigned curr_n_fields:10;/*!< prefix length for hash indexing:
number of full fields */
unsigned curr_n_bytes:15;/*!< number of bytes in hash
indexing */
unsigned curr_left_side:1;/*!< TRUE or FALSE in hash indexing */
dict_index_t* index; /*!< Index for which the
adaptive hash index has been
created, or NULL if the page
does not exist in the
index. Note that it does not
guarantee that the index is
complete, though: there may
have been hash collisions,
record deletions, etc. */
/* @} */
/** index for which the adaptive hash index has been created,
or nullptr if the page does not exist in the index.
Protected by btr_sea::partition::latch. */
Atomic_relaxed<dict_index_t*> index;
/* @} */
#else /* BTR_CUR_HASH_ADAPT */
# define assert_block_ahi_empty(block) /* nothing */
# define assert_block_ahi_empty_on_init(block) /* nothing */
# define assert_block_ahi_valid(block) /* nothing */
#endif /* BTR_CUR_HASH_ADAPT */
void fix() noexcept { page.fix(); }

View file

@ -193,7 +193,7 @@ dtuple_set_info_bits(
Gets number of fields used in record comparisons.
@return number of fields used in comparisons in rem0cmp.* */
UNIV_INLINE
ulint
uint16_t
dtuple_get_n_fields_cmp(
/*====================*/
const dtuple_t* tuple) /*!< in: tuple */
@ -270,6 +270,7 @@ dtuple_create_with_vcol(
/*********************************************************************//**
Sets number of fields used in a tuple. Normally this is set in
dtuple_create, but if you want later to set it smaller, you can use this. */
inline
void
dtuple_set_n_fields(
/*================*/
@ -316,20 +317,6 @@ dtuple_get_n_ext(
/*=============*/
const dtuple_t* tuple) /*!< in: tuple */
MY_ATTRIBUTE((nonnull));
/** Fold a prefix given as the number of fields of a tuple.
@param[in] tuple index record
@param[in] n_fields number of complete fields to fold
@param[in] n_bytes number of bytes to fold in the last field
@param[in] index_id index tree ID
@return the folded value */
UNIV_INLINE
ulint
dtuple_fold(
const dtuple_t* tuple,
ulint n_fields,
ulint n_bytes,
index_id_t tree_id)
MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Sets types of fields binary in a tuple. */
UNIV_INLINE
@ -499,20 +486,20 @@ struct dfield_t{
/** Structure for an SQL data tuple of fields (logical record) */
struct dtuple_t {
ulint info_bits; /*!< info bits of an index record:
byte info_bits; /*!< info bits of an index record:
the default is 0; this field is used
if an index record is built from
a data tuple */
ulint n_fields; /*!< number of fields in dtuple */
ulint n_fields_cmp; /*!< number of fields which should
uint16_t n_fields; /*!< number of fields in dtuple */
uint16_t n_fields_cmp; /*!< number of fields which should
be used in comparison services
of rem0cmp.*; the index search
is performed by comparing only these
fields, others are ignored; the
default value in dtuple creation is
the same value as n_fields */
uint16_t n_v_fields; /*!< number of virtual fields */
dfield_t* fields; /*!< fields */
ulint n_v_fields; /*!< number of virtual fields */
dfield_t* v_fields; /*!< fields on virtual column */
#ifdef UNIV_DEBUG
ulint magic_n; /*!< magic number, used in
@ -574,7 +561,7 @@ struct dtuple_t {
inline void copy_field_types(const dict_index_t &index);
};
inline ulint dtuple_get_n_fields(const dtuple_t* tuple)
inline uint16_t dtuple_get_n_fields(const dtuple_t* tuple)
{ return tuple->n_fields; }
inline dtype_t* dfield_get_type(dfield_t* field) { return &field->type; }
inline const dtype_t* dfield_get_type(const dfield_t* field)
@ -608,7 +595,7 @@ inline void dfield_set_ext(dfield_t* field) { field->ext = 1; }
/** Gets number of virtual fields in a data tuple.
@param[in] tuple dtuple to check
@return number of fields */
inline ulint
inline uint16_t
dtuple_get_n_v_fields(const dtuple_t* tuple) { return tuple->n_v_fields; }
inline const dfield_t* dtuple_get_nth_field(const dtuple_t* tuple, ulint n)

View file

@ -238,14 +238,14 @@ dtuple_set_info_bits(
dtuple_t* tuple, /*!< in: tuple */
ulint info_bits) /*!< in: info bits */
{
tuple->info_bits = info_bits;
tuple->info_bits = byte(info_bits);
}
/*********************************************************************//**
Gets number of fields used in record comparisons.
@return number of fields used in comparisons in rem0cmp.* */
UNIV_INLINE
ulint
uint16_t
dtuple_get_n_fields_cmp(
/*====================*/
const dtuple_t* tuple) /*!< in: tuple */
@ -264,7 +264,7 @@ dtuple_set_n_fields_cmp(
comparisons in rem0cmp.* */
{
ut_ad(n_fields_cmp <= tuple->n_fields);
tuple->n_fields_cmp = n_fields_cmp;
tuple->n_fields_cmp = uint16_t(n_fields_cmp);
}
/** Creates a data tuple from an already allocated chunk of memory.
@ -291,9 +291,9 @@ dtuple_create_from_mem(
tuple = (dtuple_t*) buf;
tuple->info_bits = 0;
tuple->n_fields = n_fields;
tuple->n_v_fields = n_v_fields;
tuple->n_fields_cmp = n_fields;
tuple->n_fields = uint16_t(n_fields);
tuple->n_v_fields = uint16_t(n_v_fields);
tuple->n_fields_cmp = uint16_t(n_fields);
tuple->fields = (dfield_t*) &tuple[1];
if (n_v_fields > 0) {
tuple->v_fields = &tuple->fields[n_fields];
@ -398,6 +398,12 @@ dtuple_create_with_vcol(
return(tuple);
}
inline void dtuple_set_n_fields(dtuple_t *tuple, ulint n_fields)
{
tuple->n_fields= uint16_t(n_fields);
tuple->n_fields_cmp= uint16_t(n_fields);
}
/** Copies a data tuple's virtual fields to another. This is a shallow copy;
@param[in,out] d_tuple destination tuple
@param[in] s_tuple source tuple */
@ -432,7 +438,7 @@ dtuple_copy(
ulint n_fields = dtuple_get_n_fields(tuple);
ulint n_v_fields = dtuple_get_n_v_fields(tuple);
dtuple_t* new_tuple = dtuple_create_with_vcol(
heap, n_fields, n_v_fields);
heap, tuple->n_fields, tuple->n_v_fields);
ulint i;
for (i = 0; i < n_fields; i++) {
@ -527,63 +533,6 @@ dtuple_set_types_binary(
}
}
/** Fold a prefix given as the number of fields of a tuple.
@param[in] tuple index record
@param[in] n_fields number of complete fields to fold
@param[in] n_bytes number of bytes to fold in the last field
@param[in] index_id index tree ID
@return the folded value */
UNIV_INLINE
ulint
dtuple_fold(
const dtuple_t* tuple,
ulint n_fields,
ulint n_bytes,
index_id_t tree_id)
{
const dfield_t* field;
ulint i;
const byte* data;
ulint len;
ulint fold;
ut_ad(tuple);
ut_ad(tuple->magic_n == DATA_TUPLE_MAGIC_N);
ut_ad(dtuple_check_typed(tuple));
fold = ut_fold_ull(tree_id);
for (i = 0; i < n_fields; i++) {
field = dtuple_get_nth_field(tuple, i);
data = (const byte*) dfield_get_data(field);
len = dfield_get_len(field);
if (len != UNIV_SQL_NULL) {
fold = ut_fold_ulint_pair(fold,
ut_fold_binary(data, len));
}
}
if (n_bytes > 0) {
field = dtuple_get_nth_field(tuple, i);
data = (const byte*) dfield_get_data(field);
len = dfield_get_len(field);
if (len != UNIV_SQL_NULL) {
if (len > n_bytes) {
len = n_bytes;
}
fold = ut_fold_ulint_pair(fold,
ut_fold_binary(data, len));
}
}
return(fold);
}
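The removed function chains one hash value per complete field onto a seed derived from the index tree id, then mixes in at most n_bytes of the following field. A self-contained sketch of that chaining (SQL NULL handling omitted), with FNV-1a and a multiplicative mix standing in for ut_fold_binary() and ut_fold_ulint_pair():

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

static uint64_t fold_pair(uint64_t a, uint64_t b)  // stand-in mixing step
{ return (a ^ b) * 0x9e3779b97f4a7c15ULL; }

static uint64_t fold_bytes(const char *p, size_t len)  // stand-in: FNV-1a
{
  uint64_t h= 14695981039346656037ULL;
  for (size_t i= 0; i < len; i++)
    h= (h ^ uint8_t(p[i])) * 1099511628211ULL;
  return h;
}

// Caller guarantees fields.size() > n_fields whenever n_bytes != 0.
uint64_t tuple_fold(const std::vector<std::string> &fields,
                    size_t n_fields, size_t n_bytes, uint64_t tree_id)
{
  uint64_t fold= fold_pair(tree_id >> 32, uint32_t(tree_id));
  for (size_t i= 0; i < n_fields; i++)      // complete fields
    fold= fold_pair(fold, fold_bytes(fields[i].data(), fields[i].size()));
  if (n_bytes)                              // prefix of the next field
  {
    const std::string &f= fields[n_fields];
    fold= fold_pair(fold, fold_bytes(f.data(), std::min(n_bytes, f.size())));
  }
  return fold;
}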
/**********************************************************************//**
Writes an SQL null field full of zeros. */
UNIV_INLINE

View file

@ -663,7 +663,7 @@ Gets the number of all non-virtual columns (also system) in a table
in the dictionary cache.
@return number of columns of a table */
UNIV_INLINE
unsigned
uint16_t
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
@ -673,7 +673,7 @@ dict_table_get_n_cols(
@param[in] table the table to check
@return number of virtual columns of a table */
UNIV_INLINE
unsigned
uint16_t
dict_table_get_n_v_cols(
const dict_table_t* table);

View file

@ -264,7 +264,7 @@ Gets the number of all non-virtual columns (also system) in a table
in the dictionary cache.
@return number of non-virtual columns of a table */
UNIV_INLINE
unsigned
uint16_t
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
@ -277,7 +277,7 @@ dict_table_get_n_cols(
@param[in] table the table to check
@return number of virtual columns of a table */
UNIV_INLINE
unsigned
uint16_t
dict_table_get_n_v_cols(
const dict_table_t* table)
{

View file

@ -1075,8 +1075,59 @@ struct dict_index_t {
UT_LIST_NODE_T(dict_index_t)
indexes;/*!< list of indexes of the table */
#ifdef BTR_CUR_ADAPT
btr_search_t* search_info;
/*!< info used in optimistic searches */
/** The search info struct in an index */
struct ahi {
ahi()= default;
ahi(const ahi&)= default;
~ahi()= default;
/** Dummy assignment operator for dict_index_t::clone(), which
will return a clone where these fields are reset to default values
(because no AHI entries exist yet for the clone) */
ahi &operator=(const ahi&) { new(this) ahi(); return *this; }
/** the root page when it was last time fetched, or nullptr */
buf_block_t *root_guess= nullptr;
# ifdef BTR_CUR_HASH_ADAPT
private:
/** After a change in n_fields or n_bytes, this many rounds are
waited before starting the hash analysis again: this saves
CPU time when there is no hope of building a hash index. */
static constexpr uint8_t HASH_ANALYSIS= 16;
/** the number of calls to hash_analysis_useful() */
Atomic_relaxed<uint8_t> hash_analysis{0};
public:
bool hash_analysis_useful() noexcept
{
return hash_analysis > HASH_ANALYSIS ||
hash_analysis.fetch_add(1) >= HASH_ANALYSIS;
}
void hash_analysis_reset() noexcept { hash_analysis= 0; }
/** number of consecutive searches which would have succeeded, or
did succeed, using the hash index; the range is 0
.. BTR_SEARCH_BUILD_LIMIT */
Atomic_relaxed<uint8_t> n_hash_potential{0};
/** whether the last search would have succeeded, or
did succeed, using the hash index; NOTE that the value
here is not exact: it is not calculated for every
search, and the calculation itself is not always accurate! */
Atomic_relaxed<bool> last_hash_succ{false};
/** recommended parameters; @see buf_block_t::ahi_left_bytes_fields */
Atomic_relaxed<uint32_t> left_bytes_fields{buf_block_t::LEFT_SIDE | 1};
/** number of buf_block_t::index pointers to this index */
Atomic_counter<size_t> ref_count{0};
# ifdef UNIV_SEARCH_PERF_STAT
/** number of successful hash searches */
size_t n_hash_succ{0};
/** number of failed hash searches */
size_t n_hash_fail{0};
/** number of searches */
size_t n_searches{0};
# endif /* UNIV_SEARCH_PERF_STAT */
# endif /* BTR_CUR_HASH_ADAPT */
} search_info;
#endif /* BTR_CUR_ADAPT */
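hash_analysis_useful() above is written so that a saturated counter costs only a relaxed load: once the threshold has been exceeded, no further atomic read-modify-write touches the cache line. A stand-alone sketch of the same pattern with std::atomic in place of Atomic_relaxed:

#include <atomic>
#include <cstdint>

static constexpr uint8_t HASH_ANALYSIS= 16;
static std::atomic<uint8_t> hash_analysis{0};

bool hash_analysis_useful()
{
  // Fast path: after saturation, a plain relaxed load suffices and the
  // counter's cache line is no longer written by every caller.
  return hash_analysis.load(std::memory_order_relaxed) > HASH_ANALYSIS ||
    hash_analysis.fetch_add(1, std::memory_order_relaxed) >= HASH_ANALYSIS;
}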
row_log_t* online_log;
/*!< the log of modifications
@ -1367,8 +1418,8 @@ public:
/** Clone this index for lazy dropping of the adaptive hash index.
@return this or a clone */
dict_index_t* clone_if_needed();
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint n_ahi_pages() const;
/** @return whether any leaf pages may be in the adaptive hash index */
bool any_ahi_pages() const noexcept { return search_info.ref_count; }
/** @return whether mark_freed() had been invoked */
bool freed() const { return UNIV_UNLIKELY(page == 1); }
/** Note that the index is waiting for btr_search_lazy_free() */
@ -2546,7 +2597,7 @@ public:
bool is_stats_table() const;
/** @return number of unique columns in FTS_DOC_ID index */
unsigned fts_n_uniq() const { return versioned() ? 2 : 1; }
uint16_t fts_n_uniq() const { return versioned() ? 2 : 1; }
/** @return the index for that starts with a specific column */
dict_index_t *get_index(const dict_col_t &col) const;

View file

@ -1,60 +0,0 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/ha0ha.h
The hash table interface for the adaptive hash index
Created 8/18/1994 Heikki Tuuri
*******************************************************/
#ifndef ha0ha_h
#define ha0ha_h
#include "hash0hash.h"
#include "page0types.h"
#include "buf0types.h"
#include "rem0types.h"
#ifdef BTR_CUR_HASH_ADAPT
/*************************************************************//**
Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: folded value of the searched data */
/** The hash table external chain node */
struct ha_node_t {
ulint fold; /*!< fold value for the data */
ha_node_t* next; /*!< next chain node or NULL if none */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block; /*!< buffer block containing the data, or NULL */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data; /*!< pointer to the data */
};
#include "ha0ha.inl"
#endif /* BTR_CUR_HASH_ADAPT */
#endif

View file

@ -1,154 +0,0 @@
/*****************************************************************************
Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/********************************************************************//**
@file include/ha0ha.ic
The hash table interface for the adaptive hash index
Created 8/18/1994 Heikki Tuuri
*************************************************************************/
#ifdef BTR_CUR_HASH_ADAPT
#include "btr0types.h"
/******************************************************************//**
Gets a hash node data.
@return pointer to the data */
UNIV_INLINE
const rec_t*
ha_node_get_data(
/*=============*/
const ha_node_t* node) /*!< in: hash chain node */
{
return(node->data);
}
/******************************************************************//**
Sets hash node data. */
UNIV_INLINE
void
ha_node_set_data_func(
/*==================*/
ha_node_t* node, /*!< in: hash chain node */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data) /*!< in: pointer to the data */
{
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = data;
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/******************************************************************//**
Gets the next node in a hash chain.
@return next node, NULL if none */
UNIV_INLINE
ha_node_t*
ha_chain_get_next(
/*==============*/
const ha_node_t* node) /*!< in: hash chain node */
{
return(node->next);
}
/******************************************************************//**
Gets the first node in a hash chain.
@return first node, NULL if none */
UNIV_INLINE
ha_node_t*
ha_chain_get_first(
/*===============*/
hash_table_t* table, /*!< in: hash table */
ulint fold) /*!< in: fold value determining the chain */
{
return static_cast<ha_node_t*>(table->array[table->calc_hash(fold)].node);
}
/*************************************************************//**
Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
ulint fold) /*!< in: folded value of the searched data */
{
ut_ad(btr_search_enabled);
for (const ha_node_t* node = ha_chain_get_first(table, fold);
node != NULL;
node = ha_chain_get_next(node)) {
if (node->fold == fold) {
return(node->data);
}
}
return(NULL);
}
/*********************************************************//**
Looks for an element when we know the pointer to the data.
@return pointer to the hash table node, NULL if not found in the table */
UNIV_INLINE
ha_node_t*
ha_search_with_data(
/*================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */
const rec_t* data) /*!< in: pointer to the data */
{
ha_node_t* node;
ut_ad(btr_search_enabled);
node = ha_chain_get_first(table, fold);
while (node) {
if (node->data == data) {
return(node);
}
node = ha_chain_get_next(node);
}
return(NULL);
}
#endif /* BTR_CUR_HASH_ADAPT */

View file

@ -28,8 +28,6 @@ Created 6/9/1994 Heikki Tuuri
#define mem0mem_h
#include "ut0mem.h"
#include "ut0rnd.h"
#include "mach0data.h"
#include <memory>
@ -42,22 +40,14 @@ typedef struct mem_block_info_t mem_block_t;
/** A memory heap is a nonempty linear list of memory blocks */
typedef mem_block_t mem_heap_t;
struct buf_block_t;
/** Types of allocation for memory heaps: DYNAMIC means allocation from the
dynamic memory pool of the C compiler, BUFFER means allocation from the
buffer pool; the latter method is used for very big heaps */
#define MEM_HEAP_DYNAMIC 0 /* the most common type */
#define MEM_HEAP_BUFFER 1
#define MEM_HEAP_BTR_SEARCH 2 /* this flag can optionally be
ORed to MEM_HEAP_BUFFER, in which
case heap->free_block is used in
some cases for memory allocations,
and if it's NULL, the memory
allocation functions can return
NULL. */
/** Different type of heaps in terms of which datastructure is using them */
#define MEM_HEAP_FOR_BTR_SEARCH (MEM_HEAP_BTR_SEARCH | MEM_HEAP_BUFFER)
#define MEM_HEAP_FOR_LOCK_HEAP (MEM_HEAP_BUFFER)
/** The following start size is used for the first block in the memory heap if
@ -110,8 +100,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@ -145,8 +134,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@ -180,26 +168,6 @@ void
mem_heap_empty(
mem_heap_t* heap);
/** Returns a pointer to the topmost element in a memory heap.
The size of the element must be given.
@param[in] heap memory heap
@param[in] n size of the topmost element
@return pointer to the topmost element */
UNIV_INLINE
void*
mem_heap_get_top(
mem_heap_t* heap,
ulint n);
/*****************************************************************//**
Frees the topmost element in a memory heap.
The size of the element must be given. */
UNIV_INLINE
void
mem_heap_free_top(
/*==============*/
mem_heap_t* heap, /*!< in: memory heap */
ulint n); /*!< in: size of the topmost element */
/*****************************************************************//**
Returns the space in bytes occupied by a memory heap. */
UNIV_INLINE
@ -319,19 +287,13 @@ struct mem_block_info_t {
in the heap. This is defined only in the base
node and is set to ULINT_UNDEFINED in others. */
ulint type; /*!< type of heap: MEM_HEAP_DYNAMIC, or
MEM_HEAP_BUF possibly ORed to MEM_HEAP_BTR_SEARCH */
MEM_HEAP_BUFFER */
ulint free; /*!< offset in bytes of the first free position for
user data in the block */
ulint start; /*!< the value of the struct field 'free' at the
creation of the block */
void* free_block;
/* if the MEM_HEAP_BTR_SEARCH bit is set in type,
and this is the heap root, this can contain an
allocated buffer frame, which can be appended as a
free block to the heap, if we need more space;
otherwise, this is NULL */
void* buf_block;
buf_block_t* buf_block;
/* if this block has been allocated from the buffer
pool, this contains the buf_block_t handle;
otherwise, this is NULL */

View file

@ -39,8 +39,7 @@ Created 6/8/1994 Heikki Tuuri
#endif /* UNIV_DEBUG */
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@ -62,19 +61,11 @@ mem_heap_block_free(
mem_heap_t* heap, /*!< in: heap */
mem_block_t* block); /*!< in: block to free */
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap); /*!< in: heap */
/***************************************************************//**
Adds a new block to a memory heap.
@param[in] heap memory heap
@param[in] n number of bytes needed
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
mem_heap_t* heap,
@ -100,9 +91,7 @@ UNIV_INLINE
void
mem_block_set_type(mem_block_t* block, ulint type)
{
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
block->type = type;
}
@ -157,8 +146,6 @@ mem_heap_zalloc(
mem_heap_t* heap,
ulint n)
{
ut_ad(heap);
ut_ad(!(heap->type & MEM_HEAP_BTR_SEARCH));
return(memset(mem_heap_alloc(heap, n), 0, n));
}
@ -166,8 +153,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@ -290,62 +276,6 @@ mem_heap_empty(
mem_heap_t* heap)
{
mem_heap_free_heap_top(heap, (byte*) heap + mem_block_get_start(heap));
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
}
/** Returns a pointer to the topmost element in a memory heap.
The size of the element must be given.
@param[in] heap memory heap
@param[in] n size of the topmost element
@return pointer to the topmost element */
UNIV_INLINE
void*
mem_heap_get_top(
mem_heap_t* heap,
ulint n)
{
mem_block_t* block;
byte* buf;
block = UT_LIST_GET_LAST(heap->base);
buf = (byte*) block + mem_block_get_free(block) - MEM_SPACE_NEEDED(n);
return((void*) buf);
}
/*****************************************************************//**
Frees the topmost element in a memory heap. The size of the element must be
given. */
UNIV_INLINE
void
mem_heap_free_top(
/*==============*/
mem_heap_t* heap, /*!< in: memory heap */
ulint n) /*!< in: size of the topmost element */
{
mem_block_t* block;
n += REDZONE_SIZE;
block = UT_LIST_GET_LAST(heap->base);
/* Subtract the free field of block */
mem_block_set_free(block, mem_block_get_free(block)
- MEM_SPACE_NEEDED(n));
/* If free == start, we may free the block if it is not the first
one */
if ((heap != block) && (mem_block_get_free(block)
== mem_block_get_start(block))) {
mem_heap_block_free(heap, block);
} else {
MEM_NOACCESS((byte*) block + mem_block_get_free(block), n);
}
}
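The two helpers being removed treated the heap's last block as a stack: mem_heap_get_top() returned a pointer just below the free offset, and mem_heap_free_top() popped the topmost allocation (possibly freeing an emptied block). A toy bump-allocator illustration of that contract, ignoring block lists, alignment, and redzones:

#include <cassert>
#include <cstddef>

struct bump_heap
{
  char buf[4096];
  size_t free= 0;                      // offset of the first free byte
  void *alloc(size_t n) { void *p= buf + free; free+= n; return p; }
  void *top(size_t n) { return buf + free - n; }   // mem_heap_get_top()
  void free_top(size_t n) { free-= n; }            // mem_heap_free_top()
};

int main()
{
  bump_heap h;
  void *a= h.alloc(16);
  void *b= h.alloc(32);
  assert(h.top(32) == b);   // the topmost element is the last allocation
  h.free_top(32);           // pop it; 'a' is unaffected
  assert(h.top(16) == a);
  return 0;
}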
/** Creates a memory heap.
@ -356,8 +286,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@ -406,10 +335,6 @@ mem_heap_free(
block = UT_LIST_GET_LAST(heap->base);
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
while (block != NULL) {
/* Store the contents of info before freeing current block
(it is erased in freeing) */
@ -430,13 +355,7 @@ mem_heap_get_size(
/*==============*/
mem_heap_t* heap) /*!< in: heap */
{
ulint size = heap->total_size;
if (heap->free_block) {
size += srv_page_size;
}
return(size);
return heap->total_size;
}
/**********************************************************************//**

View file

@ -231,38 +231,34 @@ page_cur_search_with_match(
page_cur_mode_t mode, /*!< in: PAGE_CUR_L,
PAGE_CUR_LE, PAGE_CUR_G, or
PAGE_CUR_GE */
ulint* iup_matched_fields,
uint16_t* iup_matched_fields,
/*!< in/out: already matched
fields in upper limit record */
ulint* ilow_matched_fields,
uint16_t* ilow_matched_fields,
/*!< in/out: already matched
fields in lower limit record */
page_cur_t* cursor, /*!< in/out: page cursor */
rtr_info_t* rtr_info);/*!< in/out: rtree search stack */
#ifdef BTR_CUR_HASH_ADAPT
MY_ATTRIBUTE((warn_unused_result))
/** Search the right position for a page cursor.
@param[in] tuple key to be searched for
@param[in] mode search mode
@param[in,out] iup_matched_fields already matched fields in the
upper limit record
@param[in,out] iup_matched_bytes already matched bytes in the
first partially matched field in the upper limit record
@param[in,out] ilow_matched_fields already matched fields in the
lower limit record
@param[in,out] ilow_matched_bytes already matched bytes in the
first partially matched field in the lower limit record
@param[in,out] cursor page cursor */
bool
page_cur_search_with_match_bytes(
const dtuple_t* tuple,
page_cur_mode_t mode,
ulint* iup_matched_fields,
ulint* iup_matched_bytes,
ulint* ilow_matched_fields,
ulint* ilow_matched_bytes,
page_cur_t* cursor);
#endif /* BTR_CUR_HASH_ADAPT */
@param tuple search key
@param mode search mode
@param iup_fields matched fields in the upper limit record
@param ilow_fields matched fields in the low limit record
@param cursor page cursor
@param iup_bytes matched bytes after iup_fields
@param ilow_bytes matched bytes after ilow_fields
@return whether the page is corrupted */
bool page_cur_search_with_match_bytes(const dtuple_t &tuple,
page_cur_mode_t mode,
uint16_t *iup_fields,
uint16_t *ilow_fields,
page_cur_t *cursor,
uint16_t *iup_bytes,
uint16_t *ilow_bytes)
noexcept;
/***********************************************************//**
Positions a page cursor on a randomly chosen user record on a page. If there
are no user records, sets the cursor on the infimum record. */

View file

@ -72,11 +72,6 @@ enum page_cur_mode_t {
PAGE_CUR_L = 3,
PAGE_CUR_LE = 4,
/* PAGE_CUR_LE_OR_EXTENDS = 5,*/ /* This is a search mode used in
"column LIKE 'abc%' ORDER BY column DESC";
we have to find strings which are <= 'abc' or
which extend it */
/* These search mode is for search R-tree index. */
PAGE_CUR_CONTAIN = 7,
PAGE_CUR_INTERSECT = 8,

View file

@ -55,6 +55,7 @@ cmp_cols_are_equal(
@retval positive if data1 is greater than data2 */
int cmp_data(ulint mtype, ulint prtype, bool descending,
const byte *data1, size_t len1, const byte *data2, size_t len2)
noexcept
MY_ATTRIBUTE((warn_unused_result));
/** Compare two data fields.
@ -148,33 +149,12 @@ inline int cmp_geometry_field(const void *a, const void *b)
int cmp_dtuple_rec_with_match_low(const dtuple_t *dtuple, const rec_t *rec,
const dict_index_t *index,
const rec_offs *offsets,
ulint n_cmp, ulint *matched_fields)
ulint n_cmp, uint16_t *matched_fields)
MY_ATTRIBUTE((nonnull));
#define cmp_dtuple_rec_with_match(tuple,rec,index,offsets,fields) \
cmp_dtuple_rec_with_match_low( \
tuple,rec,index,offsets,dtuple_get_n_fields_cmp(tuple),fields)
/** Compare a data tuple to a physical record.
@param[in] dtuple data tuple
@param[in] rec B-tree or R-tree index record
@param[in] index index tree
@param[in] offsets rec_get_offsets(rec)
@param[in,out] matched_fields number of completely matched fields
@param[in,out] matched_bytes number of matched bytes in the first
field that is not matched
@return the comparison result of dtuple and rec
@retval 0 if dtuple is equal to rec
@retval negative if dtuple is less than rec
@retval positive if dtuple is greater than rec */
int
cmp_dtuple_rec_with_match_bytes(
const dtuple_t* dtuple,
const rec_t* rec,
const dict_index_t* index,
const rec_offs* offsets,
ulint* matched_fields,
ulint* matched_bytes)
MY_ATTRIBUTE((warn_unused_result));
/** Compare a data tuple to a physical record.
@see cmp_dtuple_rec_with_match
@param dtuple data tuple
@param rec index record
@ -187,7 +167,7 @@ cmp_dtuple_rec_with_match_bytes(
inline int cmp_dtuple_rec(const dtuple_t *dtuple, const rec_t *rec,
const dict_index_t *index, const rec_offs *offsets)
{
ulint matched= 0;
uint16_t matched= 0;
return cmp_dtuple_rec_with_match(dtuple, rec, index, offsets, &matched);
}

View file

@ -178,7 +178,7 @@ The following function is used to get the number of fields
in an old-style record.
@return number of data fields */
UNIV_INLINE
ulint
uint16_t
rec_get_n_fields_old(
/*=================*/
const rec_t* rec) /*!< in: physical record */
@ -353,7 +353,7 @@ The following function is used to get the order number
of an old-style record in the heap of the index page.
@return heap order number */
UNIV_INLINE
ulint
uint16_t
rec_get_heap_no_old(
/*================*/
const rec_t* rec) /*!< in: physical record */
@ -363,7 +363,7 @@ The following function is used to get the order number
of a new-style record in the heap of the index page.
@return heap order number */
UNIV_INLINE
ulint
uint16_t
rec_get_heap_no_new(
/*================*/
const rec_t* rec) /*!< in: physical record */

View file

@ -164,7 +164,7 @@ rec_set_bit_field_1(
/******************************************************//**
Gets a bit field from within 2 bytes. */
UNIV_INLINE
ulint
uint16_t
rec_get_bit_field_2(
/*================*/
const rec_t* rec, /*!< in: pointer to record origin */
@ -174,7 +174,7 @@ rec_get_bit_field_2(
{
ut_ad(rec);
return((mach_read_from_2(rec - offs) & mask) >> shift);
return uint16_t((mach_read_from_2(rec - offs) & mask) >> shift);
}
/******************************************************//**
@ -307,18 +307,14 @@ The following function is used to get the number of fields
in an old-style record.
@return number of data fields */
UNIV_INLINE
ulint
uint16_t
rec_get_n_fields_old(
/*=================*/
const rec_t* rec) /*!< in: physical record */
{
ulint ret;
ut_ad(rec);
ret = rec_get_bit_field_2(rec, REC_OLD_N_FIELDS,
REC_OLD_N_FIELDS_MASK,
REC_OLD_N_FIELDS_SHIFT);
uint16_t ret = rec_get_bit_field_2(rec, REC_OLD_N_FIELDS,
REC_OLD_N_FIELDS_MASK,
REC_OLD_N_FIELDS_SHIFT);
ut_ad(ret <= REC_MAX_N_FIELDS);
ut_ad(ret > 0);
@ -397,7 +393,7 @@ rec_n_fields_is_sane(
/* a record for older SYS_INDEXES table
(missing merge_threshold column) is acceptable. */
|| (index->table->id == DICT_INDEXES_ID
&& n_fields == dtuple_get_n_fields(entry) - 1));
&& n_fields + 1 == dtuple_get_n_fields(entry)));
}
/******************************************************//**
@ -518,7 +514,7 @@ The following function is used to get the order number
of an old-style record in the heap of the index page.
@return heap order number */
UNIV_INLINE
ulint
uint16_t
rec_get_heap_no_old(
/*================*/
const rec_t* rec) /*!< in: physical record */
@ -532,7 +528,7 @@ The following function is used to get the order number
of a new-style record in the heap of the index page.
@return heap order number */
UNIV_INLINE
ulint
uint16_t
rec_get_heap_no_new(
/*================*/
const rec_t* rec) /*!< in: physical record */

View file

@ -53,49 +53,6 @@ Created 1/20/1994 Heikki Tuuri
#define ut_max std::max
#define ut_min std::min
/** Calculate the minimum of two pairs.
@param[out] min_hi MSB of the minimum pair
@param[out] min_lo LSB of the minimum pair
@param[in] a_hi MSB of the first pair
@param[in] a_lo LSB of the first pair
@param[in] b_hi MSB of the second pair
@param[in] b_lo LSB of the second pair */
UNIV_INLINE
void
ut_pair_min(
ulint* min_hi,
ulint* min_lo,
ulint a_hi,
ulint a_lo,
ulint b_hi,
ulint b_lo);
/******************************************************//**
Compares two ulints.
@return 1 if a > b, 0 if a == b, -1 if a < b */
UNIV_INLINE
int
ut_ulint_cmp(
/*=========*/
ulint a, /*!< in: ulint */
ulint b); /*!< in: ulint */
/** Compare two pairs of integers.
@param[in] a_h more significant part of first pair
@param[in] a_l less significant part of first pair
@param[in] b_h more significant part of second pair
@param[in] b_l less significant part of second pair
@return comparison result of (a_h,a_l) and (b_h,b_l)
@retval -1 if (a_h,a_l) is less than (b_h,b_l)
@retval 0 if (a_h,a_l) is equal to (b_h,b_l)
@retval 1 if (a_h,a_l) is greater than (b_h,b_l) */
UNIV_INLINE
int
ut_pair_cmp(
ulint a_h,
ulint a_l,
ulint b_h,
ulint b_l)
MY_ATTRIBUTE((warn_unused_result));
/*************************************************************//**
Calculates fast the remainder of n/m when m is a power of two.
@param n in: numerator
@ -119,24 +76,6 @@ when m is a power of two. In other words, rounds n up to m * k.
template <typename T> inline T ut_calc_align(T n, T m)
{ return static_cast<T>(UT_CALC_ALIGN(n, m)); }
/*************************************************************//**
Calculates fast the 2-logarithm of a number, rounded upward to an
integer.
@return logarithm in the base 2, rounded upward */
UNIV_INLINE
ulint
ut_2_log(
/*=====*/
ulint n); /*!< in: number */
/*************************************************************//**
Calculates 2 to power n.
@return 2 to power n */
UNIV_INLINE
ulint
ut_2_exp(
/*=====*/
ulint n); /*!< in: number */
/**********************************************************//**
Returns the number of milliseconds since some epoch. The
value may wrap around. It should only be used for heuristic
@ -424,7 +363,5 @@ private:
} // namespace ib
#include "ut0ut.inl"
#endif

View file

@ -1,143 +0,0 @@
/*****************************************************************************
Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************************//**
@file include/ut0ut.ic
Various utilities
Created 5/30/1994 Heikki Tuuri
*******************************************************************/
#include <algorithm>
/** Calculate the minimum of two pairs.
@param[out] min_hi MSB of the minimum pair
@param[out] min_lo LSB of the minimum pair
@param[in] a_hi MSB of the first pair
@param[in] a_lo LSB of the first pair
@param[in] b_hi MSB of the second pair
@param[in] b_lo LSB of the second pair */
UNIV_INLINE
void
ut_pair_min(
ulint* min_hi,
ulint* min_lo,
ulint a_hi,
ulint a_lo,
ulint b_hi,
ulint b_lo)
{
if (a_hi == b_hi) {
*min_hi = a_hi;
*min_lo = std::min(a_lo, b_lo);
} else if (a_hi < b_hi) {
*min_hi = a_hi;
*min_lo = a_lo;
} else {
*min_hi = b_hi;
*min_lo = b_lo;
}
}
/******************************************************//**
Compares two ulints.
@return 1 if a > b, 0 if a == b, -1 if a < b */
UNIV_INLINE
int
ut_ulint_cmp(
/*=========*/
ulint a, /*!< in: ulint */
ulint b) /*!< in: ulint */
{
if (a < b) {
return(-1);
} else if (a == b) {
return(0);
} else {
return(1);
}
}
/** Compare two pairs of integers.
@param[in] a_h more significant part of first pair
@param[in] a_l less significant part of first pair
@param[in] b_h more significant part of second pair
@param[in] b_l less significant part of second pair
@return comparison result of (a_h,a_l) and (b_h,b_l)
@retval -1 if (a_h,a_l) is less than (b_h,b_l)
@retval 0 if (a_h,a_l) is equal to (b_h,b_l)
@retval 1 if (a_h,a_l) is greater than (b_h,b_l) */
UNIV_INLINE
int
ut_pair_cmp(
ulint a_h,
ulint a_l,
ulint b_h,
ulint b_l)
{
if (a_h < b_h) {
return(-1);
}
if (a_h > b_h) {
return(1);
}
return(ut_ulint_cmp(a_l, b_l));
}
/*************************************************************//**
Calculates fast the 2-logarithm of a number, rounded upward to an
integer.
@return logarithm in the base 2, rounded upward */
UNIV_INLINE
ulint
ut_2_log(
/*=====*/
ulint n) /*!< in: number != 0 */
{
ulint res;
res = 0;
ut_ad(n > 0);
n = n - 1;
for (;;) {
n = n / 2;
if (n == 0) {
break;
}
res++;
}
return(res + 1);
}
/*************************************************************//**
Calculates 2 to power n.
@return 2 to power n */
UNIV_INLINE
ulint
ut_2_exp(
/*=====*/
ulint n) /*!< in: number */
{
return((ulint) 1 << n);
}
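The two removed helpers computed ceil(log2(n)), with the quirk that ut_2_log(1) == 1, and 2 to the power n. Equivalent one-liners using C++20 <bit>, shown only for reference since the tree no longer needs them:

#include <bit>
#include <cassert>

static unsigned log2_ceil(unsigned long n)   // mirrors ut_2_log(), n > 0
{ return n > 1 ? unsigned(std::bit_width(n - 1)) : 1; }

static unsigned long exp2_ul(unsigned n)     // mirrors ut_2_exp()
{ return 1UL << n; }

int main()
{
  assert(log2_ceil(5) == 3);           // 2^3 = 8 >= 5
  assert(exp2_ul(log2_ceil(5)) == 8);
  assert(log2_ceil(1) == 1);           // quirk preserved from ut_2_log()
  return 0;
}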

View file

@ -68,6 +68,7 @@ Note that if write operation is very fast, a) or b) can be fine as alternative.
#include <sys/syscall.h>
#endif
#include <algorithm>
#include <atomic>
#include <thread>
#include <mutex>

View file

@ -215,7 +215,6 @@ mem_heap_validate(
case MEM_HEAP_DYNAMIC:
break;
case MEM_HEAP_BUFFER:
case MEM_HEAP_BUFFER | MEM_HEAP_BTR_SEARCH:
ut_ad(block->len <= srv_page_size);
break;
default:
@ -242,8 +241,7 @@ static void ut_strlcpy_rev(char* dst, const char* src, ulint size)
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@ -257,12 +255,11 @@ mem_heap_create_block_func(
ulint type) /*!< in: type of heap: MEM_HEAP_DYNAMIC or
MEM_HEAP_BUFFER */
{
buf_block_t* buf_block = NULL;
buf_block_t* buf_block;
mem_block_t* block;
ulint len;
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
if (heap != NULL) {
ut_d(mem_heap_validate(heap));
@ -276,24 +273,11 @@ mem_heap_create_block_func(
ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);
block = static_cast<mem_block_t*>(ut_malloc_nokey(len));
buf_block = nullptr;
} else {
len = srv_page_size;
if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
/* We cannot allocate the block from the
buffer pool, but must get the free block from
the heap header free block field */
buf_block = static_cast<buf_block_t*>(heap->free_block);
heap->free_block = NULL;
if (UNIV_UNLIKELY(!buf_block)) {
return(NULL);
}
} else {
buf_block = buf_block_alloc();
}
buf_block = buf_block_alloc();
block = (mem_block_t*) buf_block->page.frame;
}
@ -304,7 +288,6 @@ mem_heap_create_block_func(
}
block->buf_block = buf_block;
block->free_block = NULL;
ut_d(ut_strlcpy_rev(block->file_name, file_name,
sizeof(block->file_name)));
@ -340,8 +323,7 @@ mem_heap_create_block_func(
/***************************************************************//**
Adds a new block to a memory heap.
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
/*===============*/
@ -400,9 +382,6 @@ mem_heap_block_free(
{
ulint type;
ulint len;
buf_block_t* buf_block;
buf_block = static_cast<buf_block_t*>(block->buf_block);
UT_LIST_REMOVE(heap->base, block);
@ -413,25 +392,10 @@ mem_heap_block_free(
len = block->len;
if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) {
ut_ad(!buf_block);
ut_ad(!block->buf_block);
ut_free(block);
} else {
ut_ad(type & MEM_HEAP_BUFFER);
buf_block_free(buf_block);
}
}
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap) /*!< in: heap */
{
if (UNIV_LIKELY_NULL(heap->free_block)) {
buf_block_free(static_cast<buf_block_t*>(heap->free_block));
heap->free_block = NULL;
buf_block_free(block->buf_block);
}
}

View file

@ -31,9 +31,8 @@ Created 11/26/1995 Heikki Tuuri
#include "log0crypt.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#else
# include "btr0cur.h"
#endif
#include "btr0cur.h"
#include "srv0start.h"
#include "log.h"
#include "mariadb_stats.h"
@ -1450,7 +1449,8 @@ void mtr_t::page_lock_upgrade(const buf_block_t &block)
(MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block.index || !block.index->freed());
ut_d(if (dict_index_t *index= block.index))
ut_ad(!index->freed());
#endif /* BTR_CUR_HASH_ADAPT */
}

File diff suppressed because it is too large

View file

@ -203,6 +203,7 @@ cmp_decimal(const byte* a, ulint a_length, const byte* b, ulint b_length)
@retval positive if data1 is greater than data2 */
int cmp_data(ulint mtype, ulint prtype, bool descending,
const byte *data1, size_t len1, const byte *data2, size_t len2)
noexcept
{
ut_ad(len1 != UNIV_SQL_DEFAULT);
ut_ad(len2 != UNIV_SQL_DEFAULT);
@ -341,15 +342,14 @@ int cmp_data(ulint mtype, ulint prtype, bool descending,
int cmp_dtuple_rec_with_match_low(const dtuple_t *dtuple, const rec_t *rec,
const dict_index_t *index,
const rec_offs *offsets,
ulint n_cmp, ulint *matched_fields)
ulint n_cmp, uint16_t *matched_fields)
{
ulint cur_field; /* current field number */
int ret = 0; /* return value */
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, index, offsets));
cur_field = *matched_fields;
auto cur_field = *matched_fields;
ut_ad(n_cmp > 0);
ut_ad(n_cmp <= dtuple_get_n_fields(dtuple));
@ -363,17 +363,16 @@ int cmp_dtuple_rec_with_match_low(const dtuple_t *dtuple, const rec_t *rec,
/* The "infimum node pointer" is always first. */
if (UNIV_UNLIKELY(rec_info & REC_INFO_MIN_REC_FLAG)) {
ret = !(tup_info & REC_INFO_MIN_REC_FLAG);
goto order_resolved;
return !(tup_info & REC_INFO_MIN_REC_FLAG);
} else if (UNIV_UNLIKELY(tup_info & REC_INFO_MIN_REC_FLAG)) {
ret = -1;
goto order_resolved;
return -1;
}
}
/* Match fields in a loop */
for (; cur_field < n_cmp; cur_field++) {
for (const bool may_descend{!index->is_ibuf()};
cur_field < n_cmp; cur_field++) {
const byte* rec_b_ptr;
const dfield_t* dtuple_field
= dtuple_get_nth_field(dtuple, cur_field);
@ -402,229 +401,17 @@ int cmp_dtuple_rec_with_match_low(const dtuple_t *dtuple, const rec_t *rec,
ut_ad(!dfield_is_ext(dtuple_field));
ret = cmp_data(type->mtype, type->prtype, !index->is_ibuf()
ret = cmp_data(type->mtype, type->prtype, may_descend
&& index->fields[cur_field].descending,
dtuple_b_ptr, dtuple_f_len,
rec_b_ptr, rec_f_len);
if (ret) {
goto order_resolved;
}
}
order_resolved:
*matched_fields = cur_field;
return(ret);
}
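The matched_fields protocol above lets a caller resume the comparison after a prefix that is already known to be equal: during a page binary search, fields matched against both the lower and the upper bound need not be compared again. A hypothetical sketch over int vectors, which stand in for typed record fields:

#include <cstdint>
#include <vector>

int cmp_with_match(const std::vector<int> &tuple, const std::vector<int> &rec,
                   uint16_t *matched_fields)
{
  uint16_t i= *matched_fields;         // resume after the known-equal prefix
  int ret= 0;
  for (; i < tuple.size() && i < rec.size(); i++)
    if ((ret= (tuple[i] > rec[i]) - (tuple[i] < rec[i])) != 0)
      break;
  *matched_fields= i;                  // report progress back to the caller
  return ret;
}

int main()
{
  uint16_t matched= 0;
  std::vector<int> key{1, 2, 3}, rec{1, 2, 5};
  int c= cmp_with_match(key, rec, &matched);
  return !(c < 0 && matched == 2);     // key < rec; first 2 fields equal
}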
/** Get the pad character code point for a type.
@param[in] type
@return pad character code point
@retval ULINT_UNDEFINED if no padding is specified */
UNIV_INLINE
ulint
cmp_get_pad_char(
const dtype_t* type)
{
switch (type->mtype) {
case DATA_FIXBINARY:
case DATA_BINARY:
if (dtype_get_charset_coll(type->prtype)
== DATA_MYSQL_BINARY_CHARSET_COLL) {
/* Starting from 5.0.18, do not pad
VARBINARY or BINARY columns. */
return(ULINT_UNDEFINED);
}
/* Fall through */
case DATA_CHAR:
case DATA_VARCHAR:
case DATA_MYSQL:
case DATA_VARMYSQL:
/* Space is the padding character for all char and binary
strings, and starting from 5.0.3, also for TEXT strings. */
return(0x20);
case DATA_GEOMETRY:
/* DATA_GEOMETRY is binary data, not ASCII-based. */
return(ULINT_UNDEFINED);
case DATA_BLOB:
if (!(type->prtype & DATA_BINARY_TYPE)) {
return(0x20);
}
/* Fall through */
default:
/* No padding specified */
return(ULINT_UNDEFINED);
}
}
/** Compare a data tuple to a physical record.
@param[in] dtuple data tuple
@param[in] rec B-tree or R-tree index record
@param[in] index index tree
@param[in] offsets rec_get_offsets(rec)
@param[in,out] matched_fields number of completely matched fields
@param[in,out] matched_bytes number of matched bytes in the first
field that is not matched
@return the comparison result of dtuple and rec
@retval 0 if dtuple is equal to rec
@retval negative if dtuple is less than rec
@retval positive if dtuple is greater than rec */
int
cmp_dtuple_rec_with_match_bytes(
const dtuple_t* dtuple,
const rec_t* rec,
const dict_index_t* index,
const rec_offs* offsets,
ulint* matched_fields,
ulint* matched_bytes)
{
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!(REC_INFO_MIN_REC_FLAG
& dtuple_get_info_bits(dtuple)));
ut_ad(!index->is_ibuf());
if (UNIV_UNLIKELY(REC_INFO_MIN_REC_FLAG
& rec_get_info_bits(rec, rec_offs_comp(offsets)))) {
ut_ad(page_rec_is_first(rec, page_align(rec)));
ut_ad(!page_has_prev(page_align(rec)));
ut_ad(rec_is_metadata(rec, *index));
return 1;
}
ulint cur_field = *matched_fields;
ulint cur_bytes = *matched_bytes;
ulint n_cmp = dtuple_get_n_fields_cmp(dtuple);
int ret = 0;
ut_ad(n_cmp <= dtuple_get_n_fields(dtuple));
ut_ad(cur_field <= n_cmp);
ut_ad(cur_field + (cur_bytes > 0) <= rec_offs_n_fields(offsets));
/* Match fields in a loop; stop if we run out of fields in dtuple
or find an externally stored field */
while (cur_field < n_cmp) {
const dfield_t* dfield = dtuple_get_nth_field(
dtuple, cur_field);
const dtype_t* type = dfield_get_type(dfield);
ulint dtuple_f_len = dfield_get_len(dfield);
const byte* dtuple_b_ptr;
const byte* rec_b_ptr;
ulint rec_f_len;
dtuple_b_ptr = static_cast<const byte*>(
dfield_get_data(dfield));
ut_ad(!rec_offs_nth_default(offsets, cur_field));
rec_b_ptr = rec_get_nth_field(rec, offsets,
cur_field, &rec_f_len);
ut_ad(!rec_offs_nth_extern(offsets, cur_field));
/* If we have matched yet 0 bytes, it may be that one or
both the fields are SQL null, or the record or dtuple may be
the predefined minimum record. */
if (cur_bytes == 0) {
if (dtuple_f_len == UNIV_SQL_NULL) {
if (rec_f_len == UNIV_SQL_NULL) {
goto next_field;
}
ret = -1;
goto order_resolved;
} else if (rec_f_len == UNIV_SQL_NULL) {
/* We define the SQL null to be the
smallest possible value of a field
in the alphabetical order */
ret = 1;
goto order_resolved;
}
}
switch (type->mtype) {
case DATA_FIXBINARY:
case DATA_BINARY:
case DATA_INT:
case DATA_SYS_CHILD:
case DATA_SYS:
break;
case DATA_BLOB:
if (type->prtype & DATA_BINARY_TYPE) {
break;
}
/* fall through */
default:
ret = cmp_data(type->mtype, type->prtype, false,
dtuple_b_ptr, dtuple_f_len,
rec_b_ptr, rec_f_len);
if (!ret) {
goto next_field;
}
cur_bytes = 0;
goto order_resolved;
}
/* Set the pointers at the current byte */
rec_b_ptr += cur_bytes;
dtuple_b_ptr += cur_bytes;
/* Compare then the fields */
for (const ulint pad = cmp_get_pad_char(type);;
cur_bytes++) {
ulint rec_byte = pad;
ulint dtuple_byte = pad;
if (rec_f_len <= cur_bytes) {
if (dtuple_f_len <= cur_bytes) {
goto next_field;
}
if (rec_byte == ULINT_UNDEFINED) {
ret = 1;
goto order_resolved;
}
} else {
rec_byte = *rec_b_ptr++;
}
if (dtuple_f_len <= cur_bytes) {
if (dtuple_byte == ULINT_UNDEFINED) {
ret = -1;
goto order_resolved;
}
} else {
dtuple_byte = *dtuple_b_ptr++;
}
if (dtuple_byte < rec_byte) {
ret = -1;
goto order_resolved;
} else if (dtuple_byte > rec_byte) {
ret = 1;
goto order_resolved;
}
}
next_field:
cur_field++;
cur_bytes = 0;
}
ut_ad(cur_bytes == 0);
order_resolved:
*matched_fields = cur_field;
*matched_bytes = cur_bytes;
return !ret || UNIV_LIKELY(!index->fields[cur_field].descending)
? ret : -ret;
return ret;
}
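
The matched_fields/matched_bytes pair lets the caller resume a later
comparison where this one stopped instead of re-scanning the common
prefix. A minimal standalone sketch of that bookkeeping (not InnoDB
code: fields are plain strings, and SQL NULL, padding and collations
are ignored):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

/* Compare two keys field by field and then byte by byte, resuming at
*matched_fields/*matched_bytes and updating them for the next call. */
static int cmp_with_match(const std::vector<std::string> &a,
                          const std::vector<std::string> &b,
                          std::size_t *matched_fields,
                          std::size_t *matched_bytes)
{
  std::size_t f = *matched_fields;
  std::size_t bytes = *matched_bytes;
  const std::size_t n = std::min(a.size(), b.size());
  int ret = 0;
  for (; f < n; f++, bytes = 0)
  {
    const std::string &x = a[f], &y = b[f];
    for (;; bytes++)
    {
      if (bytes == x.size() || bytes == y.size())
      {
        if (x.size() != y.size())
        {
          ret = x.size() < y.size() ? -1 : 1;
          goto resolved;
        }
        break; /* equal fields: advance to the next pair */
      }
      if (x[bytes] != y[bytes])
      {
        ret = (unsigned char) x[bytes] < (unsigned char) y[bytes] ? -1 : 1;
        goto resolved;
      }
    }
  }
resolved:
  *matched_fields = f;
  *matched_bytes = bytes;
  return ret;
}

int main()
{
  std::size_t fields = 0, bytes = 0;
  /* ("abc","de") vs ("abc","dx"): one whole field and one byte match */
  assert(cmp_with_match({"abc", "de"}, {"abc", "dx"}, &fields, &bytes) < 0);
  assert(fields == 1 && bytes == 1);
  return 0;
}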
/** Check if a dtuple is a prefix of a record.
@ -637,8 +424,7 @@ bool cmp_dtuple_is_prefix_of_rec(const dtuple_t *dtuple, const rec_t *rec,
const dict_index_t *index,
const rec_offs *offsets)
{
ulint matched_fields= 0;
ulint n_fields= dtuple_get_n_fields(dtuple);
uint16_t matched_fields= 0, n_fields= dtuple_get_n_fields(dtuple);
ut_ad(n_fields <= rec_offs_n_fields(offsets));
cmp_dtuple_rec_with_match(dtuple, rec, index, offsets, &matched_fields);
return matched_fields == n_fields;

View file

@ -3215,10 +3215,6 @@ static void add_fts_index(dict_table_t *table)
for (ulint i= 0; i < clust_index->n_uniq; i++)
dict_index_add_col(fts_index, table, clust_index->fields[i].col,
clust_index->fields[i].prefix_len);
#ifdef BTR_CUR_HASH_ADAPT
fts_index->search_info= btr_search_info_create(fts_index->heap);
fts_index->search_info->ref_count= 0;
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(fts_index->table->indexes, fts_index);
}
@ -3321,9 +3317,6 @@ static dict_table_t *build_fts_hidden_table(
new_index->fields[old_index->n_fields].fixed_len= sizeof(doc_id_t);
}
#ifdef BTR_CUR_HASH_ADAPT
new_index->search_info= btr_search_info_create(new_index->heap);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(new_index->table->indexes, new_index);
old_index= UT_LIST_GET_NEXT(indexes, old_index);
if (UT_LIST_GET_LEN(new_table->indexes)

View file

@ -1980,15 +1980,13 @@ row_ins_dupl_error_with_rec(
dict_index_t* index, /*!< in: index */
const rec_offs* offsets)/*!< in: rec_get_offsets(rec, index) */
{
ulint matched_fields;
ulint n_unique;
ulint i;
ut_ad(rec_offs_validate(rec, index, offsets));
n_unique = dict_index_get_n_unique(index);
const auto n_unique = dict_index_get_n_unique(index);
matched_fields = 0;
uint16_t matched_fields = 0;
cmp_dtuple_rec_with_match(entry, rec, index, offsets, &matched_fields);
@ -2085,7 +2083,6 @@ row_ins_scan_sec_index_for_duplicate(
mem_heap_t* offsets_heap)
/*!< in/out: memory heap that can be emptied */
{
ulint n_unique;
int cmp;
ulint n_fields_cmp;
btr_pcur_t pcur;
@ -2097,7 +2094,7 @@ row_ins_scan_sec_index_for_duplicate(
ut_ad(!index->lock.have_any());
n_unique = dict_index_get_n_unique(index);
const auto n_unique = dict_index_get_n_unique(index);
/* If the secondary index is unique, but one of the fields in the
n_unique first fields is NULL, a unique key violation cannot occur,
@ -2227,7 +2224,7 @@ row_ins_duplicate_online(ulint n_uniq, const dtuple_t *entry,
const rec_t *rec, const dict_index_t *index,
rec_offs *offsets)
{
ulint fields = 0;
uint16_t fields = 0;
/* During rebuild, there should not be any delete-marked rows
in the new table. */
@ -2762,13 +2759,10 @@ avoid_bulk:
#endif /* WITH_WSREP */
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
btr_search_x_lock_all();
index->table->bulk_trx_id = trx->id;
btr_search_x_unlock_all();
} else {
index->table->bulk_trx_id = trx->id;
}
auto &part = btr_search.get_part(*index);
part.latch.wr_lock(SRW_LOCK_CALL);
index->table->bulk_trx_id = trx->id;
part.latch.wr_unlock();
#else /* BTR_CUR_HASH_ADAPT */
index->table->bulk_trx_id = trx->id;
#endif /* BTR_CUR_HASH_ADAPT */
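
The rewrite above replaces btr_search_x_lock_all() with the latch of
the single partition that can refer to this index. A minimal sketch of
the locking pattern; the choice of partition by index id modulo the
partition count is an assumption of this sketch, not taken from the
patch:

#include <cstddef>
#include <shared_mutex>
#include <vector>

struct Partition { std::shared_mutex latch; };

struct Ahi
{
  std::vector<Partition> parts;
  explicit Ahi(std::size_t n_parts) : parts(n_parts) {}
  Partition &get_part(unsigned long long index_id)
  { return parts[index_id % parts.size()]; }
};

/* Publish trx_id under the one latch that AHI users of this index take. */
void set_bulk_trx_id(Ahi &ahi, unsigned long long index_id,
                     unsigned long long &bulk_trx_id,
                     unsigned long long trx_id)
{
  std::unique_lock<std::shared_mutex> guard(ahi.get_part(index_id).latch);
  bulk_trx_id = trx_id; /* one writer latch instead of parts.size() */
}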
@ -3289,7 +3283,7 @@ row_ins_clust_index_entry(
: index->table->is_temporary()
? BTR_NO_LOCKING_FLAG : 0;
#endif /* WITH_WSREP */
const ulint orig_n_fields = entry->n_fields;
const auto orig_n_fields = entry->n_fields;
/* For intermediate table during copy alter table,
skip the undo log and record lock checking for

View file

@ -744,7 +744,7 @@ row_log_table_low_redundant(
ulint avail_size;
mem_heap_t* heap = NULL;
dtuple_t* tuple;
const ulint n_fields = rec_get_n_fields_old(rec);
const auto n_fields = rec_get_n_fields_old(rec);
ut_ad(index->n_fields >= n_fields);
ut_ad(index->n_fields == n_fields || index->is_instant());
@ -1701,22 +1701,7 @@ err_exit:
if (error) {
goto err_exit;
}
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(pcur)->flag) {
case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
/* We did not request buffering. */
break;
case BTR_CUR_HASH:
case BTR_CUR_HASH_FAIL:
case BTR_CUR_BINARY:
goto flag_ok;
}
ut_ad(0);
flag_ok:
#endif /* UNIV_DEBUG */
ut_ad(pcur->btr_cur.flag == BTR_CUR_BINARY);
if (page_rec_is_infimum(btr_pcur_get_rec(pcur))
|| btr_pcur_get_low_match(pcur) < index->n_uniq) {
@ -1785,22 +1770,8 @@ row_log_table_apply_delete(
if (err != DB_SUCCESS) {
goto all_done;
}
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(&pcur)->flag) {
case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
/* We did not request buffering. */
break;
case BTR_CUR_HASH:
case BTR_CUR_HASH_FAIL:
case BTR_CUR_BINARY:
goto flag_ok;
}
ut_ad(0);
flag_ok:
#endif /* UNIV_DEBUG */
ut_ad(btr_pcur_get_btr_cur(&pcur)->flag == BTR_CUR_BINARY);
if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
|| btr_pcur_get_low_match(&pcur) < index->n_uniq) {
@ -1934,19 +1905,8 @@ func_exit_committed:
return error;
}
#ifdef UNIV_DEBUG
switch (btr_pcur_get_btr_cur(&pcur)->flag) {
case BTR_CUR_DELETE_REF:
case BTR_CUR_DEL_MARK_IBUF:
case BTR_CUR_DELETE_IBUF:
case BTR_CUR_INSERT_TO_IBUF:
ut_ad(0);/* We did not request buffering. */
case BTR_CUR_HASH:
case BTR_CUR_HASH_FAIL:
case BTR_CUR_BINARY:
break;
}
#endif /* UNIV_DEBUG */
ut_ad(btr_pcur_get_btr_cur(&pcur)->flag == BTR_CUR_BINARY);
ut_ad(!page_rec_is_infimum(btr_pcur_get_rec(&pcur))
&& btr_pcur_get_low_match(&pcur) >= index->n_uniq);

View file

@ -4088,7 +4088,7 @@ row_merge_drop_indexes(
prebuilt->ins_node->entry_list
in ins_node_create_entry_list(). */
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
ut_ad(!index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(
table, index);

View file

@ -2172,7 +2172,7 @@ row_create_index_for_mysql(
err = dict_create_index_tree_in_mem(index, trx);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
ut_ad(!index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) {

View file

@ -772,7 +772,7 @@ row_rec_to_index_entry_impl(
ut_ad(info_bits == 0);
ut_ad(!pad);
}
dtuple_t* entry = dtuple_create(heap, rec_len);
dtuple_t* entry = dtuple_create(heap, uint16_t(rec_len));
dfield_t* dfield = entry->fields;
dtuple_set_n_fields_cmp(entry,
@ -869,7 +869,7 @@ copy_user_fields:
}
if (mblob == 2) {
ulint n_fields = ulint(dfield - entry->fields);
uint16_t n_fields = uint16_t(dfield - entry->fields);
ut_ad(entry->n_fields >= n_fields);
entry->n_fields = n_fields;
}
@ -1300,8 +1300,10 @@ row_search_index_entry(
case BTR_CUR_INSERT_TO_IBUF:
return(ROW_BUFFERED);
#ifdef BTR_CUR_HASH_ADAPT
case BTR_CUR_HASH:
case BTR_CUR_HASH_FAIL:
#endif
case BTR_CUR_BINARY:
break;
}

View file

@ -2612,9 +2612,9 @@ row_sel_convert_mysql_key_to_innobase(
key_end = key_ptr + key_len;
/* Permit us to access any field in the tuple (ULINT_MAX): */
/* Permit us to access any field in the tuple: */
dtuple_set_n_fields(tuple, ULINT_MAX);
ut_d(dtuple_set_n_fields(tuple, uint16_t(~0)));
dfield = dtuple_get_nth_field(tuple, 0);
field = dict_index_get_nth_field(index, 0);
@ -2781,7 +2781,7 @@ row_sel_convert_mysql_key_to_innobase(
/* We set the length of tuple to n_fields: we assume that the memory
area allocated for it is big enough (usually bigger than n_fields). */
dtuple_set_n_fields(tuple, n_fields);
dtuple_set_n_fields(tuple, uint16_t(n_fields));
}
/**************************************************************//**
@ -3451,7 +3451,7 @@ Row_sel_get_clust_rec_for_mysql::operator()(
page_cur_t page_cursor;
page_cursor.block = block;
page_cursor.index = sec_index;
ulint up_match = 0, low_match = 0;
uint16_t up_match = 0, low_match = 0;
ut_ad(!page_cur_search_with_match(tuple, PAGE_CUR_LE,
&up_match,
&low_match,
@ -4169,8 +4169,7 @@ row_sel_fill_vrow(
offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
ULINT_UNDEFINED, &heap);
*vrow = dtuple_create_with_vcol(
heap, 0, dict_table_get_n_v_cols(index->table));
*vrow = dtuple_create_with_vcol(heap, 0, index->table->n_v_cols);
/* Initialize all virtual row's mtype to DATA_MISSING */
dtuple_init_v_fld(*vrow);
@ -4538,7 +4537,7 @@ early_not_found:
if (UNIV_UNLIKELY(direction == 0)
&& unique_search
&& btr_search_enabled
&& btr_search.enabled
&& dict_index_is_clust(index)
&& !index->table->is_temporary()
&& !prebuilt->templ_contains_blob
@ -6001,7 +6000,6 @@ row_count_rtree_recs(
mem_heap_t* heap;
dtuple_t* entry;
dtuple_t* search_entry = prebuilt->search_tuple;
ulint entry_len;
ulint i;
byte* buf;
@ -6012,10 +6010,9 @@ row_count_rtree_recs(
heap = mem_heap_create(256);
/* Build a search tuple. */
entry_len = dict_index_get_n_fields(index);
entry = dtuple_create(heap, entry_len);
entry = dtuple_create(heap, index->n_fields);
for (i = 0; i < entry_len; i++) {
for (i = 0; i < index->n_fields; i++) {
const dict_field_t* ind_field
= dict_index_get_nth_field(index, i);
const dict_col_t* col
@ -6793,9 +6790,9 @@ count_row:
if (prev_entry)
{
ulint matched_fields= 0;
uint16_t matched= 0;
int cmp= cmp_dtuple_rec_with_match(prev_entry, rec, index, offsets,
&matched_fields);
&matched);
const char* msg;
if (UNIV_LIKELY(cmp < 0));
@ -6808,7 +6805,7 @@ not_ok:
<< ": " << *prev_entry << ", "
<< rec_offsets_print(rec, offsets);
}
else if (index->is_unique() && matched_fields >=
else if (index->is_unique() && matched >=
dict_index_get_n_ordering_defined_by_user(index))
{
/* NULL values in unique indexes are considered not to be duplicates */

View file

@ -522,7 +522,7 @@ row_vers_build_cur_vrow_low(
const rec_t* version;
rec_t* prev_version;
mem_heap_t* heap = NULL;
ulint num_v = dict_table_get_n_v_cols(index->table);
const auto num_v = dict_table_get_n_v_cols(index->table);
const dfield_t* field;
ulint i;
bool all_filled = false;

View file

@ -773,20 +773,17 @@ srv_printf_innodb_monitor(
ibuf_print(file);
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
if (btr_search.enabled) {
fputs("-------------------\n"
"ADAPTIVE HASH INDEX\n"
"-------------------\n", file);
for (ulint i = 0; i < btr_ahi_parts; ++i) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
for (ulong i = 0; i < btr_search.n_parts; ++i) {
btr_sea::partition& part= btr_search.parts[i];
part.blocks_mutex.wr_lock();
fprintf(file, "Hash table size " ULINTPF
", node heap has " ULINTPF " buffer(s)\n",
part->table.n_cells,
part->heap->base.count
- !part->heap->free_block);
part->latch.rd_unlock();
part.table.n_cells, part.blocks.count + !!part.spare);
part.blocks_mutex.wr_unlock();
}
const ulint with_ahi = btr_cur_n_sea;
@ -859,17 +856,17 @@ srv_export_innodb_status(void)
export_vars.innodb_ahi_miss = btr_cur_n_non_sea;
ulint mem_adaptive_hash = 0;
for (ulong i = 0; i < btr_ahi_parts; i++) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
if (part->heap) {
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
mem_adaptive_hash += mem_heap_get_size(part->heap)
+ part->table.n_cells * sizeof(hash_cell_t);
}
part->latch.rd_unlock();
for (ulong i = 0; i < btr_search.n_parts; i++) {
btr_sea::partition& part= btr_search.parts[i];
part.blocks_mutex.wr_lock();
mem_adaptive_hash += part.blocks.count + !!part.spare;
part.blocks_mutex.wr_unlock();
}
mem_adaptive_hash <<= srv_page_size_shift;
btr_search.parts[0].latch.rd_lock(SRW_LOCK_CALL);
mem_adaptive_hash += btr_search.parts[0].table.n_cells
* sizeof *btr_search.parts[0].table.array * btr_search.n_parts;
btr_search.parts[0].latch.rd_unlock();
export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash;
#endif
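
The revised statistic charges whole pages for the per-partition node
blocks (including the cached spare block) and adds the hash cell arrays
on top. A worked example with invented figures, assuming one pointer
per hash cell:

#include <cstddef>
#include <cstdio>

int main()
{
  const std::size_t n_parts = 8;               /* btr_search.n_parts */
  const unsigned page_size_shift = 14;         /* 16KiB pages */
  const std::size_t blocks_per_part = 4;       /* part.blocks.count */
  const std::size_t spare = 1;                 /* !!part.spare */
  const std::size_t n_cells = 1000003;         /* hash table size */
  std::size_t mem = n_parts * (blocks_per_part + spare);
  mem <<= page_size_shift;                     /* whole pages of ahi_node */
  mem += n_cells * sizeof(void*) * n_parts;    /* hash cell arrays */
  std::printf("innodb_mem_adaptive_hash = %zu bytes\n", mem);
  return 0;
}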

View file

@ -2041,7 +2041,7 @@ void innodb_shutdown()
#ifdef BTR_CUR_HASH_ADAPT
if (dict_sys.is_initialised()) {
btr_search_disable();
btr_search.disable();
}
#endif /* BTR_CUR_HASH_ADAPT */
ibuf_close();

View file

@ -45,8 +45,7 @@ Created 3/26/1996 Heikki Tuuri
const dtuple_t trx_undo_metadata = {
/* This also works for REC_INFO_METADATA_ALTER, because the
delete-mark (REC_INFO_DELETED_FLAG) is ignored when searching. */
REC_INFO_METADATA_ADD, 0, 0,
NULL, 0, NULL
REC_INFO_METADATA_ADD, 0, 0, 0, nullptr, nullptr
#ifdef UNIV_DEBUG
, DATA_TUPLE_MAGIC_N
#endif /* UNIV_DEBUG */
@ -580,7 +579,7 @@ trx_undo_rec_get_row_ref(
{
ut_ad(index->is_primary());
const ulint ref_len = dict_index_get_n_unique(index);
const uint16_t ref_len = dict_index_get_n_unique(index);
dtuple_t* tuple = dtuple_create(heap, ref_len);
*ref = tuple;

View file

@ -25,6 +25,12 @@ ADD_EXECUTABLE(innodb_fts-t innodb_fts-t.cc)
TARGET_LINK_LIBRARIES(innodb_fts-t mysys mytap)
ADD_DEPENDENCIES(innodb_fts-t GenError)
MY_ADD_TEST(innodb_fts)
IF (WITH_INNODB_AHI)
ADD_EXECUTABLE(innodb_ahi-t innodb_ahi-t.cc)
TARGET_LINK_LIBRARIES(innodb_ahi-t mysys mytap)
ADD_DEPENDENCIES(innodb_ahi-t GenError)
MY_ADD_TEST(innodb_ahi)
ENDIF()
# See explanation in innobase/CMakeLists.txt
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64|powerpc64|s390x")
ADD_COMPILE_FLAGS(

View file

@ -0,0 +1,221 @@
#include "tap.h"
#define SUX_LOCK_GENERIC
#define NO_ELISION
#define thd_kill_level(thd) 0
#define srv0mon_h
#define MONITOR_INC(x)
#define MONITOR_INC_VALUE(x,y)
#include "../btr/btr0sea.cc"
const size_t alloc_max_retries= 0;
const byte zero[16384]= { '\0', };
const byte *field_ref_zero= zero;
ulint srv_buf_pool_curr_size, srv_buf_pool_old_size, srv_buf_pool_size;
uint32_t srv_page_size_shift= 14;
ulong srv_page_size= 1 << 14;
dict_sys_t dict_sys;
buf_pool_t buf_pool;
buf_pool_t::chunk_t::map *buf_pool_t::chunk_t::map_reg;
buf_pool_t::chunk_t::map *buf_pool_t::chunk_t::map_ref;
void buf_pool_t::free_block(buf_block_t*) noexcept {}
void dict_mem_table_free(dict_table_t*) {}
void dict_mem_index_free(dict_index_t*) {}
buf_block_t *buf_LRU_get_free_block(bool) { return nullptr; }
ibool dtuple_check_typed(const dtuple_t*) { return true; }
bool btr_cur_t::check_mismatch(const dtuple_t&,page_cur_mode_t,ulint) noexcept
{ return false; }
buf_block_t *buf_page_get_gen(const page_id_t, ulint, rw_lock_type_t,
buf_block_t*,ulint,mtr_t*,dberr_t*,bool) noexcept
{ return nullptr; }
bool buf_page_make_young_if_needed(buf_page_t*) { return false; }
mtr_t::mtr_t()= default;
mtr_t::~mtr_t()= default;
void mtr_t::start() {}
void mtr_t::commit() {}
void mtr_t::rollback_to_savepoint(ulint, ulint) {}
void small_vector_base::grow_by_1(void *, size_t) {}
void sql_print_error(const char*, ...) {}
ulint ut_find_prime(ulint n) { return n; }
void mem_heap_block_free(mem_block_info_t*, mem_block_info_t*){}
namespace ib { error::~error() {} fatal_or_error::~fatal_or_error() {} }
std::ostream &operator<<(std::ostream &out, const page_id_t) { return out; }
#ifdef UNIV_DEBUG
byte data_error;
bool srw_lock_debug::have_wr() const noexcept { return false; }
void srw_lock_debug::rd_unlock() noexcept {}
void srw_lock_debug::rd_lock(SRW_LOCK_ARGS(const char*,unsigned)) noexcept {}
#endif
void page_hash_latch::read_lock_wait() noexcept {}
# ifndef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
template<> void pthread_mutex_wrapper<true>::wr_wait() noexcept {}
# endif
template<> void srw_lock_<true>::rd_wait() noexcept {}
template<> void srw_lock_<true>::wr_wait() noexcept {}
template<bool spin> void ssux_lock_impl<spin>::wake() noexcept {}
template<bool spin> void srw_mutex_impl<spin>::wake() noexcept {}
#ifdef UNIV_PFS_MEMORY
PSI_memory_key ut_new_get_key_by_file(uint32_t){ return PSI_NOT_INSTRUMENTED; }
PSI_memory_key mem_key_other, mem_key_std;
#endif
#ifdef UNIV_PFS_RWLOCK
template<bool spin>
void srw_lock_impl<spin>::psi_wr_lock(const char*, unsigned) noexcept {}
template<bool spin>
void srw_lock_impl<spin>::psi_rd_lock(const char*, unsigned) noexcept {}
void dict_sys_t::unlock() noexcept {}
void dict_sys_t::freeze(const char *, unsigned) noexcept {}
void dict_sys_t::unfreeze() noexcept {}
#endif /* UNIV_PFS_RWLOCK */
void ut_dbg_assertion_failed(const char *e, const char *file, unsigned line)
{
fprintf(stderr, "%s:%u: Assertion %s failed\n", file, line, e ? e : "");
abort();
}
int main(int, char **argv)
{
MY_INIT(*argv);
plan(42);
btr_search.create();
btr_search.free();
dfield_t fields[2]= {{nullptr,0,0,UNIV_SQL_NULL,{0,DATA_VARCHAR,3,1,1}},
{(char*)"42",0,0,2,{0,DATA_CHAR,2,1,1}}};
dtuple_t tuple2{0,2,2,0,fields,nullptr, ut_d(DATA_TUPLE_MAGIC_N) };
dict_col_t cols[]={{}, {}, {DATA_NOT_NULL,DATA_CHAR,2,1,1,1,0,0,{nullptr,0}}};
dict_field_t ifields[3]= {{}, {}, {}};
dict_table_t table{};
dict_index_t index{};
index.table= &table;
index.n_uniq= 3;
index.n_nullable= 3;
index.n_fields= 3;
index.n_core_fields= 3;
index.n_core_null_bytes= 1;
index.fields= ifields;
ifields[0].col= &cols[0];
ifields[1].col= &cols[2];
ifields[2].col= &cols[2];
ifields[1].fixed_len= 2;
ifields[2].fixed_len= 2;
constexpr uint32_t crc42= 0x2e7d3dcb, crc3z42= 0x9a6e3c2c,
crc2z= 0xf16177d2, crc3z= 0x6064a37a;
{
btr_cur_t cursor;
cursor.page_cur.index= &index;
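/* n_bytes_fields encodes the hashed prefix: the low 16 bits give the
number of complete fields, the upper bits give extra bytes of the next
field; this is inferred here from the expected folds below. */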
cursor.n_bytes_fields= 2;
ok(dtuple_fold(&tuple2, &cursor) == crc42, "dtuple_fold(NULL,'42')");
table.flags= DICT_TF_COMPACT;
ok(dtuple_fold(&tuple2, &cursor) == crc42, "dtuple_fold(NULL,'42')");
fields[0].type.mtype= DATA_CHAR;
ok(dtuple_fold(&tuple2, &cursor) == crc42, "dtuple_fold(NULL,'42')");
table.flags= 0;
ok(dtuple_fold(&tuple2, &cursor) == crc3z42, "dtuple_fold('\\0\\0\\0','42')");
fields[0].type.mtype= DATA_VARCHAR;
cursor.n_bytes_fields= 1;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
table.flags= DICT_TF_COMPACT;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
fields[0].type.mtype= DATA_CHAR;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
table.flags= 0;
ok(dtuple_fold(&tuple2, &cursor) == crc3z, "dtuple_fold('\\0\\0\\0')");
fields[0].type.mtype= DATA_VARCHAR;
cursor.n_bytes_fields= 2 << 16;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
table.flags= DICT_TF_COMPACT;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
fields[0].type.mtype= DATA_CHAR;
ok(dtuple_fold(&tuple2, &cursor) == 0, "dtuple_fold(NULL)");
table.flags= 0;
ok(dtuple_fold(&tuple2, &cursor) == crc2z, "dtuple_fold('\\0\\0')");
fields[0].type.mtype= DATA_VARCHAR;
}
byte *page= static_cast<byte*>(aligned_malloc(16384, 16384));
memset_aligned<16384>(page, 0, 16384);
byte *rec= &page[256];
page[PAGE_HEADER + PAGE_HEAP_TOP]= 1;
page[PAGE_HEADER + PAGE_HEAP_TOP + 1]= 4 + 2;
const byte r1_varchar[]= {2,0x80,0,0,0,2<<1|1,0,0, '4','2'};
const byte r2_varchar[]= {0,2,0x80,0,0,0,0,2<<1,0,0, '4','2'};
const byte r1_var3[]= {2,0x80,0x80,0,0,0,3<<1|1,0,0, '4','2'};
const byte r2_var3[]= {0,2,0x80,0,0x80,0,0,0,0,3<<1,0,0, '4','2'};
const byte r1_char[]={2+3,0x83,0,0,0,2<<1|1,0,0, 0,0,0,'4','2'};
const byte r2_char[]= {0,2+3,0x80,3,0,0,0,2<<1,0,0, 0,0,0,'4','2'};
const byte c[]= { 0,1,0,0,0,0,0, '4','2'};
const byte c3[]= { 0,3,0,0,0,0,0, '4','2'};
memcpy(rec - sizeof r1_varchar + 2, r1_varchar, sizeof r1_varchar);
ok(rec_fold(rec, index, 2, false) == crc42, "rec_fold(NULL, '42')");
ok(rec_fold(rec, index, 1, false) == 0, "rec_fold(NULL)");
ok(rec_fold(rec, index, 2 << 16, false) == 0, "rec_fold(NULL)");
memcpy(rec - sizeof r2_varchar + 2, r2_varchar, sizeof r2_varchar);
ok(rec_fold(rec, index, 2, false) == crc42, "rec_fold(NULL, '42')");
ok(rec_fold(rec, index, 1, false) == 0, "rec_fold(NULL)");
ok(rec_fold(rec, index, 2 << 16, false) == 0, "rec_fold(NULL)");
memcpy(rec - sizeof r1_var3 + 2, r1_var3, sizeof r1_var3);
ok(rec_fold(rec, index, 3, false) == crc42, "rec_fold(NULL, NULL, '42')");
ok(rec_fold(rec, index, 2, false) == 0, "rec_fold(NULL,NULL)");
ok(rec_fold(rec, index, 1 | 2 << 16, false) == 0, "rec_fold(NULL,NULL)");
memcpy(rec - sizeof r2_var3 + 2, r2_var3, sizeof r2_var3);
ok(rec_fold(rec, index, 3, false) == crc42, "rec_fold(NULL, NULL, '42')");
ok(rec_fold(rec, index, 2, false) == 0, "rec_fold(NULL,NULL)");
ok(rec_fold(rec, index, 1 | 2 << 16, false) == 0, "rec_fold(NULL,NULL)");
fields[0].type.mtype= DATA_CHAR;
memcpy(rec - sizeof r1_char + 3 + 2, r1_char, sizeof r1_char);
ok(rec_fold(rec, index, 2, false) == crc3z42, "rec_fold('\\0\\0\\0', '42')");
ok(rec_fold(rec, index, 1, false) == crc3z, "rec_fold('\\0\\0\\0')");
ok(rec_fold(rec, index, 2 << 16, false) == crc2z, "rec_fold('\\0\\0')");
memcpy(rec - sizeof r2_char + 3 + 2, r2_char, sizeof r2_char);
ok(rec_fold(rec, index, 2, false) == crc3z42, "rec_fold('\\0\\0\\0', '42')");
ok(rec_fold(rec, index, 1, false) == crc3z, "rec_fold('\\0\\0\\0')");
ok(rec_fold(rec, index, 2 << 16, false) == crc2z, "rec_fold('\\0\\0')");
page[PAGE_HEADER + PAGE_N_HEAP]= 0x80;
table.flags= DICT_TF_COMPACT;
memcpy(rec - sizeof c + 2, c, sizeof c);
ok(rec_fold(rec, index, 2, true) == crc42, "rec_fold(NULL, '42')");
ok(rec_fold(rec, index, 1, true) == 0, "rec_fold(NULL)");
ok(rec_fold(rec, index, 2 << 16, true) == 0, "rec_fold(NULL)");
fields[0].type.mtype= DATA_VARCHAR;
ok(rec_fold(rec, index, 2, true) == crc42, "rec_fold(NULL, '42')");
ok(rec_fold(rec, index, 1, true) == 0, "rec_fold(NULL)");
ok(rec_fold(rec, index, 2 << 16, true) == 0, "rec_fold(NULL)");
memcpy(rec - sizeof c3 + 2, c3, sizeof c3);
fields[0].type.mtype= DATA_CHAR;
ifields[1].col= &cols[1];
ifields[1].fixed_len= 0;
ok(rec_fold(rec, index, 3, true) == crc42, "rec_fold(NULL, NULL, '42')");
ok(rec_fold(rec, index, 2, true) == 0, "rec_fold(NULL, NULL)");
ok(rec_fold(rec, index, 1 | 2 << 16, true) == 0, "rec_fold(NULL, NULL)");
fields[0].type.mtype= DATA_VARCHAR;
ok(rec_fold(rec, index, 3, true) == crc42, "rec_fold(NULL, NULL, '42')");
ok(rec_fold(rec, index, 2, true) == 0, "rec_fold(NULL, NULL)");
ok(rec_fold(rec, index, 1 | 2 << 16, true) == 0, "rec_fold(NULL, NULL)");
aligned_free(page);
my_end(MY_CHECK_ERROR);
return exit_status();
}