MDEV-35190 HASH_SEARCH duplicates effort before HASH_INSERT or HASH_DELETE

The HASH_ macros unnecessarily obfuscate the logic,
so let us replace them.

hash_cell_t::search(): Implement most of the HASH_DELETE logic:
walk the bucket chain and return the link that a subsequent
insert or remove() will update.

hash_cell_t::remove(): Remove an element.

hash_cell_t::find(): Implement the HASH_SEARCH logic.
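
To make the new interface concrete, here is a minimal standalone sketch
(a toy element type and a stripped-down cell, not the MariaDB sources):
find() is the HASH_SEARCH-style lookup, while search() returns the link
through which the caller can insert or unlink without a second traversal.

  #include <cassert>
  #include <cstring>

  struct elem { const char *name; elem *next; };

  struct cell
  {
    void *node= nullptr;

    /* HASH_SEARCH replacement: return the first element satisfying p */
    template<typename T, typename Pred>
    T *find(T *T::*next, Pred p) const noexcept
    {
      T *n;
      for (n= static_cast<T*>(node); n && !p(n); n= n->*next);
      return n;
    }

    /* groundwork for insert/remove: return the link that either points
       to the first element satisfying p or terminates the chain */
    template<typename T, typename Pred>
    T **search(T *T::*next, Pred p) noexcept
    {
      T **prev= reinterpret_cast<T**>(&node);
      for (; !p(*prev); prev= &((*prev)->*next));
      return prev;
    }
  };

  int main()
  {
    elem a{"a", nullptr}, b{"b", nullptr};
    cell c;

    /* insert: a predicate that accepts the nullptr terminator walks to
       the end of the chain; the returned link is overwritten in place */
    *c.search(&elem::next, [](const elem *e) { return !e; })= &a;
    *c.search(&elem::next, [](const elem *e) { return !e; })= &b;

    /* lookup, as HASH_SEARCH used to do */
    assert(c.find(&elem::next, [](const elem *e)
                  { return !std::strcmp(e->name, "b"); }) == &b);

    /* remove: redirect the link returned by search() past the element */
    elem **slot= c.search(&elem::next, [&](const elem *e) { return e == &a; });
    *slot= a.next;
    assert(static_cast<elem*>(c.node) == &b);
    return 0;
  }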

xb_filter_hash_free(): Avoid any hash table lookup;
just traverse the hash bucket chains and free each element.
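
A minimal sketch of that teardown, with invented toy types standing in for
xb_filter_entry_t and the hash table: every bucket chain is walked exactly
once, and the successor is saved before the current element is freed.

  #include <cstdlib>

  struct entry { entry *next; /* name and payload follow in the real struct */ };
  struct cell { void *node; };
  struct table { cell *array; unsigned long n_cells; };

  /* Walk every bucket chain once; remember the successor before freeing
     the current element, so no fold computation or lookup is needed. */
  static void free_all(table &t)
  {
    for (unsigned long i= 0; i < t.n_cells; i++)
      for (auto *e= static_cast<entry*>(t.array[i].node); e; )
      {
        entry *next= e->next;
        std::free(e);
        e= next;
      }
  }

  int main()
  {
    cell cells[4]= {};
    table t{cells, 4};
    /* build a two-element chain in one bucket, then tear everything down */
    auto *a= static_cast<entry*>(std::calloc(1, sizeof(entry)));
    auto *b= static_cast<entry*>(std::calloc(1, sizeof(entry)));
    a->next= b;
    b->next= nullptr;
    cells[1].node= a;
    free_all(t);
    return 0;
  }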

xb_register_filter_entry(): Search databases_hash only once.

rm_if_not_found(): Make use of find_filter_in_hashtable().

dict_sys_t::acquire_temporary_table(), dict_sys_t::find_table():
Define non-inline to avoid unnecessary code duplication.

dict_sys_t::add(dict_table_t *table), dict_table_rename_in_cache():
Look for a duplicate while finding the insert position.

dict_table_change_id_in_cache(): Merged into its only caller,
row_discard_tablespace().

hash_insert(): Helper function of dict_sys_t::resize().

fil_space_t::create(): Look for a duplicate (and crash if found)
when searching for the insert position.
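
dict_sys_t::add(), dict_table_rename_in_cache() and fil_space_t::create()
share this single-walk idiom: while locating the link to append through,
the walk also checks that no duplicate exists. A standalone sketch with
toy types (not the InnoDB structures):

  #include <cassert>
  #include <cstring>

  struct rec { const char *name; rec *next; };
  struct cell { void *node; };

  /* One walk down the bucket chain: assert that no element with the same
     name is present, and end up holding the link to append through. */
  static void insert_unique(cell &c, rec &r)
  {
    rec **after= reinterpret_cast<rec**>(&c.node);
    for (; *after; after= &(*after)->next)
      assert(std::strcmp((*after)->name, r.name));
    r.next= nullptr;
    *after= &r;
  }

  int main()
  {
    cell c{nullptr};
    rec t1{"db/t1", nullptr}, t2{"db/t2", nullptr};
    insert_unique(c, t1);
    insert_unique(c, t2);
    assert(static_cast<rec*>(c.node) == &t1 && t1.next == &t2);
    return 0;
  }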

lock_rec_discard(): Take the hash array cell as a parameter
to avoid a duplicated lookup.
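
The benefit of passing the cell can be seen in a toy model (invented types,
not the InnoDB lock_sys code): the caller resolves the hash cell once for
the whole page, and every discard unlinks directly within that cell instead
of re-deriving it from the page id.

  #include <cassert>

  struct lock { int page; lock *next; };
  struct cell { void *node; };

  /* callee: unlink in_lock from the cell the caller already resolved */
  static void discard(lock &in_lock, cell &c)
  {
    lock **prev= reinterpret_cast<lock**>(&c.node);
    while (*prev != &in_lock)
      prev= &(*prev)->next;
    *prev= in_lock.next;
    in_lock.next= nullptr;
  }

  /* caller: one cell lookup per page, then any number of discards */
  static void free_all_on_page(cell &c, int page)
  {
    for (auto *l= static_cast<lock*>(c.node), *next= static_cast<lock*>(nullptr);
         l; l= next)
    {
      next= l->next;
      if (l->page == page)
        discard(*l, c);
    }
  }

  int main()
  {
    lock a{1, nullptr}, b{2, nullptr}, d{1, nullptr};
    cell c{&a};
    a.next= &b;
    b.next= &d;
    free_all_on_page(c, 1);   /* removes a and d, keeps b */
    assert(static_cast<lock*>(c.node) == &b && !b.next);
    return 0;
  }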

lock_rec_free_all_from_discard_page(): Remove a parameter.

Reviewed by: Debarun Banerjee
Marko Mäkelä 2024-11-21 08:59:02 +02:00
parent bcbeef6772
commit 3c312d247c
17 changed files with 353 additions and 539 deletions


@ -2642,25 +2642,19 @@ my_bool regex_list_check_match(
return(FALSE);
}
static
my_bool
find_filter_in_hashtable(
const char* name,
hash_table_t* table,
xb_filter_entry_t** result
)
static bool find_filter_in_hashtable(const char *name, hash_table_t *table,
xb_filter_entry_t **result) noexcept
{
xb_filter_entry_t* found = NULL;
const ulint fold = my_crc32c(0, name, strlen(name));
HASH_SEARCH(name_hash, table, fold,
xb_filter_entry_t*,
found, (void) 0,
!strcmp(found->name, name));
if (found && result) {
*result = found;
}
return (found != NULL);
const ulint fold= my_crc32c(0, name, strlen(name));
if (auto found= table->cell_get(fold)->
find(&xb_filter_entry_t::name_hash,[name](xb_filter_entry_t *f)
{ return !strcmp(f->name, name); }))
{
if (result)
*result= found;
return true;
}
return false;
}
/************************************************************************
@ -4143,14 +4137,13 @@ xb_add_filter(
const char* name, /*!< in: name of table/database */
hash_table_t* hash) /*!< in/out: hash to insert into */
{
xb_filter_entry_t* entry = xb_new_filter_entry(name);
xb_filter_entry_t *entry= xb_new_filter_entry(name);
if (UNIV_UNLIKELY(!hash->array)) {
hash->create(1000);
}
const ulint fold = my_crc32c(0, entry->name, strlen(entry->name));
HASH_INSERT(xb_filter_entry_t, name_hash, hash, fold, entry);
return entry;
if (UNIV_UNLIKELY(!hash->array))
hash->create(1000);
hash->cell_get(my_crc32c(0, entry->name, strlen(entry->name)))->
append(*entry, &xb_filter_entry_t::name_hash);
return entry;
}
/***********************************************************************
@ -4188,12 +4181,8 @@ xb_register_filter_entry(
hash_table_t* tables_hash
)
{
const char* p;
size_t namelen;
xb_filter_entry_t* db_entry = NULL;
namelen = strlen(name);
if ((p = strchr(name, '.')) != NULL) {
size_t namelen = strlen(name);
if (const char* p = strchr(name, '.')) {
char dbname[NAME_LEN + 1];
xb_validate_name(name, p - name);
@ -4202,18 +4191,20 @@ xb_register_filter_entry(
strncpy(dbname, name, p - name);
dbname[p - name] = 0;
if (databases_hash && databases_hash->array) {
const ulint fold = my_crc32c(0, dbname, p - name);
HASH_SEARCH(name_hash, databases_hash,
fold,
xb_filter_entry_t*,
db_entry, (void) 0,
!strcmp(db_entry->name, dbname));
if (UNIV_UNLIKELY(!databases_hash->array)) {
databases_hash->create(1000);
}
if (!db_entry) {
db_entry = xb_add_filter(dbname, databases_hash);
xb_filter_entry_t **prev =
databases_hash->cell_get(my_crc32c(0, name, p - name))
->search(&xb_filter_entry_t::name_hash,
[dbname](xb_filter_entry_t* f)
{ return f && !strcmp(f->name, dbname); });
if (!*prev) {
(*prev = xb_new_filter_entry(dbname))
->has_tables = TRUE;
}
db_entry->has_tables = TRUE;
ut_ad((*prev)->has_tables);
xb_add_filter(name, tables_hash);
} else {
xb_validate_name(name, namelen);
@ -4396,33 +4387,17 @@ xb_filters_init()
}
}
static
void
xb_filter_hash_free(hash_table_t* hash)
static void xb_filter_hash_free(hash_table_t* hash)
{
ulint i;
/* free the hash elements */
for (i = 0; i < hash->n_cells; i++) {
xb_filter_entry_t* table;
table = static_cast<xb_filter_entry_t *>
(HASH_GET_FIRST(hash, i));
while (table) {
xb_filter_entry_t* prev_table = table;
table = static_cast<xb_filter_entry_t *>
(HASH_GET_NEXT(name_hash, prev_table));
const ulint fold = my_crc32c(0, prev_table->name,
strlen(prev_table->name));
HASH_DELETE(xb_filter_entry_t, name_hash, hash,
fold, prev_table);
free(prev_table);
}
}
hash->free();
for (ulint i= 0; i < hash->n_cells; i++)
for (auto prev= static_cast<xb_filter_entry_t*>(hash->array[i].node);
prev; )
{
auto next= prev->name_hash;
free(prev);
prev= next;
}
hash->free();
}
static void xb_regex_list_free(regex_list_t* list)
@ -5331,8 +5306,8 @@ exit:
table->name = ((char*)table) + sizeof(xb_filter_entry_t);
memcpy(table->name, dest_space_name, len + 1);
const ulint fold = my_crc32c(0, dest_space_name, len);
HASH_INSERT(xb_filter_entry_t, name_hash, &inc_dir_tables_hash,
fold, table);
inc_dir_tables_hash.cell_get(fold)->append(
*table, &xb_filter_entry_t::name_hash);
mysql_mutex_lock(&fil_system.mutex);
fil_space = fil_space_get_by_name(dest_space_name);
@ -5752,8 +5727,8 @@ static ibool prepare_handle_new_files(const char *data_home_dir,
strcpy(table->name, table_name.c_str());
const ulint fold = my_crc32c(0, table->name,
table_name.size());
HASH_INSERT(xb_filter_entry_t, name_hash, &inc_dir_tables_hash,
fold, table);
inc_dir_tables_hash.cell_get(fold)->append(
*table, &xb_filter_entry_t::name_hash);
}
return TRUE;
@ -5769,29 +5744,15 @@ rm_if_not_found(
const char* data_home_dir, /*!<in: path to datadir */
const char* db_name, /*!<in: database name */
const char* file_name, /*!<in: file name with suffix */
void* arg __attribute__((unused)))
void*)
{
char name[FN_REFLEN];
xb_filter_entry_t* table;
snprintf(name, FN_REFLEN, "%s/%s", db_name, file_name);
/* Truncate ".ibd" */
const size_t len = strlen(name) - 4;
name[len] = '\0';
const ulint fold = my_crc32c(0, name, len);
HASH_SEARCH(name_hash, &inc_dir_tables_hash, fold,
xb_filter_entry_t*,
table, (void) 0,
!strcmp(table->name, name));
if (!table) {
snprintf(name, FN_REFLEN, "%s/%s/%s", data_home_dir,
db_name, file_name);
return os_file_delete(0, name);
}
return(TRUE);
char name[FN_REFLEN];
/* Truncate ".ibd" */
name[snprintf(name, FN_REFLEN, "%s/%s", db_name, file_name) - 4]= '\0';
if (find_filter_in_hashtable(name, &inc_dir_tables_hash, nullptr))
return true;
snprintf(name, FN_REFLEN, "%s/%s/%s", data_home_dir, db_name, file_name);
return os_file_delete(0, name);
}
/** Function enumerates files in datadir (provided by path) which are matched


@ -543,9 +543,7 @@ static void ha_delete_hash_node(hash_table_t *table, mem_heap_t *heap,
ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const ulint fold= del_node->fold;
HASH_DELETE(ha_node_t, next, table, fold, del_node);
table->cell_get(del_node->fold)->remove(*del_node, &ha_node_t::next);
ha_node_t *top= static_cast<ha_node_t*>(mem_heap_get_top(heap, sizeof *top));
@ -564,8 +562,7 @@ static void ha_delete_hash_node(hash_table_t *table, mem_heap_t *heap,
/* We have to look for the predecessor */
ha_node_t *node= static_cast<ha_node_t*>(cell->node);
while (top != HASH_GET_NEXT(next, node))
node= static_cast<ha_node_t*>(HASH_GET_NEXT(next, node));
while (top != node->next) node= node->next;
/* Now we have the predecessor node */
node->next= del_node;


@ -341,58 +341,50 @@ static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i)
}
/** Deallocate a buffer frame of srv_page_size.
@param[in] buf buffer frame to deallocate */
static
void
buf_buddy_block_free(void* buf)
@param buf buffer frame to deallocate */
static void buf_buddy_block_free(void *buf) noexcept
{
const ulint fold = BUF_POOL_ZIP_FOLD_PTR(buf);
buf_page_t* bpage;
buf_block_t* block;
mysql_mutex_assert_owner(&buf_pool.mutex);
ut_a(!ut_align_offset(buf, srv_page_size));
mysql_mutex_assert_owner(&buf_pool.mutex);
ut_a(!ut_align_offset(buf, srv_page_size));
const ulint fold= BUF_POOL_ZIP_FOLD_PTR(buf);
buf_page_t **prev= buf_pool.zip_hash.cell_get(fold)->
search(&buf_page_t::hash, [buf](const buf_page_t *b)
{
ut_ad(b->in_zip_hash);
ut_ad(b->state() == buf_page_t::MEMORY);
return b->frame == buf;
});
HASH_SEARCH(hash, &buf_pool.zip_hash, fold, buf_page_t*, bpage,
ut_ad(bpage->state() == buf_page_t::MEMORY
&& bpage->in_zip_hash),
bpage->frame == buf);
ut_a(bpage);
ut_a(bpage->state() == buf_page_t::MEMORY);
ut_ad(bpage->in_zip_hash);
ut_d(bpage->in_zip_hash = false);
HASH_DELETE(buf_page_t, hash, &buf_pool.zip_hash, fold, bpage);
bpage->hash = nullptr;
buf_page_t *bpage= *prev;
ut_a(bpage);
ut_a(bpage->frame == buf);
ut_d(bpage->in_zip_hash= false);
*prev= bpage->hash;
bpage->hash= nullptr;
ut_d(memset(buf, 0, srv_page_size));
MEM_UNDEFINED(buf, srv_page_size);
ut_d(memset(buf, 0, srv_page_size));
MEM_UNDEFINED(buf, srv_page_size);
block = (buf_block_t*) bpage;
buf_LRU_block_free_non_file_page(block);
ut_ad(buf_pool.buddy_n_frames > 0);
ut_d(buf_pool.buddy_n_frames--);
buf_LRU_block_free_non_file_page(reinterpret_cast<buf_block_t*>(bpage));
ut_ad(buf_pool.buddy_n_frames > 0);
ut_d(buf_pool.buddy_n_frames--);
}
/**********************************************************************//**
Allocate a buffer block to the buddy allocator. */
static
void
buf_buddy_block_register(
/*=====================*/
buf_block_t* block) /*!< in: buffer frame to allocate */
/** Allocate a buffer block to the buddy allocator.
@param block buffer block to register */
static void buf_buddy_block_register(buf_block_t *block) noexcept
{
const ulint fold = BUF_POOL_ZIP_FOLD(block);
ut_ad(block->page.state() == buf_page_t::MEMORY);
const ulint fold= BUF_POOL_ZIP_FOLD(block);
ut_ad(block->page.state() == buf_page_t::MEMORY);
ut_a(block->page.frame);
ut_a(!ut_align_offset(block->page.frame, srv_page_size));
ut_a(block->page.frame);
ut_a(!ut_align_offset(block->page.frame, srv_page_size));
ut_ad(!block->page.in_zip_hash);
ut_d(block->page.in_zip_hash = true);
HASH_INSERT(buf_page_t, hash, &buf_pool.zip_hash, fold, &block->page);
ut_d(buf_pool.buddy_n_frames++);
ut_ad(!block->page.in_zip_hash);
ut_d(block->page.in_zip_hash= true);
buf_pool.zip_hash.cell_get(fold)->append(block->page, &buf_page_t::hash);
ut_d(buf_pool.buddy_n_frames++);
}
/** Allocate a block from a bigger object.


@ -1309,7 +1309,7 @@ function_exit:
return(thr);
}
bool dict_sys_t::load_sys_tables()
bool dict_sys_t::load_sys_tables() noexcept
{
ut_ad(!srv_any_background_activity());
bool mismatch= false;
@ -1352,7 +1352,7 @@ bool dict_sys_t::load_sys_tables()
return mismatch;
}
dberr_t dict_sys_t::create_or_check_sys_tables()
dberr_t dict_sys_t::create_or_check_sys_tables() noexcept
{
if (sys_tables_exist())
return DB_SUCCESS;


@ -639,6 +639,46 @@ template bool
dict_table_t::parse_name<>(char(&)[NAME_LEN + 1], char(&)[NAME_LEN + 1],
size_t*, size_t*) const;
dict_table_t *dict_sys_t::acquire_temporary_table(table_id_t id) const noexcept
{
ut_ad(frozen());
ut_ad(id >= DICT_HDR_FIRST_ID);
return temp_id_hash.cell_get(ut_fold_ull(id))->
find(&dict_table_t::id_hash, [id](dict_table_t *t)
{
ut_ad(t->is_temporary());
ut_ad(t->cached);
if (t->id != id)
return false;
t->acquire();
return true;
});
}
dict_table_t *dict_sys_t::find_table(table_id_t id) const noexcept
{
ut_ad(frozen());
return table_id_hash.cell_get(ut_fold_ull(id))->
find(&dict_table_t::id_hash, [id](const dict_table_t *t)
{
ut_ad(!t->is_temporary());
ut_ad(t->cached);
return t->id == id;
});
}
dict_table_t *dict_sys_t::find_table(const span<const char> &name)
const noexcept
{
ut_ad(frozen());
return table_hash.cell_get(my_crc32c(0, name.data(), name.size()))->
find(&dict_table_t::name_hash, [name](const dict_table_t *t)
{
return strlen(t->name.m_name) == name.size() &&
!memcmp(t->name.m_name, name.data(), name.size());
});
}
/** Acquire MDL shared for the table name.
@tparam trylock whether to use non-blocking operation
@param[in,out] table table object
@ -927,7 +967,7 @@ dict_table_col_in_clustered_key(
}
/** Initialise the data dictionary cache. */
void dict_sys_t::create()
void dict_sys_t::create() noexcept
{
ut_ad(this == &dict_sys);
ut_ad(!is_initialised());
@ -1113,59 +1153,34 @@ void dict_table_t::add_to_cache()
}
/** Add a table definition to the data dictionary cache */
inline void dict_sys_t::add(dict_table_t* table)
inline void dict_sys_t::add(dict_table_t *table) noexcept
{
ut_ad(!find(table));
ulint fold = my_crc32c(0, table->name.m_name,
strlen(table->name.m_name));
table->autoinc_mutex.init();
table->lock_mutex_init();
/* Look for a table with the same name: error if such exists */
{
dict_table_t* table2;
HASH_SEARCH(name_hash, &table_hash, fold,
dict_table_t*, table2, ut_ad(table2->cached),
!strcmp(table2->name.m_name, table->name.m_name));
ut_a(table2 == NULL);
#ifdef UNIV_DEBUG
/* Look for the same table pointer with a different name */
HASH_SEARCH_ALL(name_hash, &table_hash,
dict_table_t*, table2, ut_ad(table2->cached),
table2 == table);
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
}
HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table);
/* Look for a table with the same id: error if such exists */
hash_table_t* id_hash = table->is_temporary()
? &temp_id_hash : &table_id_hash;
const ulint id_fold = ut_fold_ull(table->id);
{
dict_table_t* table2;
HASH_SEARCH(id_hash, id_hash, id_fold,
dict_table_t*, table2, ut_ad(table2->cached),
table2->id == table->id);
ut_a(table2 == NULL);
#ifdef UNIV_DEBUG
/* Look for the same table pointer with a different id */
HASH_SEARCH_ALL(id_hash, id_hash,
dict_table_t*, table2, ut_ad(table2->cached),
table2 == table);
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table);
}
UT_LIST_ADD_FIRST(table->can_be_evicted ? table_LRU : table_non_LRU,
table);
ut_ad(dict_lru_validate());
ut_ad(!table->name_hash);
ut_ad(!table->id_hash);
table->autoinc_mutex.init();
table->lock_mutex_init();
const char *name= table->name.m_name;
dict_table_t **prev= table_hash.cell_get(my_crc32c(0, name, strlen(name)))->
search(&dict_table_t::name_hash, [name](const dict_table_t *t)
{
if (!t) return true;
ut_ad(t->cached);
ut_a(strcmp(t->name.m_name, name));
return false;
});
*prev= table;
prev= (table->is_temporary() ? temp_id_hash : table_id_hash).
cell_get(ut_fold_ull(table->id))->
search(&dict_table_t::id_hash, [table](const dict_table_t *t)
{
if (!t) return true;
ut_ad(t->cached);
ut_a(t->id != table->id);
return false;
});
*prev= table;
UT_LIST_ADD_FIRST(table->can_be_evicted ? table_LRU : table_non_LRU, table);
ut_ad(dict_lru_validate());
}
/** Test whether a table can be evicted from dict_sys.table_LRU.
@ -1277,7 +1292,7 @@ dict_index_t *dict_index_t::clone_if_needed()
/** Evict unused, unlocked tables from table_LRU.
@param half whether to consider half the tables only (instead of all)
@return number of tables evicted */
ulint dict_sys_t::evict_table_LRU(bool half)
ulint dict_sys_t::evict_table_LRU(bool half) noexcept
{
#ifdef MYSQL_DYNAMIC_PLUGIN
constexpr ulint max_tables = 400;
@ -1475,9 +1490,6 @@ dict_table_rename_in_cache(
ut_a(old_name_len < sizeof old_name);
strcpy(old_name, table->name.m_name);
const uint32_t fold= my_crc32c(0, new_name.data(), new_name.size());
ut_a(!dict_sys.find_table(new_name));
if (!dict_table_is_file_per_table(table)) {
} else if (dberr_t err = table->rename_tablespace(new_name,
replace_new_file)) {
@ -1485,10 +1497,11 @@ dict_table_rename_in_cache(
}
/* Remove table from the hash tables of tables */
HASH_DELETE(dict_table_t, name_hash, &dict_sys.table_hash,
my_crc32c(0, table->name.m_name, old_name_len), table);
dict_sys.table_hash.cell_get(my_crc32c(0, table->name.m_name,
old_name_len))
->remove(*table, &dict_table_t::name_hash);
bool keep_mdl_name = !table->name.is_temporary();
bool keep_mdl_name = !table->name.is_temporary();
if (!keep_mdl_name) {
} else if (const char* s = static_cast<const char*>
@ -1521,8 +1534,16 @@ dict_table_rename_in_cache(
}
/* Add table to hash table of tables */
HASH_INSERT(dict_table_t, name_hash, &dict_sys.table_hash, fold,
table);
ut_ad(!table->name_hash);
dict_table_t** after = reinterpret_cast<dict_table_t**>(
&dict_sys.table_hash.cell_get(my_crc32c(0, new_name.data(),
new_name.size()))
->node);
for (; *after; after = &(*after)->name_hash) {
ut_ad((*after)->cached);
ut_a(strcmp((*after)->name.m_name, new_name.data()));
}
*after = table;
if (table->name.is_temporary()) {
/* In ALTER TABLE we think of the rename table operation
@ -1774,35 +1795,11 @@ dict_table_rename_in_cache(
return(DB_SUCCESS);
}
/**********************************************************************//**
Change the id of a table object in the dictionary cache. This is used in
DISCARD TABLESPACE. */
void
dict_table_change_id_in_cache(
/*==========================*/
dict_table_t* table, /*!< in/out: table object already in cache */
table_id_t new_id) /*!< in: new id to set */
{
ut_ad(dict_sys.locked());
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(!table->is_temporary());
/* Remove the table from the hash table of id's */
HASH_DELETE(dict_table_t, id_hash, &dict_sys.table_id_hash,
ut_fold_ull(table->id), table);
table->id = new_id;
/* Add the table back to the hash table */
HASH_INSERT(dict_table_t, id_hash, &dict_sys.table_id_hash,
ut_fold_ull(table->id), table);
}
/** Evict a table definition from the InnoDB data dictionary cache.
@param[in,out] table cached table definition to be evicted
@param[in] lru whether this is part of least-recently-used eviction
@param[in] keep whether to keep (not free) the object */
void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep)
void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep) noexcept
{
dict_foreign_t* foreign;
dict_index_t* index;
@ -1838,16 +1835,12 @@ void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep)
}
/* Remove table from the hash tables of tables */
HASH_DELETE(dict_table_t, name_hash, &table_hash,
my_crc32c(0, table->name.m_name,
strlen(table->name.m_name)),
table);
hash_table_t* id_hash = table->is_temporary()
? &temp_id_hash : &table_id_hash;
const ulint id_fold = ut_fold_ull(table->id);
HASH_DELETE(dict_table_t, id_hash, id_hash, id_fold, table);
table_hash.cell_get(my_crc32c(0, table->name.m_name,
strlen(table->name.m_name)))
->remove(*table, &dict_table_t::name_hash);
(table->is_temporary() ? temp_id_hash : table_id_hash)
.cell_get(ut_fold_ull(table->id))
->remove(*table, &dict_table_t::id_hash);
/* Remove table from LRU or non-LRU list. */
if (table->can_be_evicted) {
@ -4450,8 +4443,21 @@ dict_fs2utf8(
}
}
/** Insert a table into the hash tables
@param table the table
@param id_hash dict_sys.table_id_hash or dict_sys.temp_id_hash */
static void hash_insert(dict_table_t *table, hash_table_t& id_hash) noexcept
{
ut_ad(table->cached);
dict_sys.table_hash.cell_get(my_crc32c(0, table->name.m_name,
strlen(table->name.m_name)))->
append(*table, &dict_table_t::name_hash);
id_hash.cell_get(ut_fold_ull(table->id))->append(*table,
&dict_table_t::id_hash);
}
/** Resize the hash tables based on the current buffer pool size. */
void dict_sys_t::resize()
void dict_sys_t::resize() noexcept
{
ut_ad(this == &dict_sys);
ut_ad(is_initialised());
@ -4472,32 +4478,18 @@ void dict_sys_t::resize()
table= UT_LIST_GET_NEXT(table_LRU, table))
{
ut_ad(!table->is_temporary());
ulint fold= my_crc32c(0, table->name.m_name, strlen(table->name.m_name));
ulint id_fold= ut_fold_ull(table->id);
HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table);
HASH_INSERT(dict_table_t, id_hash, &table_id_hash, id_fold, table);
hash_insert(table, table_id_hash);
}
for (dict_table_t *table = UT_LIST_GET_FIRST(table_non_LRU); table;
table= UT_LIST_GET_NEXT(table_LRU, table))
{
ulint fold= my_crc32c(0, table->name.m_name, strlen(table->name.m_name));
ulint id_fold= ut_fold_ull(table->id);
HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table);
hash_table_t *id_hash= table->is_temporary()
? &temp_id_hash : &table_id_hash;
HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table);
}
hash_insert(table, table->is_temporary() ? temp_id_hash : table_id_hash);
unlock();
}
/** Close the data dictionary cache on shutdown. */
void dict_sys_t::close()
void dict_sys_t::close() noexcept
{
ut_ad(this == &dict_sys);
if (!is_initialised()) return;
@ -4507,8 +4499,7 @@ void dict_sys_t::close()
/* Free the hash elements. We don't remove them from table_hash
because we are invoking table_hash.free() below. */
for (ulint i= table_hash.n_cells; i--; )
while (dict_table_t *table= static_cast<dict_table_t*>
(HASH_GET_FIRST(&table_hash, i)))
while (auto table= static_cast<dict_table_t*>(table_hash.array[i].node))
dict_sys.remove(table);
table_hash.free();


@ -2559,7 +2559,7 @@ corrupted:
}
dict_table_t *dict_sys_t::load_table(const span<const char> &name,
dict_err_ignore_t ignore)
dict_err_ignore_t ignore) noexcept
{
if (dict_table_t *table= find_table(name))
return table;


@ -227,24 +227,12 @@ fil_validate_skip(void)
}
#endif /* UNIV_DEBUG */
/*******************************************************************//**
Returns the table space by a given id, NULL if not found.
It is unsafe to dereference the returned pointer. It is fine to check
for NULL. */
fil_space_t*
fil_space_get_by_id(
/*================*/
ulint id) /*!< in: space id */
fil_space_t *fil_space_get_by_id(ulint id) noexcept
{
fil_space_t* space;
ut_ad(fil_system.is_initialised());
mysql_mutex_assert_owner(&fil_system.mutex);
HASH_SEARCH(hash, &fil_system.spaces, id,
fil_space_t*, space,, space->id == id);
return(space);
ut_ad(fil_system.is_initialised());
mysql_mutex_assert_owner(&fil_system.mutex);
return fil_system.spaces.cell_get(id)->find
(&fil_space_t::hash, [id](const fil_space_t *s) { return s->id == id; });
}
/** Look up a tablespace.
@ -810,7 +798,7 @@ inline pfs_os_file_t fil_node_t::close_to_free(bool detach_handle)
pfs_os_file_t fil_system_t::detach(fil_space_t *space, bool detach_handle)
{
mysql_mutex_assert_owner(&fil_system.mutex);
HASH_DELETE(fil_space_t, hash, &spaces, space->id, space);
spaces.cell_get(space->id)->remove(*space, &fil_space_t::hash);
if (space->is_in_unflushed_spaces)
{
@ -979,9 +967,15 @@ fil_space_t *fil_space_t::create(ulint id, ulint flags,
DBUG_EXECUTE_IF("fil_space_create_failure", return(NULL););
fil_space_t** after = reinterpret_cast<fil_space_t**>(
&fil_system.spaces.cell_get(id)->node);
for (; *after; after = &(*after)->hash) {
ut_a((*after)->id != id);
}
/* FIXME: if calloc() is defined as an inline function that calls
memset() or bzero(), then GCC 6 -flifetime-dse can optimize it away */
space= new (ut_zalloc_nokey(sizeof(*space))) fil_space_t;
*after = space = new (ut_zalloc_nokey(sizeof(*space))) fil_space_t;
space->id = id;
@ -1005,20 +999,6 @@ fil_space_t *fil_space_t::create(ulint id, ulint flags,
space->latch.SRW_LOCK_INIT(fil_space_latch_key);
if (const fil_space_t *old_space = fil_space_get_by_id(id)) {
ib::error() << "Trying to add tablespace with id " << id
<< " to the cache, but tablespace '"
<< (old_space->chain.start
? old_space->chain.start->name
: "")
<< "' already exists in the cache!";
space->~fil_space_t();
ut_free(space);
return(NULL);
}
HASH_INSERT(fil_space_t, hash, &fil_system.spaces, id, space);
if (opened)
fil_system.add_opened_last_to_space_list(space);
else


@ -47,7 +47,7 @@ dict_hdr_get_new_id(
/** Update dict_sys.row_id in the dictionary header file page. */
void dict_hdr_flush_row_id(row_id_t id);
/** @return A new value for GEN_CLUST_INDEX(DB_ROW_ID) */
inline row_id_t dict_sys_t::get_new_row_id()
inline row_id_t dict_sys_t::get_new_row_id() noexcept
{
row_id_t id= row_id.fetch_add(1);
if (!(id % ROW_ID_WRITE_MARGIN))
@ -56,7 +56,7 @@ inline row_id_t dict_sys_t::get_new_row_id()
}
/** Ensure that row_id is not smaller than id, on IMPORT TABLESPACE */
inline void dict_sys_t::update_row_id(row_id_t id)
inline void dict_sys_t::update_row_id(row_id_t id) noexcept
{
row_id_t sys_id= row_id;
while (id >= sys_id)


@ -395,15 +395,6 @@ dict_index_remove_from_cache(
dict_index_t* index);
/**********************************************************************//**
Change the id of a table object in the dictionary cache. This is used in
DISCARD TABLESPACE. */
void
dict_table_change_id_in_cache(
/*==========================*/
dict_table_t* table, /*!< in/out: table object already in cache */
table_id_t new_id) /*!< in: new id to set */
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Removes a foreign constraint struct from the dictionary cache. */
void
dict_foreign_remove_from_cache(
@ -1385,19 +1376,19 @@ public:
static const char fatal_msg[];
/** @return A new value for GEN_CLUST_INDEX(DB_ROW_ID) */
inline row_id_t get_new_row_id();
inline row_id_t get_new_row_id() noexcept;
/** Ensure that row_id is not smaller than id, on IMPORT TABLESPACE */
inline void update_row_id(row_id_t id);
inline void update_row_id(row_id_t id) noexcept;
/** Recover the global DB_ROW_ID sequence on database startup */
void recover_row_id(row_id_t id)
void recover_row_id(row_id_t id) noexcept
{
row_id= ut_uint64_align_up(id, ROW_ID_WRITE_MARGIN) + ROW_ID_WRITE_MARGIN;
}
/** @return a new temporary table ID */
table_id_t acquire_temporary_table_id()
table_id_t acquire_temporary_table_id() noexcept
{
return temp_table_id.fetch_add(1, std::memory_order_relaxed);
}
@ -1407,55 +1398,32 @@ public:
@return temporary table
@retval nullptr if the table does not exist
(should only happen during the rollback of CREATE...SELECT) */
dict_table_t *acquire_temporary_table(table_id_t id)
{
ut_ad(frozen());
dict_table_t *table;
ulint fold = ut_fold_ull(id);
HASH_SEARCH(id_hash, &temp_id_hash, fold, dict_table_t*, table,
ut_ad(table->cached), table->id == id);
if (UNIV_LIKELY(table != nullptr))
{
DBUG_ASSERT(table->is_temporary());
DBUG_ASSERT(table->id >= DICT_HDR_FIRST_ID);
table->acquire();
}
return table;
}
dict_table_t *acquire_temporary_table(table_id_t id) const noexcept;
/** Look up a persistent table.
@param id table ID
@return table
@retval nullptr if not cached */
dict_table_t *find_table(table_id_t id)
{
ut_ad(frozen());
dict_table_t *table;
ulint fold= ut_fold_ull(id);
HASH_SEARCH(id_hash, &table_id_hash, fold, dict_table_t*, table,
ut_ad(table->cached), table->id == id);
DBUG_ASSERT(!table || !table->is_temporary());
return table;
}
dict_table_t *find_table(table_id_t id) const noexcept;
bool is_initialised() const { return m_initialised; }
bool is_initialised() const noexcept { return m_initialised; }
/** Initialise the data dictionary cache. */
void create();
void create() noexcept;
/** Close the data dictionary cache on shutdown. */
void close();
void close() noexcept;
/** Resize the hash tables based on the current buffer pool size. */
void resize();
void resize() noexcept;
/** Add a table definition to the data dictionary cache */
inline void add(dict_table_t* table);
inline void add(dict_table_t *table) noexcept;
/** Remove a table definition from the data dictionary cache.
@param[in,out] table cached table definition to be evicted
@param[in] lru whether this is part of least-recently-used evictiono
@param[in] keep whether to keep (not free) the object */
void remove(dict_table_t* table, bool lru = false, bool keep = false);
void remove(dict_table_t *table, bool lru= false, bool keep= false) noexcept;
#ifdef UNIV_DEBUG
/** Find a table */
@ -1552,24 +1520,13 @@ public:
/** Evict unused, unlocked tables from table_LRU.
@param half whether to consider half the tables only (instead of all)
@return number of tables evicted */
ulint evict_table_LRU(bool half);
ulint evict_table_LRU(bool half) noexcept;
/** Look up a table in the dictionary cache.
@param name table name
@return table handle
@retval nullptr if not found */
dict_table_t *find_table(const span<const char> &name) const
{
ut_ad(frozen());
for (dict_table_t *table= static_cast<dict_table_t*>
(HASH_GET_FIRST(&table_hash, table_hash.calc_hash
(my_crc32c(0, name.data(), name.size()))));
table; table= table->name_hash)
if (strlen(table->name.m_name) == name.size() &&
!memcmp(table->name.m_name, name.data(), name.size()))
return table;
return nullptr;
}
dict_table_t *find_table(const span<const char> &name) const noexcept;
/** Look up or load a table definition
@param name table name
@ -1577,13 +1534,14 @@ public:
@return table handle
@retval nullptr if not found */
dict_table_t *load_table(const span<const char> &name,
dict_err_ignore_t ignore= DICT_ERR_IGNORE_NONE);
dict_err_ignore_t ignore= DICT_ERR_IGNORE_NONE)
noexcept;
/** Attempt to load the system tables on startup
@return whether any discrepancy with the expected definition was found */
bool load_sys_tables();
bool load_sys_tables() noexcept;
/** Create or check system tables on startup */
dberr_t create_or_check_sys_tables();
dberr_t create_or_check_sys_tables() noexcept;
};
/** the data dictionary cache */


@ -1840,12 +1840,11 @@ fil_delete_file(
/*============*/
const char* path); /*!< in: filepath of the ibd tablespace */
/*******************************************************************//**
Returns the table space by a given id, NULL if not found. */
fil_space_t*
fil_space_get_by_id(
/*================*/
ulint id); /*!< in: space id */
/** Look up a table space by a given id.
@param id tablespace identifier
@return tablespace object
@retval nullptr if not found */
fil_space_t *fil_space_get_by_id(ulint id) noexcept;
/** Note that a non-predefined persistent tablespace has been modified
by redo log.


@ -28,12 +28,27 @@ Created 5/20/1997 Heikki Tuuri
#include "ut0rnd.h"
#include "ut0new.h"
struct hash_table_t;
struct hash_cell_t
{
/** singly-linked, nullptr terminated list of hash buckets */
void *node;
private:
/** @return pointer to the first element
@tparam T type of the element */
template<typename T> T **begin() noexcept
{ return reinterpret_cast<T**>(&node); }
/** @return pointer to the last element
@tparam T type of the element
@param next the next-element pointer in T */
template<typename T> T **end(T *T::*next) noexcept
{
T **prev;
for (prev= begin<T>(); *prev; prev= &((*prev)->*next));
return prev;
}
public:
/** Append an element.
@tparam T type of the element
@param insert the being-inserted element
@ -41,129 +56,63 @@ struct hash_cell_t
template<typename T>
void append(T &insert, T *T::*next) noexcept
{
void **after;
for (after= &node; *after;
after= reinterpret_cast<void**>(&(static_cast<T*>(*after)->*next)));
insert.*next= nullptr;
*after= &insert;
*end<T>(next)= &insert;
}
/** Find for an element.
@tparam T type of the element
@tparam UnaryPred unary predicate
@param next the next-element pointer in T
@param u unary predicate for searching the element
@return the first matching element
@retval nullptr if not found */
template<typename T,typename UnaryPred>
T *find(T *T::*next, UnaryPred u) const noexcept
{
T *n;
for (n= static_cast<T*>(node); n && !u(n); n= n->*next);
return n;
}
/** Search for a pointer to an element.
@tparam T type of the element
@tparam UnaryPred unary predicate
@param next the next-element pointer in T
@param u unary predicate for searching the element
@return pointer to the first matching element,
or to the last element in the chain */
template<typename T,typename UnaryPred>
T **search(T *T::*next, UnaryPred u) noexcept
{
T **prev;
for (prev= begin<T>(); !u(*prev); prev= &((*prev)->*next));
return prev;
}
/** Remove an element.
@tparam T type of the element
@param prev pointer to the element to be removed
@param next the next-element pointer in T */
template<typename T>
void remove(T **prev, T *T::*next) noexcept
{
T &element= **prev;
*prev= element.*next;
element.*next= nullptr;
}
/** Remove an element.
@tparam T type of the element
@param element the being-removed element
@param next the next-element pointer in T */
template<typename T>
void remove(T &element, T *T::*next) noexcept
{
remove(search(next, [&element](const T *p){return p==&element;}), next);
}
};
/*******************************************************************//**
Inserts a struct to a hash table. */
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
(DATA)->NAME = NULL;\
\
cell3333 = &(TABLE)->array[(TABLE)->calc_hash(FOLD)]; \
\
if (cell3333->node == NULL) {\
cell3333->node = DATA;\
} else {\
struct3333 = (TYPE*) cell3333->node;\
\
while (struct3333->NAME != NULL) {\
\
struct3333 = (TYPE*) struct3333->NAME;\
}\
\
struct3333->NAME = DATA;\
}\
} while (0)
#ifdef UNIV_HASH_DEBUG
# define HASH_ASSERT_VALID(DATA) ut_a((void*) (DATA) != (void*) -1)
# define HASH_INVALIDATE(DATA, NAME) *(void**) (&DATA->NAME) = (void*) -1
#else
# define HASH_ASSERT_VALID(DATA) do {} while (0)
# define HASH_INVALIDATE(DATA, NAME) do {} while (0)
#endif
/*******************************************************************//**
Deletes a struct from a hash table. */
#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
cell3333 = &(TABLE)->array[(TABLE)->calc_hash(FOLD)]; \
\
if (cell3333->node == DATA) {\
HASH_ASSERT_VALID(DATA->NAME);\
cell3333->node = DATA->NAME;\
} else {\
struct3333 = (TYPE*) cell3333->node;\
\
while (struct3333->NAME != DATA) {\
\
struct3333 = (TYPE*) struct3333->NAME;\
ut_a(struct3333);\
}\
\
struct3333->NAME = DATA->NAME;\
}\
HASH_INVALIDATE(DATA, NAME);\
} while (0)
/*******************************************************************//**
Gets the first struct in a hash chain, NULL if none. */
#define HASH_GET_FIRST(TABLE, HASH_VAL) (TABLE)->array[HASH_VAL].node
/*******************************************************************//**
Gets the next struct in a hash chain, NULL if none. */
#define HASH_GET_NEXT(NAME, DATA) ((DATA)->NAME)
/********************************************************************//**
Looks for a struct in a hash table. */
#define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)\
{\
(DATA) = (TYPE) HASH_GET_FIRST(TABLE, (TABLE)->calc_hash(FOLD)); \
HASH_ASSERT_VALID(DATA);\
\
while ((DATA) != NULL) {\
ASSERTION;\
if (TEST) {\
break;\
} else {\
HASH_ASSERT_VALID(HASH_GET_NEXT(NAME, DATA));\
(DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA);\
}\
}\
}
/********************************************************************//**
Looks for an item in all hash buckets. */
#define HASH_SEARCH_ALL(NAME, TABLE, TYPE, DATA, ASSERTION, TEST) \
do { \
ulint i3333; \
\
for (i3333 = (TABLE)->n_cells; i3333--; ) { \
(DATA) = (TYPE) HASH_GET_FIRST(TABLE, i3333); \
\
while ((DATA) != NULL) { \
HASH_ASSERT_VALID(DATA); \
ASSERTION; \
\
if (TEST) { \
break; \
} \
\
(DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA); \
} \
\
if ((DATA) != NULL) { \
break; \
} \
} \
} while (0)
/** Hash table with singly-linked overflow lists */
struct hash_table_t
{


@ -1174,9 +1174,9 @@ lock_rec_create(
trx mutex */
/** Remove a record lock request, waiting or granted, on a discarded page
@param hash hash table
@param in_lock lock object */
void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock);
@param in_lock lock object
@param cell hash table cell containing in_lock */
void lock_rec_discard(lock_t *in_lock, hash_cell_t &cell) noexcept;
/** Create a new record lock and inserts it to the lock queue,
without checking for deadlocks or conflicts.


@ -180,7 +180,7 @@ lock_rec_get_next_on_page_const(
const page_id_t page_id{lock->un_member.rec_lock.page_id};
while (!!(lock= static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))))
while (!!(lock= static_cast<const lock_t*>(lock->hash)))
if (lock->un_member.rec_lock.page_id == page_id)
break;
return lock;


@ -153,7 +153,6 @@ using the call command. */
ut_ad(lock_rec_validate_page())
assertions. */
#define UNIV_LRU_DEBUG /* debug the buffer pool LRU */
#define UNIV_HASH_DEBUG /* debug HASH_ macros */
#define UNIV_IBUF_DEBUG /* debug the insert buffer */
#define UNIV_PERF_DEBUG /* debug flag that enables
light weight performance


@ -2363,8 +2363,7 @@ static void lock_rec_dequeue_from_page(lock_t *in_lock, bool owns_wait_mutex)
const ulint rec_fold = page_id.fold();
hash_cell_t &cell = *lock_hash.cell_get(rec_fold);
lock_sys.assert_locked(cell);
HASH_DELETE(lock_t, hash, &lock_hash, rec_fold, in_lock);
cell.remove(*in_lock, &lock_t::hash);
UT_LIST_REMOVE(in_lock->trx->lock.trx_locks, in_lock);
MONITOR_INC(MONITOR_RECLOCK_REMOVED);
@ -2414,16 +2413,14 @@ static void lock_rec_dequeue_from_page(lock_t *in_lock, bool owns_wait_mutex)
}
/** Remove a record lock request, waiting or granted, on a discarded page
@param hash hash table
@param in_lock lock object */
@param in_lock lock object
@param cell hash table cell containing in_lock */
TRANSACTIONAL_TARGET
void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock)
void lock_rec_discard(lock_t *in_lock, hash_cell_t &cell) noexcept
{
ut_ad(!in_lock->is_table());
lock_hash.assert_locked(in_lock->un_member.rec_lock.page_id);
HASH_DELETE(lock_t, hash, &lock_hash,
in_lock->un_member.rec_lock.page_id.fold(), in_lock);
cell.remove(*in_lock, &lock_t::hash);
ut_d(uint32_t old_locks);
{
trx_t *trx= in_lock->trx;
@ -2441,17 +2438,16 @@ void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock)
Removes record lock objects set on an index page which is discarded. This
function does not move locks, or check for waiting locks, therefore the
lock bitmaps must already be reset when this function is called. */
template<bool assert= IF_DBUG(true,false)>
static void
lock_rec_free_all_from_discard_page(page_id_t id, const hash_cell_t &cell,
lock_sys_t::hash_table &lock_hash)
lock_rec_free_all_from_discard_page(page_id_t id, hash_cell_t &cell) noexcept
{
for (lock_t *lock= lock_sys_t::get_first(cell, id); lock; )
{
ut_ad(&lock_hash != &lock_sys.rec_hash ||
lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
ut_ad(!assert || lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
ut_ad(!lock->is_waiting());
lock_t *next_lock= lock_rec_get_next_on_page(lock);
lock_rec_discard(lock_hash, lock);
lock_rec_discard(lock, cell);
lock= next_lock;
}
}
@ -2468,15 +2464,15 @@ ATTRIBUTE_COLD void lock_discard_for_index(const dict_index_t &index)
const ulint n= lock_sys.rec_hash.pad(lock_sys.rec_hash.n_cells);
for (ulint i= 0; i < n; i++)
{
for (lock_t *lock= static_cast<lock_t*>(lock_sys.rec_hash.array[i].node);
lock; )
hash_cell_t &cell= lock_sys.rec_hash.array[i];
for (lock_t *lock= static_cast<lock_t*>(cell.node); lock; )
{
ut_ad(!lock->is_table());
if (lock->index == &index)
{
ut_ad(!lock->is_waiting());
lock_rec_discard(lock_sys.rec_hash, lock);
lock= static_cast<lock_t*>(lock_sys.rec_hash.array[i].node);
lock_rec_discard(lock, cell);
lock= static_cast<lock_t*>(cell.node);
}
else
lock= lock->hash;
@ -3269,7 +3265,7 @@ lock_update_merge_right(
/* Reset the locks on the supremum of the left page, releasing
waiting transactions */
lock_rec_reset_and_release_wait(g.cell1(), l, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(l, g.cell1(), lock_sys.rec_hash);
lock_rec_free_all_from_discard_page(l, g.cell1());
ut_d(lock_assert_no_spatial(l));
}
@ -3301,7 +3297,7 @@ void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old)
/* Move the locks on the supremum of the old page to the supremum of new */
lock_rec_move(g.cell1(), new_block, id, g.cell2(), old,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(old, g.cell2(), lock_sys.rec_hash);
lock_rec_free_all_from_discard_page(old, g.cell2());
}
/*************************************************************//**
@ -3359,7 +3355,7 @@ void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
of the left page */
lock_rec_move(g.cell1(), left, l, g.cell2(), right,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(right, g.cell2(), lock_sys.rec_hash);
lock_rec_free_all_from_discard_page(right, g.cell2());
/* there should exist no page lock on the right page,
otherwise, it will be blocked from merge */
@ -3450,21 +3446,18 @@ lock_update_discard(
} while (heap_no != PAGE_HEAP_NO_SUPREMUM);
}
lock_rec_free_all_from_discard_page(page_id, g.cell2(),
lock_sys.rec_hash);
lock_rec_free_all_from_discard_page(page_id, g.cell2());
} else {
const auto fold = page_id.fold();
auto cell = lock_sys.prdt_hash.cell_get(fold);
auto latch = lock_sys_t::hash_table::latch(cell);
latch->acquire();
lock_rec_free_all_from_discard_page(page_id, *cell,
lock_sys.prdt_hash);
lock_rec_free_all_from_discard_page<false>(page_id, *cell);
latch->release();
cell = lock_sys.prdt_page_hash.cell_get(fold);
latch = lock_sys_t::hash_table::latch(cell);
latch->acquire();
lock_rec_free_all_from_discard_page(page_id, *cell,
lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page<false>(page_id, *cell);
latch->release();
}
}
@ -5118,25 +5111,13 @@ Calculates the number of record lock structs in the record lock hash table.
TRANSACTIONAL_TARGET
static ulint lock_get_n_rec_locks()
{
ulint n_locks = 0;
ulint i;
lock_sys.assert_locked();
for (i = 0; i < lock_sys.rec_hash.n_cells; i++) {
const lock_t* lock;
for (lock = static_cast<const lock_t*>(
HASH_GET_FIRST(&lock_sys.rec_hash, i));
lock != 0;
lock = static_cast<const lock_t*>(
HASH_GET_NEXT(hash, lock))) {
n_locks++;
}
}
return(n_locks);
ulint n_locks= 0;
lock_sys.assert_locked();
for (ulint i= 0; i < lock_sys.rec_hash.n_cells; i++)
for (auto lock= static_cast<lock_t*>(lock_sys.rec_hash.array[i].node);
lock; lock= lock->hash)
n_locks++;
return n_locks;
}
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
@ -5645,10 +5626,8 @@ lock_rec_validate(
lock_sys.assert_locked();
for (const lock_t* lock = static_cast<const lock_t*>(
HASH_GET_FIRST(&lock_sys.rec_hash, start));
lock != NULL;
lock = static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))) {
lock_sys.rec_hash.array[start].node);
lock; lock = lock->hash) {
ut_ad(!lock->trx->read_only
|| !lock->trx->is_autocommit_non_locking());
ut_ad(!lock->is_table());


@ -895,7 +895,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all)
for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next)
{
next= lock_rec_get_next_on_page(lock);
lock_rec_discard(prdt_page_hash, lock);
lock_rec_discard(lock, *cell);
}
if (all)
@ -907,7 +907,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all)
for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next)
{
next= lock_rec_get_next_on_page(lock);
lock_rec_discard(prdt_hash, lock);
lock_rec_discard(lock, *cell);
}
}
@ -919,7 +919,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all)
for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next)
{
next= lock_rec_get_next_on_page(lock);
lock_rec_discard(rec_hash, lock);
lock_rec_discard(lock, *cell);
}
latch->release();


@ -2321,6 +2321,9 @@ row_discard_tablespace(
trx_t* trx, /*!< in/out: transaction handle */
dict_table_t* table) /*!< in/out: table to be discarded */
{
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(!table->is_temporary());
dberr_t err;
/* How do we prevent crashes caused by ongoing operations on
@ -2378,8 +2381,14 @@ row_discard_tablespace(
/* All persistent operations successful, update the
data dictionary memory cache. */
ut_ad(dict_sys.locked());
dict_table_change_id_in_cache(table, new_id);
/* Remove the table from the hash table of id's */
dict_sys.table_id_hash.cell_get(ut_fold_ull(table->id))
->remove(*table, &dict_table_t::id_hash);
table->id = new_id;
dict_sys.table_id_hash.cell_get(ut_fold_ull(table->id))
->append(*table, &dict_table_t::id_hash);
dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);