MDEV-20612 preparation: LockMutexGuard

Let us use the RAII wrapper LockMutexGuard for most operations where
lock_sys.mutex is acquired.
Commit 903464929c by Marko Mäkelä, 2021-02-11 14:36:11 +02:00
(parent 2e64513fba).
11 changed files with 315 additions and 459 deletions.

View file

@ -2038,9 +2038,8 @@ withdraw_retry:
{found, withdraw_started, my_hrtime_coarse()};
withdraw_started = current_time;
lock_sys.mutex_lock();
LockMutexGuard g;
trx_sys.trx_list.for_each(f);
lock_sys.mutex_unlock();
}
if (should_retry_withdraw) {

View file

@ -386,11 +386,12 @@ rtr_pcur_getnext_from_path(
trx_t* trx = thr_get_trx(
btr_cur->rtr_info->thr);
lock_sys.mutex_lock();
lock_init_prdt_from_mbr(
&prdt, &btr_cur->rtr_info->mbr,
mode, trx->lock.lock_heap);
lock_sys.mutex_unlock();
{
LockMutexGuard g;
lock_init_prdt_from_mbr(
&prdt, &btr_cur->rtr_info->mbr,
mode, trx->lock.lock_heap);
}
if (rw_latch == RW_NO_LATCH) {
block->lock.s_lock();
@ -1182,18 +1183,15 @@ rtr_check_discard_page(
}
mysql_mutex_unlock(&rtr_info->rtr_path_mutex);
if (rtr_info->matches) {
mysql_mutex_lock(&rtr_info->matches->rtr_match_mutex);
if (auto matches = rtr_info->matches) {
mysql_mutex_lock(&matches->rtr_match_mutex);
if ((&rtr_info->matches->block)->page.id() == id) {
if (!rtr_info->matches->matched_recs->empty()) {
rtr_info->matches->matched_recs->clear();
}
ut_ad(rtr_info->matches->matched_recs->empty());
rtr_info->matches->valid = false;
if (matches->block.page.id() == id) {
matches->matched_recs->clear();
matches->valid = false;
}
mysql_mutex_unlock(&rtr_info->matches->rtr_match_mutex);
mysql_mutex_unlock(&matches->rtr_match_mutex);
}
}

View file

@ -4465,16 +4465,17 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels)
#endif /* WITH_WSREP */
if (trx->lock.wait_lock)
{
lock_sys.mutex_lock();
mysql_mutex_lock(&lock_sys.wait_mutex);
if (lock_t *lock= trx->lock.wait_lock)
{
trx->mutex_lock();
trx->error_state= DB_INTERRUPTED;
lock_cancel_waiting_and_release(lock);
trx->mutex_unlock();
LockMutexGuard g;
mysql_mutex_lock(&lock_sys.wait_mutex);
if (lock_t *lock= trx->lock.wait_lock)
{
trx->mutex_lock();
trx->error_state= DB_INTERRUPTED;
lock_cancel_waiting_and_release(lock);
trx->mutex_unlock();
}
}
lock_sys.mutex_unlock();
mysql_mutex_unlock(&lock_sys.wait_mutex);
}
}
@ -18101,11 +18102,10 @@ wsrep_abort_transaction(
wsrep_thd_transaction_state_str(victim_thd));
if (victim_trx) {
lock_sys.mutex_lock();
LockMutexGuard g;
victim_trx->mutex_lock();
int rcode= wsrep_innobase_kill_one_trx(bf_thd,
victim_trx, signal);
lock_sys.mutex_unlock();
victim_trx->mutex_unlock();
DBUG_RETURN(rcode);
} else {

View file

@ -3280,10 +3280,8 @@ commit_exit:
ibuf_mtr_commit(&bitmap_mtr);
goto fail_exit;
} else {
lock_sys.mutex_lock();
const auto lock_exists = lock_sys.get_first(page_id);
lock_sys.mutex_unlock();
if (lock_exists) {
LockMutexGuard g;
if (lock_sys.get_first(page_id)) {
goto commit_exit;
}
}

View file

@ -485,7 +485,7 @@ struct trx_lock_t
/** List of pending trx_t::evict_table() */
UT_LIST_BASE_NODE_T(dict_table_t) evicted_tables;
/** number of record locks; writes are protected by lock_sys.mutex */
/** number of record locks; writers use LockGuard or LockMutexGuard */
ulint n_rec_locks;
};

View file

@ -263,13 +263,8 @@ ib_uint64_t DeadlockChecker::s_lock_mark_counter = 0;
DeadlockChecker::state_t DeadlockChecker::s_states[4096];
#ifdef UNIV_DEBUG
/*********************************************************************//**
Validates the lock system.
@return TRUE if ok */
static
bool
lock_validate();
/*============*/
/** Validate the transactional locks. */
static void lock_validate();
/** Validate the record lock queues on a page.
@param block buffer pool block
@ -390,7 +385,7 @@ void lock_sys_t::resize(ulint n_cells)
{
ut_ad(this == &lock_sys);
mutex_lock();
LockMutexGuard g;
hash_table_t old_hash(rec_hash);
rec_hash.create(n_cells);
@ -409,7 +404,6 @@ void lock_sys_t::resize(ulint n_cells)
HASH_MIGRATE(&old_hash, &prdt_page_hash, lock_t, hash,
lock_rec_lock_fold);
old_hash.free();
mutex_unlock();
}
@ -832,8 +826,6 @@ lock_rec_other_has_expl_req(
requests by all transactions
are taken into account */
{
lock_sys.mutex_assert_locked();
ut_ad(mode == LOCK_X || mode == LOCK_S);
/* Only GAP lock can be on SUPREMUM, and we are not looking for
@ -934,13 +926,11 @@ lock_rec_other_has_conflicting(
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: our transaction */
{
lock_t* lock;
lock_sys.mutex_assert_locked();
bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM);
for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
for (lock_t*lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
@ -1889,9 +1879,6 @@ lock_rec_cancel(
/*============*/
lock_t* lock) /*!< in: waiting record lock request */
{
ut_ad(!lock->is_table());
lock_sys.mutex_assert_locked();
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@ -2259,124 +2246,122 @@ lock_move_reorganize_page(
const buf_block_t* oblock) /*!< in: copy of the old, not
reorganized page */
{
lock_t* lock;
UT_LIST_BASE_NODE_T(lock_t) old_locks;
mem_heap_t* heap = NULL;
ulint comp;
const page_id_t id{block->page.id()};
mem_heap_t *heap;
lock_sys.mutex_lock();
{
UT_LIST_BASE_NODE_T(lock_t) old_locks;
UT_LIST_INIT(old_locks, &lock_t::trx_locks);
/* FIXME: This needs to deal with predicate lock too */
lock = lock_sys.get_first(id);
const page_id_t id{block->page.id()};
LockMutexGuard g;
if (lock == NULL) {
lock_sys.mutex_unlock();
/* FIXME: This needs to deal with predicate lock too */
lock_t *lock= lock_sys.get_first(id);
return;
}
if (!lock)
return;
heap = mem_heap_create(256);
heap= mem_heap_create(256);
/* Copy first all the locks on the page to heap and reset the
bitmaps in the original locks; chain the copies of the locks
using the trx_locks field in them. */
/* Copy first all the locks on the page to heap and reset the
bitmaps in the original locks; chain the copies of the locks
using the trx_locks field in them. */
UT_LIST_INIT(old_locks, &lock_t::trx_locks);
do
{
/* Make a copy of the lock */
lock_t *old_lock= lock_rec_copy(lock, heap);
do {
/* Make a copy of the lock */
lock_t* old_lock = lock_rec_copy(lock, heap);
UT_LIST_ADD_LAST(old_locks, old_lock);
UT_LIST_ADD_LAST(old_locks, old_lock);
/* Reset bitmap of lock */
lock_rec_bitmap_reset(lock);
/* Reset bitmap of lock */
lock_rec_bitmap_reset(lock);
if (lock->is_waiting())
{
ut_ad(lock->trx->lock.wait_lock == lock);
lock->type_mode&= ~LOCK_WAIT;
}
if (lock->is_waiting()) {
ut_ad(lock->trx->lock.wait_lock == lock);
lock->type_mode &= ~LOCK_WAIT;
}
lock= lock_rec_get_next_on_page(lock);
}
while (lock);
lock = lock_rec_get_next_on_page(lock);
} while (lock != NULL);
const ulint comp= page_is_comp(block->frame);
ut_ad(comp == page_is_comp(oblock->frame));
comp = page_is_comp(block->frame);
ut_ad(comp == page_is_comp(oblock->frame));
lock_move_granted_locks_to_front(old_locks);
lock_move_granted_locks_to_front(old_locks);
DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize",
ut_list_reverse(old_locks););
DBUG_EXECUTE_IF("do_lock_reverse_page_reorganize",
ut_list_reverse(old_locks););
for (lock= UT_LIST_GET_FIRST(old_locks); lock;
lock= UT_LIST_GET_NEXT(trx_locks, lock))
{
/* NOTE: we copy also the locks set on the infimum and
supremum of the page; the infimum may carry locks if an
update of a record is occurring on the page, and its locks
were temporarily stored on the infimum */
const rec_t *rec1= page_get_infimum_rec(block->frame);
const rec_t *rec2= page_get_infimum_rec(oblock->frame);
for (lock = UT_LIST_GET_FIRST(old_locks); lock;
lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
/* Set locks according to old locks */
for (;;)
{
ulint old_heap_no;
ulint new_heap_no;
ut_d(const rec_t* const orec= rec1);
ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2));
/* NOTE: we copy also the locks set on the infimum and
supremum of the page; the infimum may carry locks if an
update of a record is occurring on the page, and its locks
were temporarily stored on the infimum */
const rec_t* rec1 = page_get_infimum_rec(
buf_block_get_frame(block));
const rec_t* rec2 = page_get_infimum_rec(
buf_block_get_frame(oblock));
if (comp)
{
old_heap_no= rec_get_heap_no_new(rec2);
new_heap_no= rec_get_heap_no_new(rec1);
/* Set locks according to old locks */
for (;;) {
ulint old_heap_no;
ulint new_heap_no;
ut_d(const rec_t* const orec = rec1);
ut_ad(page_rec_is_metadata(rec1)
== page_rec_is_metadata(rec2));
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(rec2, TRUE);
}
else
{
old_heap_no= rec_get_heap_no_old(rec2);
new_heap_no= rec_get_heap_no_old(rec1);
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
if (comp) {
old_heap_no = rec_get_heap_no_new(rec2);
new_heap_no = rec_get_heap_no_new(rec1);
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(rec2, FALSE);
}
rec1 = page_rec_get_next_low(rec1, TRUE);
rec2 = page_rec_get_next_low(rec2, TRUE);
} else {
old_heap_no = rec_get_heap_no_old(rec2);
new_heap_no = rec_get_heap_no_old(rec1);
ut_ad(!memcmp(rec1, rec2,
rec_get_data_size_old(rec2)));
/* Clear the bit in old_lock. */
if (old_heap_no < lock->un_member.rec_lock.n_bits &&
lock_rec_reset_nth_bit(lock, old_heap_no))
{
ut_ad(!page_rec_is_metadata(orec));
rec1 = page_rec_get_next_low(rec1, FALSE);
rec2 = page_rec_get_next_low(rec2, FALSE);
}
/* NOTE that the old lock bitmap could be too
small for the new heap number! */
lock_rec_add_to_queue(lock->type_mode, id, block->frame, new_heap_no,
lock->index, lock->trx, FALSE);
}
/* Clear the bit in old_lock. */
if (old_heap_no < lock->un_member.rec_lock.n_bits
&& lock_rec_reset_nth_bit(lock, old_heap_no)) {
ut_ad(!page_rec_is_metadata(orec));
if (new_heap_no == PAGE_HEAP_NO_SUPREMUM)
{
ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
break;
}
}
/* NOTE that the old lock bitmap could be too
small for the new heap number! */
ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
}
}
lock_rec_add_to_queue(
lock->type_mode, id, block->frame,
new_heap_no,
lock->index, lock->trx, FALSE);
}
if (new_heap_no == PAGE_HEAP_NO_SUPREMUM) {
ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
break;
}
}
ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
}
lock_sys.mutex_unlock();
mem_heap_free(heap);
mem_heap_free(heap);
#ifdef UNIV_DEBUG_LOCK_VALIDATE
if (fil_space_t* space = fil_space_t::get(page_id.space())) {
ut_ad(lock_rec_validate_page(block, space->is_latched()));
space->release();
}
if (fil_space_t *space= fil_space_t::get(id.space()))
{
ut_ad(lock_rec_validate_page(block, space->is_latched()));
space->release();
}
#endif
}
@ -2864,8 +2849,7 @@ lock_update_discard(
ulint heap_no;
const page_id_t heir(heir_block->page.id());
const page_id_t page_id(block->page.id());
LockMutexGuard g;
LockMutexGuard g;
if (lock_sys.get_first(page_id)) {
ut_ad(!lock_sys.get_first_prdt(page_id));
@ -3443,7 +3427,7 @@ lock_table(
err = DB_SUCCESS;
lock_sys.mutex_lock();
LockMutexGuard g;
/* We have to check if the new lock is compatible with any locks
other transactions have in the table lock queue. */
@ -3463,8 +3447,6 @@ lock_table(
lock_table_create(table, mode, trx);
}
lock_sys.mutex_unlock();
trx->mutex_unlock();
return(err);
@ -3681,7 +3663,7 @@ lock_rec_unlock(
heap_no = page_rec_get_heap_no(rec);
lock_sys.mutex_lock();
LockMutexGuard g;
first_lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
@ -3695,8 +3677,6 @@ lock_rec_unlock(
}
}
lock_sys.mutex_unlock();
{
ib::error err;
err << "Unlock row could not find a " << lock_mode
@ -3734,8 +3714,6 @@ released:
#endif /* WITH_WSREP */
}
}
lock_sys.mutex_unlock();
}
#ifdef UNIV_DEBUG
@ -4190,14 +4168,12 @@ lock_print_info_all_transactions(
/*=============================*/
FILE* file) /*!< in/out: file where to print */
{
lock_sys.mutex_assert_locked();
fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:\n");
trx_sys.trx_list.for_each(lock_print_info(file, my_hrtime_coarse()));
lock_sys.mutex_unlock();
ut_ad(lock_validate());
ut_d(lock_validate());
}
#ifdef UNIV_DEBUG
@ -4463,7 +4439,6 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
rec_offs_init(offsets_);
const page_id_t id{block->page.id()};
LockMutexGuard g;
loop:
lock = lock_sys.get_first(id);
@ -4622,44 +4597,30 @@ static my_bool lock_validate_table_locks(rw_trx_hash_element_t *element, void*)
}
/*********************************************************************//**
Validates the lock system.
@return TRUE if ok */
static
bool
lock_validate()
/*===========*/
/** Validate the transactional locks. */
static void lock_validate()
{
std::set<page_id_t> pages;
std::set<page_id_t> pages;
{
LockMutexGuard g;
/* Validate table locks */
trx_sys.rw_trx_hash.iterate(lock_validate_table_locks);
lock_sys.mutex_lock();
for (ulint i= 0; i < lock_sys.rec_hash.n_cells; i++)
{
page_id_t limit{0, 0};
while (const lock_t *lock= lock_rec_validate(i, &limit))
{
if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED)
/* The lock bitmap is empty; ignore it. */
continue;
pages.insert(lock->un_member.rec_lock.page_id);
}
}
}
/* Validate table locks */
trx_sys.rw_trx_hash.iterate(lock_validate_table_locks);
/* Iterate over all the record locks and validate the locks. We
don't want to hog the lock_sys_t::mutex. Release it during the
validation check. */
for (ulint i = 0; i < lock_sys.rec_hash.n_cells; i++) {
page_id_t limit(0, 0);
while (const lock_t* lock = lock_rec_validate(i, &limit)) {
if (lock_rec_find_set_bit(lock) == ULINT_UNDEFINED) {
/* The lock bitmap is empty; ignore it. */
continue;
}
pages.insert(lock->un_member.rec_lock.page_id);
}
}
lock_sys.mutex_unlock();
for (page_id_t page_id : pages) {
lock_rec_block_validate(page_id);
}
return(true);
for (page_id_t page_id : pages)
lock_rec_block_validate(page_id);
}
#endif /* UNIV_DEBUG */
/*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/
@ -4684,129 +4645,97 @@ lock_rec_insert_check_and_lock(
LOCK_GAP type locks from the successor
record */
{
ut_ad(block->frame == page_align(rec));
ut_ad(mtr->is_named_space(index->table->space));
ut_ad(page_rec_is_leaf(rec));
ut_ad(block->frame == page_align(rec));
ut_ad(mtr->is_named_space(index->table->space));
ut_ad(page_is_leaf(block->frame));
ut_ad(!index->table->is_temporary());
ut_ad(!index->table->is_temporary());
ut_ad(page_is_leaf(block->frame));
dberr_t err= DB_SUCCESS;
bool inherit_in= *inherit;
trx_t *trx= thr_get_trx(thr);
const rec_t *next_rec= page_rec_get_next_const(rec);
ulint heap_no= page_rec_get_heap_no(next_rec);
const page_id_t id{block->page.id()};
ut_ad(!rec_is_metadata(next_rec, *index));
dberr_t err;
lock_t* lock;
bool inherit_in = *inherit;
trx_t* trx = thr_get_trx(thr);
const rec_t* next_rec = page_rec_get_next_const(rec);
ulint heap_no = page_rec_get_heap_no(next_rec);
ut_ad(!rec_is_metadata(next_rec, *index));
{
LockMutexGuard g;
/* Because this code is invoked for a running transaction by
the thread that is serving the transaction, it is not necessary
to hold trx->mutex here. */
const page_id_t id{block->page.id()};
/* When inserting a record into an index, the table must be at
least IX-locked. When we are building an index, we would pass
BTR_NO_LOCKING_FLAG and skip the locking altogether. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
lock_sys.mutex_lock();
/* Because this code is invoked for a running transaction by
the thread that is serving the transaction, it is not necessary
to hold trx->mutex here. */
*inherit= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
/* When inserting a record into an index, the table must be at
least IX-locked. When we are building an index, we would pass
BTR_NO_LOCKING_FLAG and skip the locking altogether. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
if (*inherit)
{
/* Spatial index does not use GAP lock protection. It uses
"predicate lock" to protect the "range" */
if (index->is_spatial())
return DB_SUCCESS;
lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
/* If another transaction has an explicit lock request which locks
the gap, waiting or granted, on the successor, the insert has to wait.
if (lock == NULL) {
/* We optimize CPU time usage in the simplest case */
An exception is the case where the lock by the another transaction
is a gap type lock which it placed to wait for its turn to insert. We
do not consider that kind of a lock conflicting with our insert. This
eliminates an unnecessary deadlock which resulted when 2 transactions
had to wait for their insert. Both had waiting gap type lock requests
on the successor, which produced an unnecessary deadlock. */
const unsigned type_mode= LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION;
lock_sys.mutex_unlock();
if (inherit_in && !dict_index_is_clust(index)) {
/* Update the page max trx id field */
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
trx->id, mtr);
}
*inherit = false;
return(DB_SUCCESS);
}
/* Spatial index does not use GAP lock protection. It uses
"predicate lock" to protect the "range" */
if (dict_index_is_spatial(index)) {
return(DB_SUCCESS);
}
*inherit = true;
/* If another transaction has an explicit lock request which locks
the gap, waiting or granted, on the successor, the insert has to wait.
An exception is the case where the lock by the another transaction
is a gap type lock which it placed to wait for its turn to insert. We
do not consider that kind of a lock conflicting with our insert. This
eliminates an unnecessary deadlock which resulted when 2 transactions
had to wait for their insert. Both had waiting gap type lock requests
on the successor, which produced an unnecessary deadlock. */
const unsigned type_mode = LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION;
if (
lock_t *c_lock= lock_rec_other_has_conflicting(type_mode, id,
heap_no, trx);
if (c_lock)
{
trx->mutex_lock();
err= lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
lock_t* c_lock =
#endif /* WITH_WSREP */
lock_rec_other_has_conflicting(type_mode, id, heap_no, trx)) {
trx->mutex_lock();
c_lock,
#endif
type_mode, id, block->frame, heap_no, index, thr, nullptr);
trx->mutex_unlock();
}
}
}
err = lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
c_lock,
#endif /* WITH_WSREP */
type_mode, id, block->frame, heap_no, index,
thr, nullptr);
trx->mutex_unlock();
} else {
err = DB_SUCCESS;
}
lock_sys.mutex_unlock();
switch (err) {
case DB_SUCCESS_LOCKED_REC:
err = DB_SUCCESS;
/* fall through */
case DB_SUCCESS:
if (!inherit_in || dict_index_is_clust(index)) {
break;
}
/* Update the page max trx id field */
page_update_max_trx_id(
block, buf_block_get_page_zip(block), trx->id, mtr);
default:
/* We only care about the two return values. */
break;
}
switch (err) {
case DB_SUCCESS_LOCKED_REC:
err = DB_SUCCESS;
/* fall through */
case DB_SUCCESS:
if (!inherit_in || index->is_clust())
break;
/* Update the page max trx id field */
page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr);
default:
/* We only care about the two return values. */
break;
}
#ifdef UNIV_DEBUG
{
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
const rec_offs* offsets;
rec_offs_init(offsets_);
{
mem_heap_t *heap= nullptr;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
const rec_offs *offsets;
rec_offs_init(offsets_);
offsets = rec_get_offsets(next_rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
offsets= rec_get_offsets(next_rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(false, id, next_rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, id, next_rec, index, offsets));
if (heap != NULL) {
mem_heap_free(heap);
}
}
if (UNIV_LIKELY_NULL(heap))
mem_heap_free(heap);
}
#endif /* UNIV_DEBUG */
return(err);
return err;
}
/*********************************************************************//**
@ -5565,22 +5494,13 @@ lock_table_has_locks(
held on records in this table or on the
table itself */
{
ibool has_locks;
ut_ad(table != NULL);
lock_sys.mutex_lock();
has_locks = UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
LockMutexGuard g;
bool has_locks= UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
#ifdef UNIV_DEBUG
if (!has_locks) {
trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table);
}
if (!has_locks)
trx_sys.rw_trx_hash.iterate(lock_table_locks_lookup, table);
#endif /* UNIV_DEBUG */
lock_sys.mutex_unlock();
return(has_locks);
return has_locks;
}
/*******************************************************************//**
@ -5606,7 +5526,7 @@ lock_trx_has_sys_table_locks(
const lock_t* strongest_lock = 0;
lock_mode strongest = LOCK_NONE;
lock_sys.mutex_lock();
LockMutexGuard g;
const lock_list::const_iterator end = trx->lock.table_locks.end();
lock_list::const_iterator it = trx->lock.table_locks.begin();
@ -5627,7 +5547,6 @@ lock_trx_has_sys_table_locks(
}
if (strongest == LOCK_NONE) {
lock_sys.mutex_unlock();
return(NULL);
}
@ -5652,8 +5571,6 @@ lock_trx_has_sys_table_locks(
}
}
lock_sys.mutex_unlock();
return(strongest_lock);
}
@ -5667,10 +5584,12 @@ bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
page_id_t id, ulint heap_no)
{
ut_ad(heap_no > PAGE_HEAP_NO_SUPREMUM);
LockMutexGuard g;
ut_ad(lock_table_has(&trx, &table, LOCK_IX));
ut_ad(lock_table_has(&trx, &table, LOCK_X) ||
lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, &trx));
if (!lock_table_has(&trx, &table, LOCK_X))
{
LockMutexGuard g;
ut_ad(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, &trx));
}
return true;
}
#endif /* UNIV_DEBUG */

View file

@ -234,16 +234,13 @@ lock_prdt_has_lock(
attached to the new lock */
const trx_t* trx) /*!< in: transaction */
{
lock_t* lock;
lock_sys.mutex_assert_locked();
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
for (lock = lock_rec_get_first(
lock_hash_get(type_mode), id, PRDT_HEAPNO);
lock != NULL;
for (lock_t* lock = lock_rec_get_first(lock_hash_get(type_mode), id,
PRDT_HEAPNO); lock;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
ut_ad(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
@ -291,10 +288,8 @@ lock_prdt_other_has_conflicting(
the new lock will be on */
const trx_t* trx) /*!< in: our transaction */
{
lock_sys.mutex_assert_locked();
for (lock_t* lock = lock_rec_get_first(
lock_hash_get(mode), id, PRDT_HEAPNO);
for (lock_t* lock = lock_rec_get_first(lock_hash_get(mode), id,
PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
@ -388,8 +383,6 @@ lock_prdt_find_on_page(
{
lock_t* lock;
lock_sys.mutex_assert_locked();
for (lock = lock_sys.get_first(*lock_hash_get(type_mode),
block->page.id());
lock != NULL;
@ -502,83 +495,58 @@ lock_prdt_insert_check_and_lock(
lock_prdt_t* prdt) /*!< in: Predicates with Minimum Bound
Rectangle */
{
ut_ad(block->frame == page_align(rec));
ut_ad(!index->table->is_temporary());
ut_ad(index->is_spatial());
ut_ad(block->frame == page_align(rec));
ut_ad(!index->table->is_temporary());
ut_ad(index->is_spatial());
trx_t* trx = thr_get_trx(thr);
const page_id_t id{block->page.id()};
trx_t *trx= thr_get_trx(thr);
const page_id_t id{block->page.id()};
dberr_t err= DB_SUCCESS;
lock_sys.mutex_lock();
{
LockMutexGuard g;
/* Because this code is invoked for a running transaction by
the thread that is serving the transaction, it is not necessary
to hold trx->mutex here. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
/* Because this code is invoked for a running transaction by
the thread that is serving the transaction, it is not necessary
to hold trx->mutex here. */
/* Only need to check locks on prdt_hash */
if (ut_d(lock_t *lock=)
lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO))
{
ut_ad(lock->type_mode & LOCK_PREDICATE);
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
/* If another transaction has an explicit lock request which locks
the predicate, waiting or granted, on the successor, the insert
has to wait.
lock_t* lock;
Similar to GAP lock, we do not consider lock from inserts conflicts
with each other */
/* Only need to check locks on prdt_hash */
lock = lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO);
const ulint mode= LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
lock_t *c_lock= lock_prdt_other_has_conflicting(mode, id, prdt, trx);
if (lock == NULL) {
lock_sys.mutex_unlock();
/* Update the page max trx id field */
page_update_max_trx_id(block, buf_block_get_page_zip(block),
trx->id, mtr);
return(DB_SUCCESS);
}
ut_ad(lock->type_mode & LOCK_PREDICATE);
dberr_t err;
/* If another transaction has an explicit lock request which locks
the predicate, waiting or granted, on the successor, the insert
has to wait.
Similar to GAP lock, we do not consider lock from inserts conflicts
with each other */
const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
const lock_t* wait_for = lock_prdt_other_has_conflicting(
mode, id, prdt, trx);
if (wait_for != NULL) {
rtr_mbr_t* mbr = prdt_get_mbr_from_prdt(prdt);
/* Allocate MBR on the lock heap */
lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap);
/* Note that we may get DB_SUCCESS also here! */
trx->mutex_lock();
err = lock_rec_enqueue_waiting(
if (c_lock)
{
rtr_mbr_t *mbr= prdt_get_mbr_from_prdt(prdt);
/* Allocate MBR on the lock heap */
lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap);
trx->mutex_lock();
err= lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
NULL, /* FIXME: replicate SPATIAL INDEX locks */
c_lock,
#endif
LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION,
id, block->frame, PRDT_HEAPNO, index, thr, prdt);
mode, id, block->frame, PRDT_HEAPNO, index, thr, prdt);
trx->mutex_unlock();
}
}
}
trx->mutex_unlock();
} else {
err = DB_SUCCESS;
}
if (err == DB_SUCCESS)
/* Update the page max trx id field */
page_update_max_trx_id(block, buf_block_get_page_zip(block), trx->id, mtr);
lock_sys.mutex_unlock();
if (err == DB_SUCCESS) {
/* Update the page max trx id field */
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
trx->id, mtr);
}
return(err);
return err;
}
/**************************************************************//**
@ -593,7 +561,7 @@ lock_prdt_update_parent(
lock_prdt_t* right_prdt, /*!< in: MBR on the new page */
const page_id_t page_id) /*!< in: parent page */
{
lock_sys.mutex_lock();
LockMutexGuard g;
/* Get all locks in parent */
for (lock_t *lock = lock_sys.get_first_prdt(page_id);
@ -630,8 +598,6 @@ lock_prdt_update_parent(
lock_prdt, false);
}
}
lock_sys.mutex_unlock();
}
/**************************************************************//**
@ -694,15 +660,13 @@ lock_prdt_update_split(
lock_prdt_t* new_prdt, /*!< in: MBR on the new page */
const page_id_t page_id) /*!< in: page number */
{
lock_sys.mutex_lock();
LockMutexGuard g;
lock_prdt_update_split_low(new_block, prdt, new_prdt,
page_id, LOCK_PREDICATE);
lock_prdt_update_split_low(new_block, NULL, NULL,
page_id, LOCK_PRDT_PAGE);
lock_sys.mutex_unlock();
}
/*********************************************************************//**
@ -768,7 +732,7 @@ lock_prdt_lock(
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
lock_sys.mutex_lock();
LockMutexGuard g;
const unsigned prdt_mode = type_mode | mode;
lock_t* lock = lock_sys.get_first(hash, id);
@ -831,8 +795,6 @@ lock_prdt_lock(
}
}
lock_sys.mutex_unlock();
if (status == LOCK_REC_SUCCESS_CREATED && type_mode == LOCK_PREDICATE) {
/* Append the predicate in the lock record */
lock_prdt_set_prdt(lock, prdt);
@ -861,7 +823,7 @@ lock_place_prdt_page_lock(
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
lock_sys.mutex_lock();
LockMutexGuard g;
const lock_t* lock = lock_sys.get_first_prdt_page(page_id);
const ulint mode = LOCK_S | LOCK_PRDT_PAGE;
@ -891,8 +853,6 @@ lock_place_prdt_page_lock(
#endif /* PRDT_DIAG */
}
lock_sys.mutex_unlock();
return(DB_SUCCESS);
}
@ -902,15 +862,9 @@ lock_place_prdt_page_lock(
@return true if there is none */
bool lock_test_prdt_page_lock(const trx_t *trx, const page_id_t page_id)
{
  /* Acquire lock_sys.mutex for the duration of the lookup (RAII). */
  LockMutexGuard g;
  lock_t *lock= lock_sys.get_first_prdt_page(page_id);
  /* There is no conflict if no page lock exists, or the only
  holder is the transaction itself. */
  return !lock || trx == lock->trx;
}
/*************************************************************//**
@ -923,7 +877,7 @@ lock_prdt_rec_move(
the receiving record */
const page_id_t donator) /*!< in: target page */
{
lock_sys.mutex_lock();
LockMutexGuard g;
for (lock_t *lock = lock_rec_get_first(&lock_sys.prdt_hash,
donator, PRDT_HEAPNO);
@ -942,8 +896,6 @@ lock_prdt_rec_move(
type_mode, receiver, lock->index, lock->trx,
lock_prdt, false);
}
lock_sys.mutex_unlock();
}
/** Removes predicate lock objects set on an index page which is discarded.
@ -952,18 +904,12 @@ lock_prdt_rec_move(
void
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash)
{
  /* The caller must already hold lock_sys.mutex. */
  lock_sys.mutex_assert_locked();

  /* Discard every predicate lock on the page. Fetch the successor
  before lock_rec_discard() invalidates the current element. */
  for (lock_t *lock= lock_sys.get_first(*lock_hash, id), *next; lock;
       lock= next)
  {
    next= lock_rec_get_next_on_page(lock);
    lock_rec_discard(lock);
  }
}

View file

@ -707,11 +707,12 @@ row_ins_foreign_trx_print(
ut_ad(!srv_read_only_mode);
lock_sys.mutex_lock();
n_rec_locks = trx->lock.n_rec_locks;
n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size = mem_heap_get_size(trx->lock.lock_heap);
lock_sys.mutex_unlock();
{
LockMutexGuard g;
n_rec_locks = trx->lock.n_rec_locks;
n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size = mem_heap_get_size(trx->lock.lock_heap);
}
mysql_mutex_lock(&dict_foreign_err_mutex);
rewind(dict_foreign_err_file);

View file

@ -2613,9 +2613,10 @@ skip:
}
if (!srv_fast_shutdown && !trx_sys.any_active_transactions()) {
lock_sys.mutex_lock();
skip = UT_LIST_GET_LEN(table->locks) != 0;
lock_sys.mutex_unlock();
{
LockMutexGuard g;
skip = UT_LIST_GET_LEN(table->locks) != 0;
}
if (skip) {
/* We cannot drop tables that are locked by XA
PREPARE transactions. */

View file

@ -1181,7 +1181,7 @@ static void fetch_data_into_cache_low(trx_i_s_cache_t *cache, const trx_t *trx)
static void fetch_data_into_cache(trx_i_s_cache_t *cache)
{
lock_sys.mutex_assert_locked();
LockMutexGuard g;
trx_i_s_cache_clear(cache);
/* Capture the state of transactions */
@ -1211,10 +1211,7 @@ trx_i_s_possibly_fetch_data_into_cache(
}
/* We need to read trx_sys and record/table lock queues */
lock_sys.mutex_lock();
fetch_data_into_cache(cache);
lock_sys.mutex_unlock();
/* update cache last read time */
cache->last_read = my_interval_timer();

View file

@ -1257,12 +1257,12 @@ trx_update_mod_tables_timestamp(
/* recheck while holding the mutex that blocks
table->acquire() */
dict_sys.mutex_lock();
lock_sys.mutex_lock();
const bool do_evict = !table->get_ref_count()
&& !UT_LIST_GET_LEN(table->locks);
lock_sys.mutex_unlock();
if (do_evict) {
dict_sys.remove(table, true);
{
LockMutexGuard g;
if (!table->get_ref_count()
&& !UT_LIST_GET_LEN(table->locks)) {
dict_sys.remove(table, true);
}
}
dict_sys.mutex_unlock();
#endif
@ -1862,18 +1862,15 @@ trx_print(
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
{
ulint n_rec_locks;
ulint n_trx_locks;
ulint heap_size;
ulint n_rec_locks, n_trx_locks, heap_size;
{
LockMutexGuard g;
n_rec_locks= trx->lock.n_rec_locks;
n_trx_locks= UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size= mem_heap_get_size(trx->lock.lock_heap);
}
lock_sys.mutex_lock();
n_rec_locks = trx->lock.n_rec_locks;
n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size = mem_heap_get_size(trx->lock.lock_heap);
lock_sys.mutex_unlock();
trx_print_low(f, trx, max_query_len,
n_rec_locks, n_trx_locks, heap_size);
trx_print_low(f, trx, max_query_len, n_rec_locks, n_trx_locks, heap_size);
}
/** Prepare a transaction.