mirror of
https://github.com/MariaDB/server.git
synced 2025-02-01 19:41:47 +01:00
FT-584 use trylock inside of the lock tree manager get_status function so that it is non-blocking
This commit is contained in:
parent
8fc7c28874
commit
61195094c7
2 changed files with 34 additions and 19 deletions
|
@@ -598,33 +598,33 @@ void locktree::manager::get_status(LTM_STATUS statp) {
|
|||
STATUS_VALUE(LTM_LONG_WAIT_ESCALATION_COUNT) = m_long_wait_escalation_count;
|
||||
STATUS_VALUE(LTM_LONG_WAIT_ESCALATION_TIME) = m_long_wait_escalation_time;
|
||||
|
||||
mutex_lock();
|
||||
|
||||
uint64_t lock_requests_pending = 0;
|
||||
uint64_t sto_num_eligible = 0;
|
||||
uint64_t sto_end_early_count = 0;
|
||||
tokutime_t sto_end_early_time = 0;
|
||||
|
||||
struct lt_counters lt_counters = m_lt_counters;
|
||||
|
||||
size_t num_locktrees = m_locktree_map.size();
|
||||
for (size_t i = 0; i < num_locktrees; i++) {
|
||||
locktree *lt;
|
||||
int r = m_locktree_map.fetch(i, &lt);
|
||||
invariant_zero(r);
|
||||
size_t num_locktrees = 0;
|
||||
struct lt_counters lt_counters = {};
|
||||
|
||||
toku_mutex_lock(&lt->m_lock_request_info.mutex);
|
||||
lock_requests_pending += lt->m_lock_request_info.pending_lock_requests.size();
|
||||
add_lt_counters(&lt_counters, &lt->m_lock_request_info.counters);
|
||||
toku_mutex_unlock(&lt->m_lock_request_info.mutex);
|
||||
|
||||
sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 1 : 0;
|
||||
sto_end_early_count += lt->m_sto_end_early_count;
|
||||
sto_end_early_time += lt->m_sto_end_early_time;
|
||||
if (toku_mutex_trylock(&m_mutex) == 0) {
|
||||
lt_counters = m_lt_counters;
|
||||
num_locktrees = m_locktree_map.size();
|
||||
for (size_t i = 0; i < num_locktrees; i++) {
|
||||
locktree *lt;
|
||||
int r = m_locktree_map.fetch(i, &lt);
|
||||
invariant_zero(r);
|
||||
if (toku_mutex_trylock(&lt->m_lock_request_info.mutex) == 0) {
|
||||
lock_requests_pending += lt->m_lock_request_info.pending_lock_requests.size();
|
||||
add_lt_counters(&lt_counters, &lt->m_lock_request_info.counters);
|
||||
toku_mutex_unlock(&lt->m_lock_request_info.mutex);
|
||||
}
|
||||
sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 1 : 0;
|
||||
sto_end_early_count += lt->m_sto_end_early_count;
|
||||
sto_end_early_time += lt->m_sto_end_early_time;
|
||||
}
|
||||
mutex_unlock();
|
||||
}
|
||||
|
||||
mutex_unlock();
|
||||
|
||||
STATUS_VALUE(LTM_NUM_LOCKTREES) = num_locktrees;
|
||||
STATUS_VALUE(LTM_LOCK_REQUESTS_PENDING) = lock_requests_pending;
|
||||
STATUS_VALUE(LTM_STO_NUM_ELIGIBLE) = sto_num_eligible;
|
||||
|
|
|
@@ -213,6 +213,21 @@ toku_mutex_lock(toku_mutex_t *mutex) {
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline int
|
||||
toku_mutex_trylock(toku_mutex_t *mutex) {
|
||||
int r = pthread_mutex_trylock(&mutex->pmutex);
|
||||
#if TOKU_PTHREAD_DEBUG
|
||||
if (r == 0) {
|
||||
invariant(mutex->valid);
|
||||
invariant(!mutex->locked);
|
||||
invariant(mutex->owner == 0);
|
||||
mutex->locked = true;
|
||||
mutex->owner = pthread_self();
|
||||
}
|
||||
#endif
|
||||
return r;
|
||||
}
|
||||
|
||||
static inline void
|
||||
toku_mutex_unlock(toku_mutex_t *mutex) {
|
||||
#if TOKU_PTHREAD_DEBUG
|
||||
|
|
Loading…
Add table
Reference in a new issue