mirror of
https://github.com/MariaDB/server.git
synced 2026-05-14 19:07:15 +02:00
Typically, index_lock and fil_space_t::latch will be held for a longer time than the spin loop in latch acquisition would be waiting for. Let us avoid spin loops for those as well as dict_sys.latch, which could be held in exclusive mode for a longer time (while loading metadata into the buffer pool and the dictionary cache). Performance testing on a dual Intel Xeon E5-2630 v4 (2 NUMA nodes) suggests that the buffer pool page latch (block_lock) benefits from a spin loop in both read-only and read-write workloads where the working set is slightly larger than the buffer pool. Presumably, most contention would occur on leaf page latches. Contention on upper level pages in the buffer pool should intuitively last longer. We introduce srw_spin_lock and srw_spin_mutex to allow users of srw_lock or srw_mutex to opt in for the spin loop. On Microsoft Windows, a spin loop variant was not and will not be available; srw_mutex and srw_lock will simply wrap SRWLOCK. That is, on Microsoft Windows, the parameters innodb_sync_spin_loops and innodb_spin_wait_delay will only affect block_lock.
227 lines
8.1 KiB
C++
227 lines
8.1 KiB
C++
/*****************************************************************************

Copyright (c) 2020, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/
|
|
|
|
#pragma once
|
|
#include <atomic>
|
|
#include "my_dbug.h"
|
|
|
|
#if !(defined __linux__ || defined __OpenBSD__ || defined _WIN32)
|
|
# define SUX_LOCK_GENERIC
|
|
#elif 0 // defined SAFE_MUTEX
|
|
# define SUX_LOCK_GENERIC /* Use dummy implementation for debugging purposes */
|
|
#endif
|
|
|
|
#ifdef SUX_LOCK_GENERIC
|
|
/** Simple read-update-write lock based on std::atomic */
|
|
#else
|
|
/** Simple read-write lock based on std::atomic */
|
|
#endif
|
|
class rw_lock
{
  /** The lock word.
  Bit layout: WRITER (bit 31), WRITER_WAITING (bit 30),
  UPDATER (bit 29, only with SUX_LOCK_GENERIC); the remaining
  low-order bits form the count of granted shared (read) locks. */
  std::atomic<uint32_t> lock;

protected:
  /** Available lock */
  static constexpr uint32_t UNLOCKED= 0;
  /** Flag to indicate that write_lock() is being held */
  static constexpr uint32_t WRITER= 1U << 31;
  /** Flag to indicate that write_lock_wait() is pending */
  static constexpr uint32_t WRITER_WAITING= 1U << 30;
  /** Flag to indicate that write_lock() or write_lock_wait() is pending */
  static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;
#ifdef SUX_LOCK_GENERIC
  /** Flag to indicate that an update lock exists */
  static constexpr uint32_t UPDATER= 1U << 29;
#endif /* SUX_LOCK_GENERIC */

  /** Start waiting for an exclusive lock.
  memory_order_relaxed suffices here: this only advertises intent;
  the actual acquisition in write_lock_wait_try() uses
  memory_order_acquire.
  @return current value of the lock word */
  uint32_t write_lock_wait_start()
  { return lock.fetch_or(WRITER_WAITING, std::memory_order_relaxed); }
  /** Wait for an exclusive lock.
  @param l the value of the lock word (updated on failure)
  @return whether the exclusive lock was acquired */
  bool write_lock_wait_try(uint32_t &l)
  {
    return lock.compare_exchange_strong(l, WRITER, std::memory_order_acquire,
                                        std::memory_order_relaxed);
  }
  /** Try to acquire a shared lock.
  @tparam prioritize_updater whether to ignore WRITER_WAITING for UPDATER
  @param l the value of the lock word
  @return whether the lock was acquired */
#ifdef SUX_LOCK_GENERIC
  template<bool prioritize_updater= false>
#endif /* SUX_LOCK_GENERIC */
  bool read_trylock(uint32_t &l)
  {
    l= UNLOCKED;
    /* On CAS failure, compare_exchange_strong() refreshes l with the
    current lock word, so each iteration retries against fresh state. */
    while (!lock.compare_exchange_strong(l, l + 1, std::memory_order_acquire,
                                         std::memory_order_relaxed))
    {
      /* If WRITER is set, no shared locks may coexist with it. */
      DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
#ifdef SUX_LOCK_GENERIC
      /* The shared-lock count must not overflow into the UPDATER bit. */
      DBUG_ASSERT((~(WRITER_PENDING | UPDATER) & l) < UPDATER);
      if (prioritize_updater
          ? (WRITER & l) || ((WRITER_WAITING | UPDATER) & l) == WRITER_WAITING
          : (WRITER_PENDING & l))
#else /* SUX_LOCK_GENERIC */
      if (l & WRITER_PENDING)
#endif /* SUX_LOCK_GENERIC */
        /* Give precedence to a granted or pending exclusive lock. */
        return false;
    }
    return true;
  }
#ifdef SUX_LOCK_GENERIC
  /** Try to acquire an update lock.
  @param l the value of the lock word
  @return whether the lock was acquired */
  bool update_trylock(uint32_t &l)
  {
    l= UNLOCKED;
    while (!lock.compare_exchange_strong(l, l | UPDATER,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed))
    {
      DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
      DBUG_ASSERT((~(WRITER_PENDING | UPDATER) & l) < UPDATER);
      /* Only one update lock may exist, and it is incompatible with
      a granted or pending exclusive lock. */
      if ((WRITER_PENDING | UPDATER) & l)
        return false;
    }
    return true;
  }
  /** Try to upgrade an update lock to an exclusive lock.
  @return whether the update lock was upgraded to exclusive */
  bool upgrade_trylock()
  {
    auto l= UPDATER;
    while (!lock.compare_exchange_strong(l, WRITER,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed))
    {
      /* Either conflicting (read) locks have been granted, or
      the WRITER_WAITING flag was set by some thread that is waiting
      to become WRITER. */
      DBUG_ASSERT(((WRITER | UPDATER) & l) == UPDATER);
      if (~(WRITER_WAITING | UPDATER) & l)
        return false;
    }
    DBUG_ASSERT((l & ~WRITER_WAITING) == UPDATER);
    /* Any thread that had set WRITER_WAITING will eventually be woken
    up by ssux_lock_impl::x_unlock() or ssux_lock_impl::u_unlock()
    (not ssux_lock_impl::wr_u_downgrade() to keep the code simple). */
    return true;
  }
  /** Downgrade an exclusive lock to an update lock. */
  void downgrade()
  {
    IF_DBUG_ASSERT(auto l=,)
    /* XOR flips WRITER off and UPDATER on in one atomic operation. */
    lock.fetch_xor(WRITER | UPDATER, std::memory_order_relaxed);
    DBUG_ASSERT((l & ~WRITER_WAITING) == WRITER);
  }
#endif /* SUX_LOCK_GENERIC */

  /** Wait for an exclusive lock.
  @return whether the exclusive lock was acquired */
  bool write_lock_poll()
  {
    auto l= WRITER_WAITING;
    if (write_lock_wait_try(l))
      return true;
    if (!(l & WRITER_WAITING))
      /* write_lock() must have succeeded for another thread;
      re-set WRITER_WAITING for the next poll attempt. */
      write_lock_wait_start();
    return false;
  }
  /** @return the lock word value */
  uint32_t value() const { return lock.load(std::memory_order_acquire); }

public:
  /** Default constructor */
  rw_lock() : lock(UNLOCKED) {}

  /** Release a shared lock.
  The release order pairs with the acquire in the lock acquisition paths.
  @return whether any writers may have to be woken up */
  bool read_unlock()
  {
    auto l= lock.fetch_sub(1, std::memory_order_release);
    DBUG_ASSERT(!(l & WRITER)); /* no write lock must have existed */
#ifdef SUX_LOCK_GENERIC
    DBUG_ASSERT(~(WRITER_PENDING | UPDATER) & l); /* at least one read lock */
    return (~(WRITER_PENDING | UPDATER) & l) == 1;
#else /* SUX_LOCK_GENERIC */
    DBUG_ASSERT(~(WRITER_PENDING) & l); /* at least one read lock */
    return (~WRITER_PENDING & l) == 1;
#endif /* SUX_LOCK_GENERIC */
  }
#ifdef SUX_LOCK_GENERIC
  /** Release an update lock */
  void update_unlock()
  {
    IF_DBUG_ASSERT(auto l=,)
    lock.fetch_and(~UPDATER, std::memory_order_release);
    /* the update lock must have existed */
    DBUG_ASSERT((l & (WRITER | UPDATER)) == UPDATER);
  }
#endif /* SUX_LOCK_GENERIC */
  /** Release an exclusive lock */
  void write_unlock()
  {
    IF_DBUG_ASSERT(auto l=,)
    lock.fetch_and(~WRITER, std::memory_order_release);
    /* the write lock must have existed */
#ifdef SUX_LOCK_GENERIC
    DBUG_ASSERT((l & (WRITER | UPDATER)) == WRITER);
#else /* SUX_LOCK_GENERIC */
    DBUG_ASSERT(l & WRITER);
#endif /* SUX_LOCK_GENERIC */
  }
  /** Try to acquire a shared lock.
  @return whether the lock was acquired */
  bool read_trylock() { uint32_t l; return read_trylock(l); }
  /** Try to acquire an exclusive lock.
  @return whether the lock was acquired */
  bool write_trylock()
  {
    auto l= UNLOCKED;
    return lock.compare_exchange_strong(l, WRITER, std::memory_order_acquire,
                                        std::memory_order_relaxed);
  }

  /** @return whether an exclusive lock is being held by any thread */
  bool is_write_locked() const
  { return !!(lock.load(std::memory_order_relaxed) & WRITER); }
#ifdef SUX_LOCK_GENERIC
  /** @return whether an update lock is being held by any thread */
  bool is_update_locked() const
  { return !!(lock.load(std::memory_order_relaxed) & UPDATER); }
#endif /* SUX_LOCK_GENERIC */
  /** @return whether a shared lock is being held by any thread */
  bool is_read_locked() const
  {
    auto l= lock.load(std::memory_order_relaxed);
    return (l & ~WRITER_PENDING) && !(l & WRITER);
  }
  /** @return whether any lock is being held or waited for by any thread */
  bool is_locked_or_waiting() const
  { return lock.load(std::memory_order_relaxed) != 0; }
  /** @return whether any lock is being held by any thread
  (WRITER_WAITING alone does not count: it is a pending request) */
  bool is_locked() const
  { return (lock.load(std::memory_order_relaxed) & ~WRITER_WAITING) != 0; }
};
|