MDEV-25281: Switch to a non-atomic (vs atomic) distributed counter to track the page-access counter

As part of MDEV-21212, n_page_gets, which tracks page accesses, was ported to a distributed counter that by default uses atomic sub-counters. n_page_gets was originally a non-atomic counter holding an approximate count of the pages accessed, so by the same reasoning it does not need to be an atomic distributed counter.

This patch introduces an interface that allows the distributed counter to be used with either atomic or non-atomic sub-counters (selected through a template parameter), and ports n_page_gets to the non-atomic variant via that updated interface.
This commit is contained in:
parent
8048831a5b
commit
0f6f72965b
3 changed files with 27 additions and 17 deletions
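
For illustration, a minimal standalone sketch of the pattern this patch introduces. It is a sketch only, under these assumptions: kCacheLine, atomic_shard, plain_shard and distributed_counter are hypothetical names invented here; the real code uses the CACHE_LINE_SIZE macro, MY_ALIGNED and Atomic_relaxed, as the hunks below show.

#include <atomic>
#include <cstddef>

static constexpr std::size_t kCacheLine= 64;  // assumed cache line size

// Shard updated with an atomic read-modify-write,
// analogous to ib_atomic_counter_element_t below.
template <typename T>
struct atomic_shard { alignas(kCacheLine) std::atomic<T> value; };

// Shard updated with a plain, non-atomic operation, analogous to
// ib_counter_element_t below; concurrent updates may be lost, which
// is tolerable for an approximate statistics counter.
template <typename T>
struct plain_shard { alignas(kCacheLine) T value; };

// Distributed counter over cache-line-separated shards; the shard type
// (atomic or plain) is chosen at instantiation time through a template
// template parameter, mirroring the Element parameter added below.
template <typename T, template <typename> class Shard= atomic_shard,
          int N= 128>
struct distributed_counter
{
  void add(std::size_t index, T n) { shards[index % N].value+= n; }

  // Sum of all shards; only approximately correct while writers run.
  T sum() const
  {
    T total= 0;
    for (const auto &s : shards)
      total+= s.value;
    return total;
  }

  Shard<T> shards[N]= {};  // MariaDB instead relies on zero-initialized .bss
};
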
@@ -45,6 +45,7 @@ public:
   Type operator=(const Type val)
   { m.store(val, std::memory_order_relaxed); return val; }
   Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
+  Type operator+=(const Type i) { return fetch_add(i); }
   Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
   { return m.fetch_add(i, o); }
   Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
@@ -1223,7 +1223,7 @@ struct buf_pool_stat_t{
   /** Initialize the counters */
   void init() { memset((void*) this, 0, sizeof *this); }

-  ib_counter_t<ulint> n_page_gets;
+  ib_counter_t<ulint, ib_counter_element_t> n_page_gets;
                                 /*!< number of page gets performed;
                                 also successful searches through
                                 the adaptive hash index are
@@ -62,11 +62,31 @@ get_rnd_value()
 #endif /* !_WIN32 */
 }

+/** Atomic which occupies whole CPU cache line.
+Note: We rely on the default constructor of std::atomic and
+do not explicitly initialize the contents. This works for us,
+because ib_counter_t is only intended for usage with global
+memory that is allocated from the .bss and thus guaranteed to
+be zero-initialized by the run-time environment.
+@see srv_stats */
+template <typename Type>
+struct ib_atomic_counter_element_t {
+  MY_ALIGNED(CACHE_LINE_SIZE) Atomic_relaxed<Type> value;
+};
+
+template <typename Type>
+struct ib_counter_element_t {
+  MY_ALIGNED(CACHE_LINE_SIZE) Type value;
+};
+
+
 /** Class for using fuzzy counters. The counter is multi-instance relaxed atomic
 so the results are not guaranteed to be 100% accurate but close
 enough. Creates an array of counters and separates each element by the
 CACHE_LINE_SIZE bytes */
-template <typename Type, int N = 128 >
+template <typename Type,
+          template <typename T> class Element = ib_atomic_counter_element_t,
+          int N = 128 >
 struct ib_counter_t {
   /** Increment the counter by 1. */
   void inc() { add(1); }
@@ -83,12 +103,12 @@ struct ib_counter_t {
   /** Add to the counter.
   @param[in]    index   a reasonably thread-unique identifier
   @param[in]    n       amount to be added */
-  void add(size_t index, Type n) {
+  TPOOL_SUPPRESS_TSAN void add(size_t index, Type n) {
     index = index % N;

     ut_ad(index < UT_ARR_SIZE(m_counter));

-    m_counter[index].value.fetch_add(n);
+    m_counter[index].value += n;
   }

   /* @return total value - not 100% accurate, since it is relaxed atomic*/
@@ -103,20 +123,9 @@ struct ib_counter_t {
   }

 private:
-  /** Atomic which occupies whole CPU cache line.
-  Note: We rely on the default constructor of std::atomic and
-  do not explicitly initialize the contents. This works for us,
-  because ib_counter_t is only intended for usage with global
-  memory that is allocated from the .bss and thus guaranteed to
-  be zero-initialized by the run-time environment.
-  @see srv_stats */
-  struct ib_counter_element_t {
-    MY_ALIGNED(CACHE_LINE_SIZE) Atomic_relaxed<Type> value;
-  };
-  static_assert(sizeof(ib_counter_element_t) == CACHE_LINE_SIZE, "");
-
+  static_assert(sizeof(Element<Type>) == CACHE_LINE_SIZE, "");
   /** Array of counter elements */
-  MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_element_t m_counter[N];
+  MY_ALIGNED(CACHE_LINE_SIZE) Element<Type> m_counter[N];
 };

 #endif /* ut0counter_h */
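
Taken together, the hunks let callers pick the sub-counter flavour at the instantiation site. A hedged usage sketch, assuming only what the hunks above show (some_atomic_counter is a hypothetical name):

// Default Element is ib_atomic_counter_element_t: atomic sub-counters.
ib_counter_t<ulint> some_atomic_counter;

// Explicit non-atomic sub-counters, as buf_pool_stat_t::n_page_gets now uses.
// Updates go through the plain Type member via operator+=, so concurrent
// increments may occasionally be lost; the counter stays approximate.
ib_counter_t<ulint, ib_counter_element_t> n_page_gets;

The TPOOL_SUPPRESS_TSAN annotation on add() presumably marks this intentional race for ThreadSanitizer builds, so the lost-update behaviour is not reported as a bug.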