/* Copyright (c) 2020, 2021, MariaDB

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#pragma once

#ifdef __cplusplus

#include <atomic>
/**
  A wrapper for std::atomic, defaulting to std::memory_order_relaxed.

  When it comes to atomic loads or stores at std::memory_order_relaxed
  on IA-32 or AMD64, this wrapper is only introducing some constraints
  to the C++ compiler, to prevent some optimizations of loads or
  stores.

  On POWER and ARM, atomic loads and stores involve different instructions
  from normal loads and stores and will thus incur some overhead.

  Because atomic read-modify-write operations will always incur
  overhead, we intentionally do not define
  operator++(), operator--(), operator-=(), or similar,
  to make the overhead stand out in the users of this code.
  (operator+=() is defined below as a thin alias of fetch_add(),
  and like fetch_add() it returns the previous value.)
*/
template <typename Type> class Atomic_relaxed
{
  /** the wrapped value */
  std::atomic<Type> m;
public:
  /** Copy constructor: relaxed load from rhs, relaxed store into *this.
  Note: this is NOT an atomic copy of rhs as a whole; it is only as
  atomic as the two individual relaxed operations. */
  Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
  { m.store(rhs, std::memory_order_relaxed); }
  /** Construct from an initial value (non-atomic initialization). */
  Atomic_relaxed(Type val) : m(val) {}
  /** Default constructor: the value is left uninitialized. */
  Atomic_relaxed() = default;

  /** @return the value, loaded with std::memory_order_relaxed by default */
  Type load(std::memory_order o= std::memory_order_relaxed) const
  { return m.load(o); }
  /** Store a value, with std::memory_order_relaxed by default */
  void store(Type i, std::memory_order o= std::memory_order_relaxed)
  { m.store(i, o); }
  /* FIX: this previously invoked m.load() with no argument, which uses
  std::atomic's default std::memory_order_seq_cst. That contradicted the
  documented contract of this wrapper (and the load() member above) and
  would emit unnecessary fences on weakly ordered ISAs (ARM, POWER) on
  every implicit conversion, including the copy constructor and copy
  assignment, which read rhs through this operator. */
  operator Type() const { return m.load(std::memory_order_relaxed); }
  /** Relaxed store; @return the stored value */
  Type operator=(const Type i) { store(i); return i; }
  /** Relaxed load from rhs followed by relaxed store; @return the value */
  Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
  /** Alias of fetch_add(); NOTE: unlike std::atomic::operator+=,
  @return the PREVIOUS value, not the result of the addition */
  Type operator+=(const Type i) { return fetch_add(i); }
  /** @return the previous value, after atomically adding i */
  Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_add(i, o); }
  /** @return the previous value, after atomically subtracting i */
  Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_sub(i, o); }
  /** @return the previous value, after atomically XORing with i */
  Type fetch_xor(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_xor(i, o); }
  /** @return the previous value, after atomically ANDing with i */
  Type fetch_and(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_and(i, o); }
  /** @return the previous value, after atomically ORing with i */
  Type fetch_or(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_or(i, o); }
  /** Atomically replace the value with i2 if it equals i1; on failure,
  i1 is updated to the observed value.
  @return whether the exchange took place */
  bool compare_exchange_strong(Type& i1, const Type i2,
                               std::memory_order o1= std::memory_order_relaxed,
                               std::memory_order o2= std::memory_order_relaxed)
  { return m.compare_exchange_strong(i1, i2, o1, o2); }
  /** Atomically replace the value with i; @return the previous value */
  Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.exchange(i, o); }
};

#endif /* __cplusplus */