/* Copyright 2018-2023 Codership Oy <info@codership.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#include "wsrep_high_priority_service.h"
#include "wsrep_applier.h"
#include "wsrep_binlog.h"
#include "wsrep_schema.h"
#include "wsrep_xid.h"
#include "wsrep_trans_observer.h"
#include "wsrep_server_state.h"

#include "sql_class.h" /* THD */
#include "transaction.h"
#include "debug_sync.h"

/* RLI */
#include "rpl_rli.h"
#define NUMBER_OF_FIELDS_TO_IDENTIFY_COORDINATOR 1
#define NUMBER_OF_FIELDS_TO_IDENTIFY_WORKER 2
#include "slave.h"
#include "rpl_mi.h"
#include "rpl_constants.h"

namespace
{
/*
  Scoped mode for applying non-transactional write sets (TOI)
*/
class Wsrep_non_trans_mode
{
public:
  Wsrep_non_trans_mode(THD* thd, const wsrep::ws_meta& ws_meta)
    : m_thd(thd)
    , m_option_bits(thd->variables.option_bits)
    , m_server_status(thd->server_status)
  {
    m_thd->variables.option_bits&= ~OPTION_BEGIN;
    m_thd->server_status&= ~(SERVER_STATUS_IN_TRANS |
                             SERVER_STATUS_IN_TRANS_READONLY);
    m_thd->wsrep_cs().enter_toi_mode(ws_meta);
  }
  ~Wsrep_non_trans_mode()
  {
    m_thd->variables.option_bits= m_option_bits;
    m_thd->server_status= m_server_status;
    m_thd->wsrep_cs().leave_toi_mode();
  }
private:
  Wsrep_non_trans_mode(const Wsrep_non_trans_mode&);
  Wsrep_non_trans_mode& operator=(const Wsrep_non_trans_mode&);
  THD* m_thd;
  ulonglong m_option_bits;
  uint m_server_status;
};
}

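/*
  Set up a minimal replication group context (Relay_log_info,
  Master_info and rpl_group_info) for a wsrep applier thread, so that
  replication events from a write set can be applied through the
  regular event application code. Note: the log_fname argument is
  currently unused; all wsrep applier contexts share the fixed
  connection name "wsrep" (see the comment on rpl_filter below).
*/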
static rpl_group_info* wsrep_relay_group_init(THD* thd, const char* log_fname)
{
  Relay_log_info* rli= new Relay_log_info(false);

  if (!rli->relay_log.description_event_for_exec)
  {
    rli->relay_log.description_event_for_exec=
      new Format_description_log_event(4, 0, BINLOG_CHECKSUM_ALG_OFF);
  }

  static LEX_CSTRING connection_name= { STRING_WITH_LEN("wsrep") };

  /*
    Master_info's constructor initializes rpl_filter by either an already
    constructed Rpl_filter object from the global 'rpl_filters' list if the
    specified connection name is the same, or it constructs a new Rpl_filter
    object and adds it to rpl_filters. This object is later destructed by
    Master_info's destructor by looking it up based on connection name in
    the rpl_filters list.

    However, since all Master_info objects created here share the same
    connection name ("wsrep"), destruction of any of the existing Master_info
    objects (in wsrep_return_from_bf_mode()) would free the rpl_filter
    referenced by any/all existing Master_info objects.

    In order to avoid that, we have added a check in Master_info's destructor
    to not free the "wsrep" rpl_filter. It will eventually be freed by
    free_all_rpl_filters() when the server terminates.
  */
  rli->mi= new Master_info(&connection_name, false);

  struct rpl_group_info *rgi= new rpl_group_info(rli);
  rgi->thd= rli->sql_driver_thd= thd;

  if ((rgi->deferred_events_collecting= rli->mi->rpl_filter->is_on()))
  {
    rgi->deferred_events= new Deferred_log_events(rli);
  }

  return rgi;
}

static void wsrep_setup_uk_and_fk_checks(THD* thd)
{
  /* Tune FK and UK checking policy. These are reset back to original
     in Wsrep_high_priority_service destructor. */
  if (wsrep_slave_UK_checks == FALSE)
    thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
  else
    thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;

  if (wsrep_slave_FK_checks == FALSE)
    thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
  else
    thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
}

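/*
  Apply the replication events contained in a write set buffer. If
  applying fails, the error is captured into the err buffer for
  reporting back to the provider; if applying fails or an ignored
  error was encountered, the row event buffer is dumped with a header
  to aid debugging.
*/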
static int apply_events(THD* thd,
                        Relay_log_info* rli,
                        const wsrep::const_buffer& data,
                        wsrep::mutable_buffer& err,
                        bool const include_msg)
{
  int const ret= wsrep_apply_events(thd, rli, data.data(), data.size());
  if (ret || wsrep_thd_has_ignored_error(thd))
  {
    if (ret)
    {
      wsrep_store_error(thd, err, include_msg);
    }
    wsrep_dump_rbr_buf_with_header(thd, data.data(), data.size());
  }
  return ret;
}

/****************************************************************************
                         High priority service
*****************************************************************************/

Wsrep_high_priority_service::Wsrep_high_priority_service(THD* thd)
  : wsrep::high_priority_service(Wsrep_server_state::instance())
  , wsrep::high_priority_context(thd->wsrep_cs())
  , m_thd(thd)
  , m_rli()
{
  LEX_CSTRING db_str= { NULL, 0 };
  m_shadow.option_bits  = thd->variables.option_bits;
  m_shadow.server_status= thd->server_status;
  m_shadow.vio          = thd->net.vio;
  m_shadow.tx_isolation = thd->variables.tx_isolation;
  m_shadow.db           = (char *)thd->db.str;
  m_shadow.db_length    = thd->db.length;
  m_shadow.user_time    = thd->user_time;
  m_shadow.row_count_func= thd->get_row_count_func();
  m_shadow.wsrep_applier= thd->wsrep_applier;

  /* Disable general logging on applier threads */
  thd->variables.option_bits |= OPTION_LOG_OFF;

  /* Enable binlogging regardless of the log_slave_updates setting.
     This ensures that both local and applier transactions go through
     the same commit ordering algorithm in group commit control.
  */
  thd->variables.option_bits|= OPTION_BIN_LOG;

  thd->net.vio= 0;
  thd->reset_db(&db_str);
  thd->clear_error();
  thd->variables.tx_isolation= ISO_READ_COMMITTED;
  thd->tx_isolation          = ISO_READ_COMMITTED;

  /* From trans_begin() */
  thd->variables.option_bits|= OPTION_BEGIN;
  thd->server_status|= SERVER_STATUS_IN_TRANS;

  /* Make THD wsrep_applier so that it cannot be killed */
  thd->wsrep_applier= true;

  if (!thd->wsrep_rgi) thd->wsrep_rgi= wsrep_relay_group_init(thd, "wsrep_relay");

  m_rgi= thd->wsrep_rgi;
  m_rgi->thd= thd;
  m_rli= m_rgi->rli;
  thd_proc_info(thd, "wsrep applier idle");
}

Wsrep_high_priority_service::~Wsrep_high_priority_service()
{
  THD* thd= m_thd;
  thd->variables.option_bits = m_shadow.option_bits;
  thd->server_status         = m_shadow.server_status;
  thd->net.vio               = m_shadow.vio;
  thd->variables.tx_isolation= m_shadow.tx_isolation;
  LEX_CSTRING db_str= { m_shadow.db, m_shadow.db_length };
  thd->reset_db(&db_str);
  thd->user_time             = m_shadow.user_time;

  if (thd->wsrep_rgi && thd->wsrep_rgi->rli)
    delete thd->wsrep_rgi->rli->mi;
  if (thd->wsrep_rgi)
    delete thd->wsrep_rgi->rli;
  delete thd->wsrep_rgi;
  thd->wsrep_rgi= NULL;

  thd->set_row_count_func(m_shadow.row_count_func);
  thd->wsrep_applier         = m_shadow.wsrep_applier;
}

int Wsrep_high_priority_service::start_transaction(
    const wsrep::ws_handle& ws_handle, const wsrep::ws_meta& ws_meta)
{
  DBUG_ENTER(" Wsrep_high_priority_service::start_transaction");
  DBUG_RETURN(m_thd->wsrep_cs().start_transaction(ws_handle, ws_meta) ||
              trans_begin(m_thd));
}

const wsrep::transaction& Wsrep_high_priority_service::transaction() const
{
  DBUG_ENTER(" Wsrep_high_priority_service::transaction");
  DBUG_RETURN(m_thd->wsrep_trx());
}

int Wsrep_high_priority_service::next_fragment(const wsrep::ws_meta& ws_meta)
{
  DBUG_ENTER(" Wsrep_high_priority_service::next_fragment");
  DBUG_RETURN(m_thd->wsrep_cs().next_fragment(ws_meta));
}

int Wsrep_high_priority_service::adopt_transaction(
    const wsrep::transaction& transaction)
{
  DBUG_ENTER(" Wsrep_high_priority_service::adopt_transaction");
  /* Adopt transaction first to set up transaction meta data for
     trans begin. If trans_begin() fails for some reason, roll back
     the wsrep transaction before return. */
  m_thd->wsrep_cs().adopt_transaction(transaction);
  int ret= trans_begin(m_thd);
  if (ret)
  {
    m_thd->wsrep_cs().before_rollback();
    m_thd->wsrep_cs().after_rollback();
  }
  DBUG_RETURN(ret);
}

int Wsrep_high_priority_service::append_fragment_and_commit(
    const wsrep::ws_handle& ws_handle,
    const wsrep::ws_meta& ws_meta,
    const wsrep::const_buffer& data,
    const wsrep::xid& xid WSREP_UNUSED)
{
  DBUG_ENTER("Wsrep_high_priority_service::append_fragment_and_commit");
  int ret= start_transaction(ws_handle, ws_meta);
  /*
    Start transaction explicitly to avoid early commit via
    trans_commit_stmt() in append_fragment()
  */
  ret= ret || trans_begin(m_thd);
  ret= ret || wsrep_schema->append_fragment(m_thd,
                                            ws_meta.server_id(),
                                            ws_meta.transaction_id(),
                                            ws_meta.seqno(),
                                            ws_meta.flags(),
                                            data);

  /*
    Note: The commit code below seems to be identical to
    Wsrep_storage_service::commit(). Consider implementing a
    common utility function to deal with commit.
  */
  const bool do_binlog_commit= (opt_log_slave_updates &&
                                wsrep_gtid_mode &&
                                m_thd->variables.gtid_seq_no);
  /*
    Write a skip event into the binlog if gtid_mode is on. This is to
    maintain gtid continuity.
  */
  if (do_binlog_commit)
  {
    ret= wsrep_write_skip_event(m_thd);
  }

  if (!ret)
  {
    ret= m_thd->wsrep_cs().prepare_for_ordering(ws_handle,
                                                ws_meta, true);
  }

  ret= ret || trans_commit(m_thd);
  ret= ret || (m_thd->wsrep_cs().after_applying(), 0);

  m_thd->release_transactional_locks();

  free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));

  thd_proc_info(m_thd, "wsrep applier committed");

  DBUG_RETURN(ret);
}

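/*
  Remove the streaming replication (SR) fragments of the transaction
  identified by ws_meta from persistent storage via wsrep_schema.
*/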
int Wsrep_high_priority_service::remove_fragments(const wsrep::ws_meta& ws_meta)
{
  DBUG_ENTER("Wsrep_high_priority_service::remove_fragments");
  int ret= wsrep_schema->remove_fragments(m_thd,
                                          ws_meta.server_id(),
                                          ws_meta.transaction_id(),
                                          m_thd->wsrep_sr().fragments());
  DBUG_RETURN(ret);
}

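/*
  Commit the currently applied write set. The commit is ordered via
  prepare_for_ordering(), after which the statement and the
  transaction are committed. If the write set was not ordered
  (undefined seqno), the wsrep transaction is cycled through the
  rollback hooks afterwards to clean up the client state; an ordered
  commit that never went through the commit time hooks (e.g. CTAS
  with an empty result set) is cycled through the commit hooks to
  release commit order.
*/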
int Wsrep_high_priority_service::commit(const wsrep::ws_handle& ws_handle,
                                        const wsrep::ws_meta& ws_meta)
{
  DBUG_ENTER("Wsrep_high_priority_service::commit");
  THD* thd= m_thd;
  DBUG_ASSERT(thd->wsrep_trx().active());
  thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, true);
  thd_proc_info(thd, "committing");
  int ret= 0;

  const bool is_ordered= !ws_meta.seqno().is_undefined();

  if (!thd->transaction->stmt.is_empty())
    ret= trans_commit_stmt(thd);

  if (ret == 0)
    ret= trans_commit(thd);

  if (ret == 0)
  {
    m_rgi->cleanup_context(thd, 0);
  }

  m_thd->release_transactional_locks();

  thd_proc_info(thd, "wsrep applier committed");

  if (!is_ordered)
  {
    m_thd->wsrep_cs().before_rollback();
    m_thd->wsrep_cs().after_rollback();
  }
  else if (m_thd->wsrep_trx().state() == wsrep::transaction::s_executing)
  {
    /*
      Wsrep commit was ordered but it did not go through commit time
      hooks and remains active. Cycle through commit hooks to release
      commit order and to make cleanup happen in after_applying() call.

      This is a workaround for CTAS with empty result set.
    */
    WSREP_DEBUG("Commit not finished for applier %llu", thd->thread_id);
    ret= ret || m_thd->wsrep_cs().before_commit() ||
         m_thd->wsrep_cs().ordered_commit() ||
         m_thd->wsrep_cs().after_commit();
  }

  thd->lex->sql_command= SQLCOM_END;

  free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC));

  must_exit_= check_exit_status();
  DBUG_RETURN(ret);
}

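/*
  Roll back the currently applied write set. For ordered write sets
  the transaction is first prepared for ordering with the commit flag
  set to false; unordered rollbacks are expected to carry empty
  ws_handle/ws_meta (see the asserts below).
*/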
int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
                                          const wsrep::ws_meta& ws_meta)
{
  DBUG_ENTER("Wsrep_high_priority_service::rollback");
  if (ws_meta.ordered())
  {
    m_thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, false);
  }
  else
  {
    assert(ws_meta == wsrep::ws_meta());
    assert(ws_handle == wsrep::ws_handle());
  }
  int ret= (trans_rollback_stmt(m_thd) || trans_rollback(m_thd));

  WSREP_DEBUG("::rollback() thread: %lu, client_state %s "
              "client_mode %s trans_state %s killed %d",
              thd_get_thread_id(m_thd),
              wsrep_thd_client_state_str(m_thd),
              wsrep_thd_client_mode_str(m_thd),
              wsrep_thd_transaction_state_str(m_thd),
              m_thd->killed);

#ifdef ENABLED_DEBUG_SYNC
  DBUG_EXECUTE_IF("sync.wsrep_rollback_mdl_release",
                  {
                    const char act[]=
                      "now "
                      "SIGNAL sync.wsrep_rollback_mdl_release_reached "
                      "WAIT_FOR signal.wsrep_rollback_mdl_release";
                    DBUG_ASSERT(!debug_sync_set_action(m_thd,
                                                       STRING_WITH_LEN(act)));
                  };);
#endif

  m_thd->release_transactional_locks();

  free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));

  DBUG_RETURN(ret);
}

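/*
  Apply a TOI (Total Order Isolation) write set, i.e. a
  non-transactional operation such as DDL that is executed in total
  order on all nodes. The THD is switched into non-transactional TOI
  mode for the duration of the call via the scoped
  Wsrep_non_trans_mode helper defined at the top of this file.
*/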
int Wsrep_high_priority_service::apply_toi(const wsrep::ws_meta& ws_meta,
                                           const wsrep::const_buffer& data,
                                           wsrep::mutable_buffer& err)
{
  DBUG_ENTER("Wsrep_high_priority_service::apply_toi");
  THD* thd= m_thd;
  Wsrep_non_trans_mode non_trans_mode(thd, ws_meta);

  wsrep::client_state& client_state(thd->wsrep_cs());
  DBUG_ASSERT(client_state.in_toi());

  thd_proc_info(thd, "wsrep applier toi");

  WSREP_DEBUG("Wsrep_high_priority_service::apply_toi: %lld",
              client_state.toi_meta().seqno().get());

#ifdef ENABLED_DEBUG_SYNC
  DBUG_EXECUTE_IF("sync.wsrep_apply_toi",
                  {
                    const char act[]=
                      "now "
                      "SIGNAL sync.wsrep_apply_toi_reached "
                      "WAIT_FOR signal.wsrep_apply_toi";
                    DBUG_ASSERT(!debug_sync_set_action(thd,
                                                       STRING_WITH_LEN(act)));
                  };);
#endif

  thd->set_time();
  int ret= apply_events(thd, m_rli, data, err, false);
  wsrep_thd_set_ignored_error(thd, false);
  trans_commit(thd);

  thd->close_temporary_tables();
  thd->lex->sql_command= SQLCOM_END;

  wsrep_gtid_server.signal_waiters(thd->wsrep_current_gtid_seqno, false);
  wsrep_set_SE_checkpoint(client_state.toi_meta().gtid(), wsrep_gtid_server.gtid());

  must_exit_= check_exit_status();

  DBUG_RETURN(ret);
}

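/*
  store_globals()/reset_globals() attach and detach the applier THD
  to and from the current thread's thread local storage. Ownership of
  the wsrep client state is (re)acquired when the THD is attached.
*/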
void Wsrep_high_priority_service::store_globals()
{
  wsrep_store_threadvars(m_thd);
  m_thd->wsrep_cs().acquire_ownership();
}

void Wsrep_high_priority_service::reset_globals()
{
  wsrep_reset_threadvars(m_thd);
}

void Wsrep_high_priority_service::switch_execution_context(wsrep::high_priority_service& orig_high_priority_service)
{
  DBUG_ENTER("Wsrep_high_priority_service::switch_execution_context");
  Wsrep_high_priority_service&
    orig_hps= static_cast<Wsrep_high_priority_service&>(orig_high_priority_service);
  m_thd->thread_stack= orig_hps.m_thd->thread_stack;
  DBUG_VOID_RETURN;
}

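/*
  Log a dummy write set. A dummy write set carries a seqno that must
  still be passed through commit ordering even though there is no
  transaction payload to apply (e.g. the originating transaction
  failed certification or was rolled back). If the write set was
  ordered, the commit order critical section is entered and left
  here, with the seqno checkpointed into the storage engine in
  between.
*/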
int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_handle,
                                                     const wsrep::ws_meta& ws_meta,
                                                     wsrep::mutable_buffer& err)
{
  DBUG_ENTER("Wsrep_high_priority_service::log_dummy_write_set");
  int ret= 0;
  DBUG_PRINT("info",
             ("Wsrep_high_priority_service::log_dummy_write_set: seqno=%lld",
              ws_meta.seqno().get()));
#ifdef ENABLED_DEBUG_SYNC
  DBUG_EXECUTE_IF("sync.wsrep_log_dummy_write_set",
                  {
                    const char act[]=
                      "now "
                      "SIGNAL sync.wsrep_log_dummy_write_set_reached ";
                    DBUG_ASSERT(!debug_sync_set_action(m_thd,
                                                       STRING_WITH_LEN(act)));
                  };);
#endif

  if (ws_meta.ordered())
  {
    wsrep::client_state& cs(m_thd->wsrep_cs());
    if (!cs.transaction().active())
    {
      cs.start_transaction(ws_handle, ws_meta);
    }
    adopt_apply_error(err);
    WSREP_DEBUG("Log dummy write set %lld", ws_meta.seqno().get());
    ret= cs.provider().commit_order_enter(ws_handle, ws_meta);
    if (!(ret && opt_log_slave_updates && wsrep_gtid_mode &&
          m_thd->variables.gtid_seq_no))
    {
      cs.before_rollback();
      cs.after_rollback();
    }

    if (!WSREP_EMULATE_BINLOG(m_thd))
    {
      wsrep_register_for_group_commit(m_thd);
      /* wait_for_prior_commit() ensures that all preceding transactions
         have been committed and the seqno has been synced into the
         storage engine. We don't release commit order here yet, to
         avoid the following transactions syncing their seqno before
         wsrep_set_SE_checkpoint() below returns. This effectively pauses
         group commit for the checkpoint operation, but is the only way to
         ensure proper ordering. */
      m_thd->wait_for_prior_commit();
    }

    WSREP_DEBUG("checkpointing dummy write set %lld", ws_meta.seqno().get());
    wsrep_set_SE_checkpoint(ws_meta.gtid(), wsrep_gtid_server.gtid());

    if (!WSREP_EMULATE_BINLOG(m_thd))
    {
      wsrep_unregister_from_group_commit(m_thd);
    }
    ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
    cs.after_applying();
  }
  DBUG_RETURN(ret);
}

void Wsrep_high_priority_service::adopt_apply_error(wsrep::mutable_buffer& err)
{
  m_thd->wsrep_cs().adopt_apply_error(err);
}

void Wsrep_high_priority_service::debug_crash(const char* crash_point)
{
  DBUG_ASSERT(m_thd == current_thd);
  DBUG_EXECUTE_IF(crash_point, DBUG_SUICIDE(););
}

/****************************************************************************
                          Applier service
*****************************************************************************/

Wsrep_applier_service::Wsrep_applier_service(THD* thd)
  : Wsrep_high_priority_service(thd)
{
  thd->wsrep_applier_service= this;
  thd->wsrep_cs().open(wsrep::client_id(thd->thread_id));
  thd->wsrep_cs().before_command();
  thd->wsrep_cs().debug_log_level(wsrep_debug);
  if (!thd->slave_thread)
    thd->system_thread_info.rpl_sql_info=
      new rpl_sql_thread_info(thd->wsrep_rgi->rli->mi->rpl_filter);
}

Wsrep_applier_service::~Wsrep_applier_service()
{
  if (!m_thd->slave_thread)
    delete m_thd->system_thread_info.rpl_sql_info;
  m_thd->wsrep_cs().after_command_before_result();
  m_thd->wsrep_cs().after_command_after_result();
  m_thd->wsrep_cs().close();
  m_thd->wsrep_cs().cleanup();
  m_thd->wsrep_applier_service= NULL;
}

int Wsrep_applier_service::apply_write_set(const wsrep::ws_meta& ws_meta,
                                           const wsrep::const_buffer& data,
                                           wsrep::mutable_buffer& err)
{
  DBUG_ENTER("Wsrep_applier_service::apply_write_set");
  THD* thd= m_thd;

  thd->variables.option_bits |= OPTION_BEGIN;
  thd->variables.option_bits |= OPTION_GTID_BEGIN;
  thd->variables.option_bits |= OPTION_NOT_AUTOCOMMIT;
  DBUG_ASSERT(thd->wsrep_trx().active());
  DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_executing);

  thd_proc_info(thd, "applying write set");

  /* Moved the dbug sync point here, after the possible THD switch for
     SR transactions has been done.
  */
#ifdef ENABLED_DEBUG_SYNC
  /* Allow tests to block the applier thread using the DBUG facilities */
  DBUG_EXECUTE_IF("sync.wsrep_apply_cb",
                  {
                    const char act[]=
                      "now "
                      "SIGNAL sync.wsrep_apply_cb_reached "
                      "WAIT_FOR signal.wsrep_apply_cb";
                    DBUG_ASSERT(!debug_sync_set_action(thd,
                                                       STRING_WITH_LEN(act)));
                  };);
#endif /* ENABLED_DEBUG_SYNC */

  wsrep_setup_uk_and_fk_checks(thd);
  int ret= apply_events(thd, m_rli, data, err, true);

  thd->close_temporary_tables();
  if (!ret && !(ws_meta.flags() & wsrep::provider::flag::commit))
  {
    thd->wsrep_cs().fragment_applied(ws_meta.seqno());
  }
  thd_proc_info(thd, "wsrep applied write set");

  thd->variables.option_bits &= ~OPTION_GTID_BEGIN;
  DBUG_RETURN(ret);
}

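/*
  NBO (non-blocking operation) begin events are not handled by this
  applier service; the method below is a no-op that returns success.
*/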
int Wsrep_applier_service::apply_nbo_begin(const wsrep::ws_meta& ws_meta,
                                           const wsrep::const_buffer& data,
                                           wsrep::mutable_buffer& err)
{
  DBUG_ENTER("Wsrep_applier_service::apply_nbo_begin");
  DBUG_RETURN(0);
}

void Wsrep_applier_service::after_apply()
{
  DBUG_ENTER("Wsrep_applier_service::after_apply");
  wsrep_after_apply(m_thd);
  DBUG_VOID_RETURN;
}

bool Wsrep_applier_service::check_exit_status() const
{
  bool ret= false;
  mysql_mutex_lock(&LOCK_wsrep_slave_threads);
  if (wsrep_slave_count_change < 0)
  {
    ++wsrep_slave_count_change;
    ret= true;
  }
  mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
  return ret;
}

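/*
  The replayer service re-executes a local transaction that was BF
  (brute force) aborted after it had already replicated, using a
  separate replayer THD. The original THD's diagnostics area is
  shadowed for the duration of the replay, and the client response is
  produced from the replay result in the destructor.
*/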
/****************************************************************************
                         Replayer service
*****************************************************************************/

Wsrep_replayer_service::Wsrep_replayer_service(THD* replayer_thd, THD* orig_thd)
  : Wsrep_high_priority_service(replayer_thd)
  , m_orig_thd(orig_thd)
  , m_da_shadow()
  , m_replay_status()
{
  /* Response must not have been sent to client */
  DBUG_ASSERT(!orig_thd->get_stmt_da()->is_sent());
  /* PS reprepare observer should have been removed already;
     open_table() will fail if we have a dangling observer here */
  DBUG_ASSERT(!orig_thd->m_reprepare_observer);
  /* Replaying should always happen from the after_statement() hook
     after rollback, which should guarantee that there are no
     transactional locks */
  DBUG_ASSERT(!orig_thd->mdl_context.has_transactional_locks());

  replayer_thd->system_thread_info.rpl_sql_info=
    new rpl_sql_thread_info(replayer_thd->wsrep_rgi->rli->mi->rpl_filter);

  /* Make a shadow copy of diagnostics area and reset */
  m_da_shadow.status= orig_thd->get_stmt_da()->status();
  if (m_da_shadow.status == Diagnostics_area::DA_OK)
  {
    m_da_shadow.affected_rows= orig_thd->get_stmt_da()->affected_rows();
    m_da_shadow.last_insert_id= orig_thd->get_stmt_da()->last_insert_id();
    strmake(m_da_shadow.message, orig_thd->get_stmt_da()->message(),
            sizeof(m_da_shadow.message) - 1);
  }
  orig_thd->get_stmt_da()->reset_diagnostics_area();

  /* Release explicit locks */
  if (orig_thd->locked_tables_mode && orig_thd->lock)
  {
    WSREP_WARN("releasing table lock for replaying (%llu)",
               orig_thd->thread_id);
    orig_thd->locked_tables_list.unlock_locked_tables(orig_thd);
    orig_thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
  }

  thd_proc_info(orig_thd, "wsrep replaying trx");

  /*
    Switch execution context to replayer_thd and prepare it for
    replay execution.
  */
  /* Copy thd vars from orig_thd before reset, otherwise reset
     for orig thd clears thread local storage before copy. */
  wsrep_assign_from_threadvars(replayer_thd);
  wsrep_reset_threadvars(orig_thd);
  wsrep_store_threadvars(replayer_thd);
  wsrep_open(replayer_thd);
  wsrep_before_command(replayer_thd);
  replayer_thd->wsrep_cs().clone_transaction_for_replay(orig_thd->wsrep_trx());
}

Wsrep_replayer_service::~Wsrep_replayer_service()
{
  /* Switch execution context back to original. */
  wsrep_after_apply(m_thd);
  wsrep_after_command_ignore_result(m_thd);
  wsrep_close(m_thd);
  wsrep_reset_threadvars(m_thd);
  wsrep_store_threadvars(m_orig_thd);

  DBUG_ASSERT(!m_orig_thd->get_stmt_da()->is_sent());
  DBUG_ASSERT(!m_orig_thd->get_stmt_da()->is_set());

  delete m_thd->system_thread_info.rpl_sql_info;
  m_thd->system_thread_info.rpl_sql_info= nullptr;

  if (m_replay_status == wsrep::provider::success)
  {
    DBUG_ASSERT(m_thd->wsrep_cs().current_error() == wsrep::e_success);
    m_orig_thd->reset_kill_query();
    my_ok(m_orig_thd, m_da_shadow.affected_rows, m_da_shadow.last_insert_id);
  }
  else if (m_replay_status == wsrep::provider::error_certification_failed)
  {
    wsrep_override_error(m_orig_thd, ER_LOCK_DEADLOCK);
  }
  else
  {
    DBUG_ASSERT(0);
    WSREP_ERROR("trx_replay failed for: %d, schema: %s, query: %s",
                m_replay_status,
                m_orig_thd->db.str, wsrep_thd_query(m_orig_thd));
    unireg_abort(1);
  }
}

int Wsrep_replayer_service::apply_write_set(const wsrep::ws_meta& ws_meta,
                                            const wsrep::const_buffer& data,
                                            wsrep::mutable_buffer& err)
{
  DBUG_ENTER("Wsrep_replayer_service::apply_write_set");
  THD* thd= m_thd;

  DBUG_ASSERT(thd->wsrep_trx().active());
  DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_replaying);

#ifdef ENABLED_DEBUG_SYNC
  /* Allow tests to block the replayer thread using the DBUG facilities */
  DBUG_EXECUTE_IF("sync.wsrep_replay_cb",
                  {
                    const char act[]=
                      "now "
                      "SIGNAL sync.wsrep_replay_cb_reached "
                      "WAIT_FOR signal.wsrep_replay_cb";
                    DBUG_ASSERT(!debug_sync_set_action(thd,
                                                       STRING_WITH_LEN(act)));
                  };);
#endif

  wsrep_setup_uk_and_fk_checks(thd);

  int ret= 0;
  if (!wsrep::starts_transaction(ws_meta.flags()))
  {
    DBUG_ASSERT(thd->wsrep_trx().is_streaming());
    ret= wsrep_schema->replay_transaction(thd,
                                          m_rli,
                                          ws_meta,
                                          thd->wsrep_sr().fragments());
  }
  ret= ret || apply_events(thd, m_rli, data, err, true);
  thd->close_temporary_tables();
  if (!ret && !(ws_meta.flags() & wsrep::provider::flag::commit))
  {
    thd->wsrep_cs().fragment_applied(ws_meta.seqno());
  }

  thd_proc_info(thd, "wsrep replayed write set");
  DBUG_RETURN(ret);
}