mirror of
https://github.com/MariaDB/server.git
synced 2025-01-16 12:02:42 +01:00
5099033c26
Added argument to maria_end_bulk_insert() to know if the table will be deleted after the operation Fixed wrong call to strmake Don't call bulk insert in case of inserting only one row (speed optimization as starting/stopping bulk insert Allow storing year 2155 in year field When running with purify/valgrind avoid copying structures over themself Added hook 'trnnam_end_trans_hook' that is called when transaction ends Added trn->used_tables that is used to an entry for all tables used by transaction Fixed that ndb doesn't crash on duplicate key error when start_bulk_insert/end_bulk_insert are not called include/maria.h: Added argument to maria_end_bulk_insert() to know if the table will be deleted after the operation include/my_tree.h: Added macro 'reset_free_element()' to be able to ignore calls to the external free function. Is used to optimize end-bulk-insert in case of failures, in which case we don't want write the remaining keys in the tree mysql-test/install_test_db.sh: Upgrade to new mysql_install_db options mysql-test/r/maria-mvcc.result: New tests mysql-test/r/maria.result: New tests mysql-test/suite/ndb/r/ndb_auto_increment.result: Fixed error message now when bulk insert is not always called mysql-test/suite/ndb/t/ndb_auto_increment.test: Fixed error message now when bulk insert is not always called mysql-test/t/maria-mvcc.test: Added testing of versioning of count(*) mysql-test/t/maria-page-checksum.test: Added comment mysql-test/t/maria.test: More tests mysys/hash.c: Code style change sql/field.cc: Allow storing year 2155 in year field sql/ha_ndbcluster.cc: Added new argument to end_bulk_insert() to signal if the bulk insert should ignored sql/ha_ndbcluster.h: Added new argument to end_bulk_insert() to signal if the bulk insert should ignored sql/ha_partition.cc: Added new argument to end_bulk_insert() to signal if the bulk insert should ignored sql/ha_partition.h: Added new argument to end_bulk_insert() to signal if the bulk insert should ignored 
sql/handler.cc: Don't call get_dup_key() if there is no table object. This can happen if the handler generates a duplicate key error on commit sql/handler.h: Added new argument to end_bulk_insert() to signal if the bulk insert should ignored (ie, the table will be deleted) sql/item.cc: Style fix Removed compiler warning sql/log_event.cc: Added new argument to ha_end_bulk_insert() sql/log_event_old.cc: Added new argument to ha_end_bulk_insert() sql/mysqld.cc: Removed compiler warning sql/protocol.cc: Added DBUG sql/sql_class.cc: Added DBUG Fixed wrong call to strmake sql/sql_insert.cc: Don't call bulk insert in case of inserting only one row (speed optimization as starting/stopping bulk insert involves a lot of if's) Added new argument to ha_end_bulk_insert() sql/sql_load.cc: Added new argument to ha_end_bulk_insert() sql/sql_parse.cc: Style fixes Avoid goto in common senario sql/sql_select.cc: When running with purify/valgrind avoid copying structures over themself. This is not a real bug in itself, but it's a waste of cycles and causes valgrind warnings sql/sql_select.h: Avoid copying structures over themself. This is not a real bug in itself, but it's a waste of cycles and causes valgrind warnings sql/sql_table.cc: Call HA_EXTRA_PREPARE_FOR_DROP if table created by ALTER TABLE is going to be dropped Added new argument to ha_end_bulk_insert() storage/archive/ha_archive.cc: Added new argument to end_bulk_insert() storage/archive/ha_archive.h: Added new argument to end_bulk_insert() storage/federated/ha_federated.cc: Added new argument to end_bulk_insert() storage/federated/ha_federated.h: Added new argument to end_bulk_insert() storage/maria/Makefile.am: Added ma_state.c and ma_state.h storage/maria/ha_maria.cc: Versioning of count(*) and checksum - share->state.state is now assumed to be correct, not handler->state - Call _ma_setup_live_state() in external lock to get count(*)/checksum versioning. 
In case of not versioned and not concurrent insertable table, file->s->state.state contains the correct state information Other things: - file->s -> share - Added DBUG_ASSERT() for unlikely case - Optimized end_bulk_insert() to not write anything if table is going to be deleted (as in failed alter table) - Indentation changes in external_lock becasue of removed 'goto' caused a big conflict even if very little was changed storage/maria/ha_maria.h: New argument to end_bulk_insert() storage/maria/ma_blockrec.c: Update for versioning of count(*) and checksum Keep share->state.state.data_file_length up to date (not info->state->data_file_length) Moved _ma_block_xxxx_status() and maria_versioning() functions to ma_state.c storage/maria/ma_check.c: Update and use share->state.state instead of info->state info->s to share Update info->state at end of repair Call _ma_reset_state() to update share->state_history at end of repair storage/maria/ma_checkpoint.c: Call _ma_remove_not_visible_states() on checkpoint to clean up not visible state history from tables storage/maria/ma_close.c: Remember state history for running transaction even if table is closed storage/maria/ma_commit.c: Ensure we always call trnman_commit_trn() even if other calls fails. If we don't do that, the translog and state structures will not be freed storage/maria/ma_delete.c: Versioning of count(*) and checksum: - Always update info->state->checksum and info->state->records storage/maria/ma_delete_all.c: Versioning of count(*) and checksum: - Ensure that share->state.state is updated, as here is where we store the primary information storage/maria/ma_dynrec.c: Use lock_key_trees instead of concurrent_insert to check if trees should be locked. This allows us to lock trees both for concurrent_insert and for index versioning. 
storage/maria/ma_extra.c: Versioning of count(*) and checksum: - Use share->state.state instead of info->state - share->concurrent_insert -> share->non_transactional_concurrent_insert - Don't update share->state.state from info->state if transactional table Optimization: - Don't flush io_cache or bitmap if we are using FLUSH_IGNORE_CHANGED storage/maria/ma_info.c: Get most state information from current state storage/maria/ma_init.c: Add hash table and free function to store states for closed tables Install hook for transaction commit/rollback to update history state storage/maria/ma_key_recover.c: Versioning of count(*) and checksum: - Use share->state.state instead of info->state storage/maria/ma_locking.c: Versioning of count(*) and checksum: - Call virtual functions (if exists) to restore/update status - Move _ma_xxx_status() functions to ma_state.c info->s -> share storage/maria/ma_open.c: Versioning of count(*) and checksum: - For not transactional tables, set info->state to point to new allocated state structure. - Initialize new info->state_start variable that points to state at start of transaction - Copy old history states from hash table (maria_stored_states) first time the table is opened - Split flag share->concurrent_insert to non_transactional_concurrent_insert & lock_key_tree - For now, only enable versioning of tables without keys (to be fixed in soon!) 
- Added new virtual function to restore status in maria_lock_database) More DBUG storage/maria/ma_page.c: Versioning of count(*) and checksum: - Use share->state.state instead of info->state - Modify share->state.state.key_file_length under share->intern_lock storage/maria/ma_range.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees info->s -> share storage/maria/ma_recovery.c: Versioning of count(*) and checksum: - Use share->state.state instead of info->state - Update state information on close and when reenabling logging storage/maria/ma_rkey.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees storage/maria/ma_rnext.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees storage/maria/ma_rnext_same.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees - Only skip rows based on file length if non_transactional_concurrent_insert is set storage/maria/ma_rprev.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees storage/maria/ma_rsame.c: Versioning of count(*) and checksum: - Lock trees based on share->lock_key_trees storage/maria/ma_sort.c: Use share->state.state instead of info->state Fixed indentation storage/maria/ma_static.c: Added maria_stored_state storage/maria/ma_update.c: Versioning of count(*) and checksum: - Always update info->state->checksum and info->state->records - Remove optimization for index file update as it doesn't work for transactional tables storage/maria/ma_write.c: Versioning of count(*) and checksum: - Always update info->state->checksum and info->state->records storage/maria/maria_def.h: Move MARIA_STATUS_INFO to ma_state.h Changes to MARIA_SHARE: - Added state_history to store count(*)/checksum states - Added in_trans as counter if table is used by running transactions - Split concurrent_insert into lock_key_trees and on_transactional_concurrent_insert. 
- Added virtual function lock_restore_status Changes to MARIA_HA: - save_state -> state_save - Added state_start to store state at start of transaction storage/maria/maria_pack.c: Versioning of count(*) and checksum: - Use share->state.state instead of info->state Indentation fixes storage/maria/trnman.c: Added hook 'trnnam_end_trans_hook' that is called when transaction ends Added trn->used_tables that is used to an entry for all tables used by transaction More DBUG Changed return type of trnman_end_trn() to my_bool Added trnman_get_min_trid() to get minimum trid in use. Added trnman_exists_active_transactions() to check if there exist a running transaction started between two commit id storage/maria/trnman.h: Added 'used_tables' Moved all pointers into same groups to get better memory alignment storage/maria/trnman_public.h: Added prototypes for new functions and variables Chagned return type of trnman_end_trn() to my_bool storage/myisam/ha_myisam.cc: Added argument to end_bulk_insert() if operation should be aborted storage/myisam/ha_myisam.h: Added argument to end_bulk_insert() if operation should be aborted storage/maria/ma_state.c: Functions to handle state of count(*) and checksum storage/maria/ma_state.h: Structures and declarations to handle state of count(*) and checksum
854 lines
25 KiB
C
854 lines
25 KiB
C
/* Copyright (C) 2006 MySQL AB
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
it under the terms of the GNU General Public License as published by
|
|
the Free Software Foundation; version 2 of the License.
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
GNU General Public License for more details.
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
along with this program; if not, write to the Free Software
|
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
|
|
|
|
|
#include <my_global.h>
|
|
#include <my_sys.h>
|
|
#include <m_string.h>
|
|
#include "trnman.h"
|
|
#include "ma_checkpoint.h"
|
|
#include "ma_control_file.h"
|
|
|
|
/*
  status variables:
  how many trns in the active list currently,
  in the committed list currently, allocated since startup.
*/
uint trnman_active_transactions, trnman_committed_transactions,
  trnman_allocated_transactions;

/* list of active transactions in the trid order */
static TRN active_list_min, active_list_max;
/* list of committed transactions in the trid order */
static TRN committed_list_min, committed_list_max;

/* a counter, used to generate transaction ids */
static TrID global_trid_generator;

/* the mutex for everything above */
static pthread_mutex_t LOCK_trn_list;

/* LIFO pool of unused TRN structures for reuse */
static TRN *pool;

/* a hash for committed transactions that maps trid to a TRN structure */
static LF_HASH trid_to_committed_trn;

/* an array that maps short_trid of an active transaction to a TRN structure */
static TRN **short_trid_to_active_trn;

/* locks for short_trid_to_active_trn and pool */
static my_atomic_rwlock_t LOCK_short_trid_to_trn, LOCK_pool;

static my_bool default_trnman_end_trans_hook(TRN *, my_bool, my_bool);

/*
  Hook called by trnman_end_trn() when a transaction ends; callers may
  replace the default no-op with their own function (e.g. to update
  per-table state history).
*/
my_bool (*trnman_end_trans_hook)(TRN *, my_bool, my_bool)=
  default_trnman_end_trans_hook;
|
|
|
|
/*
|
|
Simple interface functions
|
|
QQ: if they stay so simple, should we make them inline?
|
|
*/
|
|
|
|
/*
  Increase the number of tables locked by this transaction.

  RETURN  the count as it was BEFORE the increment (post-increment
          semantics, so callers can detect the first locked table)
*/
uint trnman_increment_locked_tables(TRN *trn)
{
  uint old_count= trn->locked_tables;
  trn->locked_tables= old_count + 1;
  return old_count;
}
|
|
|
|
/*
  Report how many tables the transaction currently has locked.

  RETURN  current locked-table count (0 means none)
*/
uint trnman_has_locked_tables(TRN *trn)
{
  uint count= trn->locked_tables;
  return count;
}
|
|
|
|
/*
  Decrease the number of tables locked by this transaction.

  RETURN  the count AFTER the decrement (pre-decrement semantics,
          so 0 means the last locked table was just released)
*/
uint trnman_decrement_locked_tables(TRN *trn)
{
  trn->locked_tables-= 1;
  return trn->locked_tables;
}
|
|
|
|
/* Set the locked-table counter to an explicit value (e.g. on statement restart) */
void trnman_reset_locked_tables(TRN *trn, uint locked_tables)
{
  trn->locked_tables= locked_tables;
}
|
|
|
|
|
|
/*
  Default end-of-transaction hook: a no-op that reports success (0).
  trnman_end_trn() always calls through the trnman_end_trans_hook
  pointer, which is initialized to this function.
*/
static my_bool
default_trnman_end_trans_hook(TRN *trn __attribute__ ((unused)),
                              my_bool commit __attribute__ ((unused)),
                              my_bool active_transactions
                              __attribute__ ((unused)))
{
  return 0;
}
|
|
|
|
|
|
/*
  NOTE
    Just as short_id doubles as loid, this function doubles as
    short_trid_to_LOCK_OWNER. See the compile-time assert below.
*/

#ifdef NOT_USED
/* Map a short (2-byte) trid to its active TRN via a lock-free read */
static TRN *short_trid_to_TRN(uint16 short_trid)
{
  TRN *trn;
  /* 'locks' must be first so TRN* and LOCK_OWNER* are interchangeable */
  compile_time_assert(offsetof(TRN, locks) == 0);
  my_atomic_rwlock_rdlock(&LOCK_short_trid_to_trn);
  trn= my_atomic_loadptr((void **)&short_trid_to_active_trn[short_trid]);
  my_atomic_rwlock_rdunlock(&LOCK_short_trid_to_trn);
  return (TRN *)trn;
}
#endif
|
|
|
|
static uchar *trn_get_hash_key(const uchar *trn, size_t *len,
|
|
my_bool unused __attribute__ ((unused)))
|
|
{
|
|
*len= sizeof(TrID);
|
|
return (uchar *) & ((*((TRN **)trn))->trid);
|
|
}
|
|
|
|
|
|
/**
|
|
@brief Initializes transaction manager.
|
|
|
|
@param initial_trid Generated TrIDs will start from initial_trid+1.
|
|
|
|
@return Operation status
|
|
@retval 0 OK
|
|
@retval !=0 Error
|
|
*/
|
|
|
|
int trnman_init(TrID initial_trid)
|
|
{
|
|
DBUG_ENTER("trnman_init");
|
|
|
|
short_trid_to_active_trn= (TRN **)my_malloc(SHORT_TRID_MAX*sizeof(TRN*),
|
|
MYF(MY_WME|MY_ZEROFILL));
|
|
if (unlikely(!short_trid_to_active_trn))
|
|
DBUG_RETURN(1);
|
|
short_trid_to_active_trn--; /* min short_trid is 1 */
|
|
|
|
/*
|
|
Initialize lists.
|
|
active_list_max.min_read_from must be larger than any trid,
|
|
so that when an active list is empty we would could free
|
|
all committed list.
|
|
And committed_list_max itself can not be freed so
|
|
committed_list_max.commit_trid must not be smaller that
|
|
active_list_max.min_read_from
|
|
*/
|
|
|
|
active_list_max.trid= active_list_min.trid= 0;
|
|
active_list_max.min_read_from= ~(ulong) 0;
|
|
active_list_max.next= active_list_min.prev= 0;
|
|
active_list_max.prev= &active_list_min;
|
|
active_list_min.next= &active_list_max;
|
|
|
|
committed_list_max.commit_trid= ~(ulong) 0;
|
|
committed_list_max.next= committed_list_min.prev= 0;
|
|
committed_list_max.prev= &committed_list_min;
|
|
committed_list_min.next= &committed_list_max;
|
|
|
|
trnman_active_transactions= 0;
|
|
trnman_committed_transactions= 0;
|
|
trnman_allocated_transactions= 0;
|
|
|
|
pool= 0;
|
|
global_trid_generator= initial_trid;
|
|
lf_hash_init(&trid_to_committed_trn, sizeof(TRN*), LF_HASH_UNIQUE,
|
|
0, 0, trn_get_hash_key, 0);
|
|
DBUG_PRINT("info", ("pthread_mutex_init LOCK_trn_list"));
|
|
pthread_mutex_init(&LOCK_trn_list, MY_MUTEX_INIT_FAST);
|
|
my_atomic_rwlock_init(&LOCK_short_trid_to_trn);
|
|
my_atomic_rwlock_init(&LOCK_pool);
|
|
|
|
#ifdef NOT_USED
|
|
lockman_init(&maria_lockman, (loid_to_lo_func *)&short_trid_to_TRN, 10000);
|
|
#endif
|
|
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
/*
  NOTE
    this could only be called in the "idle" state - no transaction can be
    running. See asserts below.
*/
void trnman_destroy()
{
  DBUG_ENTER("trnman_destroy");

  if (short_trid_to_active_trn == NULL) /* trnman already destroyed */
    DBUG_VOID_RETURN;
  /* all lists and the committed-trn hash must be empty at this point */
  DBUG_ASSERT(trid_to_committed_trn.count == 0);
  DBUG_ASSERT(trnman_active_transactions == 0);
  DBUG_ASSERT(trnman_committed_transactions == 0);
  DBUG_ASSERT(active_list_max.prev == &active_list_min);
  DBUG_ASSERT(active_list_min.next == &active_list_max);
  DBUG_ASSERT(committed_list_max.prev == &committed_list_min);
  DBUG_ASSERT(committed_list_min.next == &committed_list_max);
  /* drain the TRN reuse pool; no atomics needed since nothing runs */
  while (pool)
  {
    TRN *trn= pool;
    pool= pool->next;
    DBUG_ASSERT(trn->locks.mutex == 0);
    DBUG_ASSERT(trn->locks.cond == 0);
    my_free((void *)trn, MYF(0));
  }
  lf_hash_destroy(&trid_to_committed_trn);
  DBUG_PRINT("info", ("pthread_mutex_destroy LOCK_trn_list"));
  pthread_mutex_destroy(&LOCK_trn_list);
  my_atomic_rwlock_destroy(&LOCK_short_trid_to_trn);
  my_atomic_rwlock_destroy(&LOCK_pool);
  /* +1 undoes the "min short_trid is 1" offset applied in trnman_init() */
  my_free((void *)(short_trid_to_active_trn+1), MYF(0));
  short_trid_to_active_trn= NULL;
#ifdef NOT_USED
  lockman_destroy(&maria_lockman);
#endif
  DBUG_VOID_RETURN;
}
|
|
|
|
/*
  NOTE
    TrID is limited to 6 bytes. Initial value of the generator
    is set by the recovery code - being read from the last checkpoint
    (or 1 on a first run).
*/
static TrID new_trid()
{
  DBUG_ENTER("new_trid");
  /* must not overflow the 6-byte on-disk trid representation */
  DBUG_ASSERT(global_trid_generator < 0xffffffffffffLL);
  DBUG_PRINT("info", ("safe_mutex_assert_owner LOCK_trn_list"));
  /* caller must hold LOCK_trn_list; it serializes the increment below */
  safe_mutex_assert_owner(&LOCK_trn_list);
  DBUG_RETURN(++global_trid_generator);
}
|
|
|
|
/*
  Assign a free slot of short_trid_to_active_trn[] to trn (sets
  trn->short_id).

  Probing starts at a pseudo-random index derived from the trid generator
  and the TRN address, to spread concurrent starters across the array;
  if the scan reaches the end without winning a slot, it restarts from 1.
  Slots are claimed with CAS, so no exclusive lock on the array is needed.
  NOTE(review): if all SHORT_TRID_MAX slots are occupied this loops
  forever - presumably callers guarantee that cannot happen; verify.
*/
static void set_short_trid(TRN *trn)
{
  int i= (int) ((global_trid_generator + (intptr)trn) * 312089 %
                SHORT_TRID_MAX + 1);
  for ( ; !trn->short_id ; i= 1)
  {
    my_atomic_rwlock_wrlock(&LOCK_short_trid_to_trn);
    for ( ; i <= SHORT_TRID_MAX; i++) /* the range is [1..SHORT_TRID_MAX] */
    {
      void *tmp= NULL;
      if (short_trid_to_active_trn[i] == NULL &&
          my_atomic_casptr((void **)&short_trid_to_active_trn[i], &tmp, trn))
      {
        trn->short_id= i;
        break;
      }
    }
    my_atomic_rwlock_wrunlock(&LOCK_short_trid_to_trn);
  }
}
|
|
|
|
/*
|
|
DESCRIPTION
|
|
start a new transaction, allocate and initialize transaction object
|
|
mutex and cond will be used for lock waits
|
|
*/
|
|
|
|
TRN *trnman_new_trn(pthread_mutex_t *mutex, pthread_cond_t *cond,
|
|
void *stack_end)
|
|
{
|
|
TRN *trn;
|
|
DBUG_ENTER("trnman_new_trn");
|
|
|
|
/*
|
|
we have a mutex, to do simple things under it - allocate a TRN,
|
|
increment trnman_active_transactions, set trn->min_read_from.
|
|
|
|
Note that all the above is fast. generating short_trid may be slow,
|
|
as it involves scanning a large array - so it's done outside of the
|
|
mutex.
|
|
*/
|
|
|
|
DBUG_PRINT("info", ("pthread_mutex_lock LOCK_trn_list"));
|
|
pthread_mutex_lock(&LOCK_trn_list);
|
|
|
|
/* Allocating a new TRN structure */
|
|
trn= pool;
|
|
/*
|
|
Popping an unused TRN from the pool
|
|
(ABA isn't possible, we're behind a mutex
|
|
*/
|
|
my_atomic_rwlock_wrlock(&LOCK_pool);
|
|
while (trn && !my_atomic_casptr((void **)&pool, (void **)&trn,
|
|
(void *)trn->next))
|
|
/* no-op */;
|
|
my_atomic_rwlock_wrunlock(&LOCK_pool);
|
|
|
|
/* Nothing in the pool ? Allocate a new one */
|
|
if (!trn)
|
|
{
|
|
/*
|
|
trn should be completely initalized at create time to allow
|
|
one to keep a known state on it.
|
|
(Like redo_lns, which is assumed to be 0 at start of row handling
|
|
and reset to zero before end of row handling)
|
|
*/
|
|
trn= (TRN *)my_malloc(sizeof(TRN), MYF(MY_WME | MY_ZEROFILL));
|
|
if (unlikely(!trn))
|
|
{
|
|
DBUG_PRINT("info", ("pthread_mutex_unlock LOCK_trn_list"));
|
|
pthread_mutex_unlock(&LOCK_trn_list);
|
|
return 0;
|
|
}
|
|
trnman_allocated_transactions++;
|
|
}
|
|
trn->pins= lf_hash_get_pins(&trid_to_committed_trn, stack_end);
|
|
if (!trn->pins)
|
|
{
|
|
trnman_free_trn(trn);
|
|
return 0;
|
|
}
|
|
|
|
trnman_active_transactions++;
|
|
|
|
trn->min_read_from= active_list_min.next->trid;
|
|
|
|
trn->trid= new_trid();
|
|
trn->short_id= 0;
|
|
|
|
trn->next= &active_list_max;
|
|
trn->prev= active_list_max.prev;
|
|
active_list_max.prev= trn->prev->next= trn;
|
|
DBUG_PRINT("info", ("pthread_mutex_unlock LOCK_trn_list"));
|
|
pthread_mutex_unlock(&LOCK_trn_list);
|
|
|
|
if (unlikely(!trn->min_read_from))
|
|
{
|
|
/*
|
|
We are the only transaction. Set min_read_from so that we can read
|
|
our own rows
|
|
*/
|
|
trn->min_read_from= trn->trid + 1;
|
|
}
|
|
|
|
trn->commit_trid= 0;
|
|
trn->rec_lsn= trn->undo_lsn= trn->first_undo_lsn= 0;
|
|
trn->used_tables= 0;
|
|
|
|
trn->locks.mutex= mutex;
|
|
trn->locks.cond= cond;
|
|
trn->locks.waiting_for= 0;
|
|
trn->locks.all_locks= 0;
|
|
#ifdef NOT_USED
|
|
trn->locks.pins= lf_alloc_get_pins(&maria_lockman.alloc);
|
|
#endif
|
|
|
|
trn->locked_tables= 0;
|
|
|
|
/*
|
|
only after the following function TRN is considered initialized,
|
|
so it must be done the last
|
|
*/
|
|
set_short_trid(trn);
|
|
|
|
DBUG_PRINT("exit", ("trn: x%lx trid: 0x%lu",
|
|
(ulong) trn, (ulong) trn->trid));
|
|
|
|
DBUG_RETURN(trn);
|
|
}
|
|
|
|
/*
  remove a trn from the active list.
  if necessary - move to committed list and set commit_trid

  NOTE
    Locks are released at the end. In particular, after placing the
    transaction in commit list, and after setting commit_trid. It's
    important, as commit_trid affects visibility. Locks don't affect
    anything they simply delay execution of other threads - they could be
    released arbitrarily late. In other words, when locks are released it
    serves as a start banner for other threads, they start to run. So
    everything they may need must be ready at that point.

  RETURN
    0  ok
    1  error
*/
my_bool trnman_end_trn(TRN *trn, my_bool commit)
{
  int res= 1;
  TRN *free_me= 0;
  LF_PINS *pins= trn->pins;
  DBUG_ENTER("trnman_end_trn");

  DBUG_ASSERT(trn->rec_lsn == 0);
  /* if a rollback, all UNDO records should have been executed */
  DBUG_ASSERT(commit || trn->undo_lsn == 0);
  DBUG_PRINT("info", ("pthread_mutex_lock LOCK_trn_list"));
  pthread_mutex_lock(&LOCK_trn_list);

  /* remove from active list */
  trn->next->prev= trn->prev;
  trn->prev->next= trn->next;

  /*
    if trn was the oldest active transaction, now that it goes away there
    may be committed transactions in the list which no active transaction
    needs to bother about - clean up the committed list
  */
  if (trn->prev == &active_list_min)
  {
    uint free_me_count;
    TRN *t;
    /* walk past committed trns no longer visible to any active trn */
    for (t= committed_list_min.next, free_me_count= 0;
         t->commit_trid < active_list_min.next->min_read_from;
         t= t->next, free_me_count++) /* no-op */;

    DBUG_ASSERT((t != committed_list_min.next && free_me_count > 0) ||
                (t == committed_list_min.next && free_me_count == 0));
    /* found transactions committed before the oldest active one */
    if (t != committed_list_min.next)
    {
      /* detach [committed_list_min.next .. t->prev] as the free_me chain */
      free_me= committed_list_min.next;
      committed_list_min.next= t;
      t->prev->next= 0;
      t->prev= &committed_list_min;
      trnman_committed_transactions-= free_me_count;
    }
  }

  /*
    if transaction is committed and it was not the only active transaction -
    add it to the committed list (which is used for read-from relation)
  */
  if (commit && active_list_min.next != &active_list_max)
  {
    trn->commit_trid= global_trid_generator;
    trn->next= &committed_list_max;
    trn->prev= committed_list_max.prev;
    trnman_committed_transactions++;

    res= lf_hash_insert(&trid_to_committed_trn, pins, &trn);
    /*
      By going on with life if res<0, we let other threads block on
      our rows (because they will never see us committed in
      trid_to_committed_trn) until they timeout. Though correct, this is not a
      good situation:
      - if connection reconnects and wants to check if its rows have been
      committed, it will not be able to do that (it will just lock on them) so
      connection stays permanently in doubt
      - internal structures trid_to_committed_trn and committed_list are
      desynchronized.
      So we should take Maria down immediately, the two problems being
      automatically solved at restart.
    */
    DBUG_ASSERT(res <= 0);
  }
  if (res)
  {
    /*
      res == 1 means the condition in the if() above was false.
      res == -1 means lf_hash_insert failed
    */
    /* not kept in the committed list: queue trn itself for freeing */
    trn->next= free_me;
    free_me= trn;
  }
  else
  {
    /* complete the committed-list linkage started above */
    committed_list_max.prev= trn->prev->next= trn;
  }
  if ((*trnman_end_trans_hook)(trn, commit,
                               active_list_min.next != &active_list_max))
    res= -1;
  trnman_active_transactions--;
  pthread_mutex_unlock(&LOCK_trn_list);

  /* the rest is done outside of a critical section */
#ifdef NOT_USED
  lockman_release_locks(&maria_lockman, &trn->locks);
#endif
  trn->locks.mutex= 0;
  trn->locks.cond= 0;
  /* release the short trid slot so it can be reused */
  my_atomic_rwlock_rdlock(&LOCK_short_trid_to_trn);
  my_atomic_storeptr((void **)&short_trid_to_active_trn[trn->short_id], 0);
  my_atomic_rwlock_rdunlock(&LOCK_short_trid_to_trn);

  /*
    we, under the mutex, removed going-in-free_me transactions from the
    active and committed lists, thus nobody else may see them when it scans
    those lists, and thus nobody may want to free them. Now we don't
    need a mutex to access free_me list
  */
  /* QQ: send them to the purge thread */
  while (free_me)
  {
    TRN *t= free_me;
    free_me= free_me->next;

    /*
      ignore OOM here. it's harmless, and there's nothing we could do, anyway
    */
    (void)lf_hash_delete(&trid_to_committed_trn, pins, &t->trid, sizeof(TrID));

    trnman_free_trn(t);
  }

  lf_hash_put_pins(pins);
#ifdef NOT_USED
  lf_pinbox_put_pins(trn->locks.pins);
#endif

  DBUG_RETURN(res < 0);
}
|
|
|
|
/*
  free a trn (add to the pool, that is)
  note - we can never really free() a TRN if there's at least one other
  running transaction - see, e.g., how lock waits are implemented in
  lockman.c
  The same is true for other lock-free data structures too. We may need some
  kind of FLUSH command to reset them all - ensuring that no transactions are
  running. It may even be called automatically on checkpoints if no
  transactions are running.
*/
void trnman_free_trn(TRN *trn)
{
  TRN *tmp= pool;

  my_atomic_rwlock_wrlock(&LOCK_pool);
  /* lock-free LIFO push: retry the CAS until we win the pool head */
  do
  {
    /*
      without this volatile cast gcc-3.4.4 moved the assignment
      down after the loop at -O2
    */
    *(TRN * volatile *)&(trn->next)= tmp;
  } while (!my_atomic_casptr((void **)&pool, (void **)&tmp, trn));
  my_atomic_rwlock_wrunlock(&LOCK_pool);
}
|
|
|
|
/*
|
|
NOTE
|
|
here we access the hash in a lock-free manner.
|
|
It's safe, a 'found' TRN can never be freed/reused before we access it.
|
|
In fact, it cannot be freed before 'trn' ends, because a 'found' TRN
|
|
can only be removed from the hash when:
|
|
found->commit_trid < ALL (trn->min_read_from)
|
|
that is, at least
|
|
found->commit_trid < trn->min_read_from
|
|
but
|
|
found->trid >= trn->min_read_from
|
|
and
|
|
found->commit_trid > found->trid
|
|
|
|
RETURN
|
|
1 can
|
|
0 cannot
|
|
-1 error (OOM)
|
|
*/
|
|
int trnman_can_read_from(TRN *trn, TrID trid)
|
|
{
|
|
TRN **found;
|
|
my_bool can;
|
|
LF_REQUIRE_PINS(3);
|
|
|
|
if (trid < trn->min_read_from)
|
|
return 1; /* Row is visible by all transactions in the system */
|
|
|
|
if (trid >= trn->trid)
|
|
{
|
|
/*
|
|
We have now two cases
|
|
trid > trn->trid, in which case the row is from a new transaction
|
|
and not visible, in which case we should return 0.
|
|
trid == trn->trid in which case the row is from the current transaction
|
|
and we should return 1
|
|
*/
|
|
return trid == trn->trid;
|
|
}
|
|
|
|
found= lf_hash_search(&trid_to_committed_trn, trn->pins, &trid, sizeof(trid));
|
|
if (found == NULL)
|
|
return 0; /* not in the hash of committed transactions = cannot read */
|
|
if (found == MY_ERRPTR)
|
|
return -1;
|
|
|
|
can= (*found)->commit_trid < trn->trid;
|
|
lf_hash_search_unpin(trn->pins);
|
|
return can;
|
|
}
|
|
|
|
/* TODO: the stubs below are waiting for savepoints to be implemented */

/* Savepoint stub: will eventually mark the start of a new statement */
void trnman_new_statement(TRN *trn __attribute__ ((unused)))
{
}
|
|
|
|
/* Savepoint stub: will eventually roll back to the last statement start */
void trnman_rollback_statement(TRN *trn __attribute__ ((unused)))
{
}
|
|
|
|
|
|
/**
   @brief Allocates buffers and stores in them some info about transactions

   Does the allocation because the caller cannot know the size itself.
   Memory freeing is to be done by the caller (if the "str" member of the
   LEX_STRING is not NULL).
   The caller has the intention of doing checkpoints.

   @param[out]  str_act    pointer to where the allocated buffer,
                           and its size, will be put; buffer will be filled
                           with info about active transactions
   @param[out]  str_com    pointer to where the allocated buffer,
                           and its size, will be put; buffer will be filled
                           with info about committed transactions
   @param[out]  min_rec_lsn  pointer to where the minimum
                           rec_lsn of all active transactions will be put
   @param[out]  min_first_undo_lsn  pointer to where the minimum
                           first_undo_lsn of all transactions will be put

   @return Operation status
     @retval 0      OK
     @retval 1      Error
*/

my_bool trnman_collect_transactions(LEX_STRING *str_act, LEX_STRING *str_com,
                                    LSN *min_rec_lsn, LSN *min_first_undo_lsn)
{
  my_bool error;
  TRN *trn;
  char *ptr;
  uint stored_transactions= 0;
  LSN minimum_rec_lsn= LSN_MAX, minimum_first_undo_lsn= LSN_MAX;
  DBUG_ENTER("trnman_collect_transactions");

  DBUG_ASSERT((NULL == str_act->str) && (NULL == str_com->str));

  /* validate the use of read_non_atomic() in general: */
  compile_time_assert((sizeof(LSN) == 8) && (sizeof(LSN_WITH_FLAGS) == 8));
  pthread_mutex_lock(&LOCK_trn_list);
  /* upper-bound buffer sizes; trimmed to actual length after the loop */
  str_act->length= 2 +                /* number of active transactions */
    LSN_STORE_SIZE +                  /* minimum of their rec_lsn */
    TRANSID_SIZE +                    /* current TrID generator value */
    (2 +                              /* short id */
     6 +                              /* long id */
     LSN_STORE_SIZE +                 /* undo_lsn */
#ifdef MARIA_VERSIONING /* not enabled yet */
     LSN_STORE_SIZE +                 /* undo_purge_lsn */
#endif
     LSN_STORE_SIZE                   /* first_undo_lsn */
     ) * trnman_active_transactions;
  str_com->length= 4 +                /* number of committed transactions */
    (6 +                              /* long id */
#ifdef MARIA_VERSIONING /* not enabled yet */
     LSN_STORE_SIZE +                 /* undo_purge_lsn */
#endif
     LSN_STORE_SIZE                   /* first_undo_lsn */
     ) * trnman_committed_transactions;
  if ((NULL == (str_act->str= my_malloc(str_act->length, MYF(MY_WME)))) ||
      (NULL == (str_com->str= my_malloc(str_com->length, MYF(MY_WME)))))
    goto err;
  /* First, the active transactions */
  ptr= str_act->str + 2 + LSN_STORE_SIZE;
  transid_store(ptr, global_trid_generator);
  ptr+= TRANSID_SIZE;
  for (trn= active_list_min.next; trn != &active_list_max; trn= trn->next)
  {
    /*
      trns with a short trid of 0 are not even initialized, we can ignore
      them. trns with undo_lsn==0 have done no writes, we can ignore them
      too. XID not needed now.
    */
    uint sid;
    LSN rec_lsn, undo_lsn, first_undo_lsn;
    if ((sid= trn->short_id) == 0)
    {
      /*
        Not even inited, has done nothing. Or it is the
        dummy_transaction_object, which does only non-transactional
        immediate-sync operations (CREATE/DROP/RENAME/REPAIR TABLE), and so
        can be forgotten for Checkpoint.
      */
      continue;
    }
    /* needed for low-water mark calculation */
    if (((rec_lsn= lsn_read_non_atomic(trn->rec_lsn)) > 0) &&
        (cmp_translog_addr(rec_lsn, minimum_rec_lsn) < 0))
      minimum_rec_lsn= rec_lsn;
    /*
      trn may have logged REDOs but not yet UNDO, that's why we read rec_lsn
      before deciding to ignore if undo_lsn==0.
    */
    if ((undo_lsn= trn->undo_lsn) == 0) /* trn can be forgotten */
      continue;
    stored_transactions++;
    int2store(ptr, sid);
    ptr+= 2;
    int6store(ptr, trn->trid);
    ptr+= 6;
    lsn_store(ptr, undo_lsn); /* needed for rollback */
    ptr+= LSN_STORE_SIZE;
    /* needed for low-water mark calculation */
    if (((first_undo_lsn= lsn_read_non_atomic(trn->first_undo_lsn)) > 0) &&
        (cmp_translog_addr(first_undo_lsn, minimum_first_undo_lsn) < 0))
      minimum_first_undo_lsn= first_undo_lsn;
    lsn_store(ptr, first_undo_lsn);
    ptr+= LSN_STORE_SIZE;
#ifdef MARIA_VERSIONING /* not enabled yet */
    /* to know where purging should start (last delete of this trn) */
    lsn_store(ptr, trn->undo_purge_lsn);
    ptr+= LSN_STORE_SIZE;
#endif
    /**
       @todo RECOVERY: add a comment explaining why we can dirtily read some
       vars, inspired by the text of "assumption 8" in WL#3072
    */
  }
  str_act->length= ptr - str_act->str; /* as we maybe over-estimated */
  ptr= str_act->str;
  DBUG_PRINT("info",("collected %u active transactions",
                     (uint)stored_transactions));
  int2store(ptr, stored_transactions);
  ptr+= 2;
  /* this LSN influences how REDOs for any page can be ignored by Recovery */
  lsn_store(ptr, minimum_rec_lsn);
  /* one day there will also be a list of prepared transactions */
  /* do the same for committed ones */
  ptr= str_com->str;
  int4store(ptr, trnman_committed_transactions);
  ptr+= 4;
  DBUG_PRINT("info",("collected %u committed transactions",
                     (uint)trnman_committed_transactions));
  for (trn= committed_list_min.next; trn != &committed_list_max;
       trn= trn->next)
  {
    LSN first_undo_lsn;
    int6store(ptr, trn->trid);
    ptr+= 6;
#ifdef MARIA_VERSIONING /* not enabled yet */
    lsn_store(ptr, trn->undo_purge_lsn);
    ptr+= LSN_STORE_SIZE;
#endif
    first_undo_lsn= LSN_WITH_FLAGS_TO_LSN(trn->first_undo_lsn);
    if (cmp_translog_addr(first_undo_lsn, minimum_first_undo_lsn) < 0)
      minimum_first_undo_lsn= first_undo_lsn;
    lsn_store(ptr, first_undo_lsn);
    ptr+= LSN_STORE_SIZE;
  }
  /*
    TODO: if we see there exists no transaction (active and committed) we can
    tell the lock-free structures to do some freeing (my_free()).
  */
  error= 0;
  *min_rec_lsn= minimum_rec_lsn;
  *min_first_undo_lsn= minimum_first_undo_lsn;
  goto end;
err:
  error= 1;
end:
  pthread_mutex_unlock(&LOCK_trn_list);
  DBUG_RETURN(error);
}
|
|
|
|
|
|
/**
  Re-create a transaction object during recovery with the short and long
  ids read from the log, instead of the freshly generated ones.

  Only runs in single-threaded recovery (asserted below), so the global
  structures can be touched without locking.

  @param shortid  16-bit transaction id to install
  @param longid   full TrID to install

  @return the re-created TRN, or NULL on failure of trnman_new_trn()
*/

TRN *trnman_recreate_trn_from_recovery(uint16 shortid, TrID longid)
{
  TRN *new_trn;
  TrID saved_trid_generator= global_trid_generator;
  DBUG_ASSERT(maria_in_recovery && !maria_multi_threaded);
  new_trn= trnman_new_trn(NULL, NULL, NULL);
  if (unlikely(new_trn == NULL))
    return NULL;
  /* undo the id generation and short-id registration of trnman_new_trn() */
  global_trid_generator= saved_trid_generator;
  set_if_bigger(global_trid_generator, longid);
  short_trid_to_active_trn[new_trn->short_id]= 0;
  /* install the ids recovered from the log */
  DBUG_ASSERT(short_trid_to_active_trn[shortid] == NULL);
  short_trid_to_active_trn[shortid]= new_trn;
  new_trn->trid= longid;
  new_trn->short_id= shortid;
  return new_trn;
}
|
|
|
|
|
|
TRN *trnman_get_any_trn()
|
|
{
|
|
TRN *trn= active_list_min.next;
|
|
return (trn != &active_list_max) ? trn : NULL;
|
|
}
|
|
|
|
|
|
/**
|
|
Returns the minimum existing transaction id.
|
|
*/
|
|
|
|
TrID trnman_get_min_trid()
|
|
{
|
|
TrID min_read_from;
|
|
if (short_trid_to_active_trn == NULL)
|
|
{
|
|
/* Transaction manager not initialize; Probably called from maria_chk */
|
|
return ~(TrID) 0;
|
|
}
|
|
|
|
pthread_mutex_lock(&LOCK_trn_list);
|
|
min_read_from= active_list_min.next->min_read_from;
|
|
pthread_mutex_unlock(&LOCK_trn_list);
|
|
return min_read_from;
|
|
}
|
|
|
|
|
|
/**
|
|
Returns maximum transaction id given to a transaction so far.
|
|
*/
|
|
|
|
TrID trnman_get_max_trid()
|
|
{
|
|
TrID id;
|
|
if (short_trid_to_active_trn == NULL)
|
|
return 0;
|
|
pthread_mutex_lock(&LOCK_trn_list);
|
|
id= global_trid_generator;
|
|
pthread_mutex_unlock(&LOCK_trn_list);
|
|
return id;
|
|
}
|
|
|
|
/**
|
|
Check if there exist an active transaction between two commit_id's
|
|
|
|
@todo
|
|
Improve speed of this.
|
|
- Store transactions in tree or skip list
|
|
- Have function to copying all active transaction id's to b-tree
|
|
and use b-tree for checking states. This could be a big win
|
|
for checkpoint that will call this function for a lot of objects.
|
|
|
|
@return
|
|
0 No transaction exists
|
|
1 There is at least on active transaction in the given range
|
|
*/
|
|
|
|
my_bool trnman_exists_active_transactions(TrID min_id, TrID max_id,
|
|
my_bool trnman_is_locked)
|
|
{
|
|
TRN *trn;
|
|
my_bool ret= 0;
|
|
|
|
if (!trnman_is_locked)
|
|
pthread_mutex_lock(&LOCK_trn_list);
|
|
for (trn= active_list_min.next; trn != &active_list_max; trn= trn->next)
|
|
{
|
|
if (trn->trid > min_id && trn->trid < max_id)
|
|
{
|
|
ret= 1;
|
|
break;
|
|
}
|
|
}
|
|
if (!trnman_is_locked)
|
|
pthread_mutex_unlock(&LOCK_trn_list);
|
|
return ret;
|
|
}
|