mirror of
https://github.com/MariaDB/server.git
synced 2026-05-14 19:07:15 +02:00
Creation of mysql-trunk = {summit + "Innodb plugin replacing the builtin"}:
bzr branch mysql-5.1-performance-version mysql-trunk   # Summit
cd mysql-trunk
bzr merge mysql-5.1-innodb_plugin                      # which is 5.1 + InnoDB plugin
bzr rm innobase                                        # remove the builtin
Next step: build, then fix tests.
This commit is contained in:
commit
b57e4dbd88
1102 changed files with 135661 additions and 71407 deletions
|
|
@ -1,248 +0,0 @@
|
|||
/******************************************************
Mutex, the basic synchronization primitive

(c) 1995 Innobase Oy

Created 9/5/1995 Heikki Tuuri
*******************************************************/
|
||||
|
||||
/**********************************************************************
Sets the waiters field in a mutex. */
/* NOTE(review): these are declarations only; presumably implemented in
sync0sync.c -- confirm against the source file. */

void
mutex_set_waiters(
/*==============*/
	mutex_t*	mutex,	/* in: mutex */
	ulint		n);	/* in: value to set */
/**********************************************************************
Reserves a mutex for the current thread. If the mutex is reserved, the
function spins a preset time (controlled by SYNC_SPIN_ROUNDS) waiting
for the mutex before suspending the thread. */

void
mutex_spin_wait(
/*============*/
	mutex_t*	mutex,		/* in: pointer to mutex */
	const char*	file_name,	/* in: file name where mutex
					requested */
	ulint		line);		/* in: line where requested */
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Sets the debug information for a reserved mutex. */

void
mutex_set_debug_info(
/*=================*/
	mutex_t*	mutex,		/* in: mutex */
	const char*	file_name,	/* in: file where requested */
	ulint		line);		/* in: line where requested */
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Releases the threads waiting in the primary wait array for this mutex. */

void
mutex_signal_object(
/*================*/
	mutex_t*	mutex);	/* in: mutex */
|
||||
|
||||
/**********************************************************************
Performs an atomic test-and-set instruction to the lock_word field of a
mutex. */
UNIV_INLINE
byte
mutex_test_and_set(
/*===============*/
			/* out: the previous value of lock_word: 0 or
			1 */
	mutex_t*	mutex)	/* in: mutex */
{
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
	byte	res;
	byte*	lw;		/* assembler code is used to ensure that
				lock_word is loaded from memory */
	ut_ad(mutex);
	ut_ad(sizeof(byte) == 1);

	lw = &(mutex->lock_word);

	/* XCHG with a memory operand is implicitly LOCKed on x86, so
	this swap is atomic: DL receives the old lock_word while the
	lock_word becomes 1. */
	__asm	MOV	ECX, lw
	__asm	MOV	EDX, 1
	__asm	XCHG	DL, BYTE PTR [ECX]
	__asm	MOV	res, DL

	/* The fence below would prevent this thread from
	reading the data structure protected by the mutex
	before the test-and-set operation is committed, but
	the fence is apparently not needed:

	In a posting to comp.arch newsgroup (August 10, 1997)
	Andy Glew said that in P6 a LOCKed instruction like
	XCHG establishes a fence with respect to memory reads
	and writes and thus an explicit fence is not
	needed. In P5 he seemed to agree with a previous
	newsgroup poster that LOCKed instructions serialize
	all instruction execution, and, consequently, also
	memory operations. This is confirmed in Intel Software
	Dev. Manual, Vol. 3. */

	/* mutex_fence(); */

	return(res);
#elif defined(MY_ATOMIC_NOLOCK)
	/* Atomically swap 1 into lock_word and return the previous
	value. */
	return ((byte)my_atomic_swap8(
			(int8 volatile *)&(mutex->lock_word), 1));
#else
	/* Fallback: an OS fast mutex provides the atomicity; lock_word
	here only mirrors the locked state (asserted 0 before we set it
	to 1 below). */
	ibool	ret;

	ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));

	if (ret == 0) {
		/* We check that os_fast_mutex_trylock does not leak
		and allow race conditions */
		ut_a(mutex->lock_word == 0);

		mutex->lock_word = 1;
	}

	return((byte)ret);
#endif
}
|
||||
|
||||
/**********************************************************************
Performs a reset instruction to the lock_word field of a mutex. This
instruction also serializes memory operations to the program order. */
UNIV_INLINE
void
mutex_reset_lock_word(
/*==================*/
	mutex_t*	mutex)	/* in: mutex */
{
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
	byte*	lw;		/* assembler code is used to ensure that
				lock_word is loaded from memory */
	ut_ad(mutex);

	lw = &(mutex->lock_word);

	/* XCHG (implicitly LOCKed) stores 0 into lock_word atomically;
	the previous value left in DL is discarded. */
	__asm	MOV	EDX, 0
	__asm	MOV	ECX, lw
	__asm	XCHG	DL, BYTE PTR [ECX]
#elif defined(MY_ATOMIC_NOLOCK)
	/* In theory __sync_lock_release should be used to release the lock.
	Unfortunately, it does not work properly alone. The workaround is
	that the more conservative __sync_lock_test_and_set is used instead
	(via my_atomic_swap8). */
	(void)my_atomic_swap8((int8 volatile *)&(mutex->lock_word), 0);
#else
	/* Clear the mirror word first, then release the underlying OS
	fast mutex that actually protects the critical section. */
	mutex->lock_word = 0;

	os_fast_mutex_unlock(&(mutex->os_fast_mutex));
#endif
}
|
||||
|
||||
/**********************************************************************
|
||||
Gets the value of the lock word. */
|
||||
UNIV_INLINE
|
||||
byte
|
||||
mutex_get_lock_word(
|
||||
/*================*/
|
||||
const mutex_t* mutex) /* in: mutex */
|
||||
{
|
||||
const volatile byte* ptr; /* declared volatile to ensure that
|
||||
lock_word is loaded from memory */
|
||||
ut_ad(mutex);
|
||||
|
||||
ptr = &(mutex->lock_word);
|
||||
|
||||
return(*ptr);
|
||||
}
|
||||
|
||||
/**********************************************************************
|
||||
Gets the waiters field in a mutex. */
|
||||
UNIV_INLINE
|
||||
ulint
|
||||
mutex_get_waiters(
|
||||
/*==============*/
|
||||
/* out: value to set */
|
||||
const mutex_t* mutex) /* in: mutex */
|
||||
{
|
||||
const volatile ulint* ptr; /* declared volatile to ensure that
|
||||
the value is read from memory */
|
||||
ut_ad(mutex);
|
||||
|
||||
ptr = &(mutex->waiters);
|
||||
|
||||
return(*ptr); /* Here we assume that the read of a single
|
||||
word from memory is atomic */
|
||||
}
|
||||
|
||||
/**********************************************************************
Unlocks a mutex owned by the current thread. */
UNIV_INLINE
void
mutex_exit(
/*=======*/
	mutex_t*	mutex)	/* in: pointer to mutex */
{
	ut_ad(mutex_own(mutex));

	/* Debug builds: mark the mutex as no longer owned by any
	thread before releasing the lock word. */
	ut_d(mutex->thread_id = (os_thread_id_t) ULINT_UNDEFINED);

#ifdef UNIV_SYNC_DEBUG
	sync_thread_reset_level(mutex);
#endif
	mutex_reset_lock_word(mutex);

	/* A problem: we assume that mutex_reset_lock_word
	is a memory barrier, that is when we read the waiters
	field next, the read must be serialized in memory
	after the reset. A speculative processor might
	perform the read first, which could leave a waiting
	thread hanging indefinitely.

	Our current solution is to call
	sync_arr_wake_threads_if_sema_free() every second to
	wake up possible hanging threads if they were missed
	in mutex_signal_object. */

	if (mutex_get_waiters(mutex) != 0) {

		mutex_signal_object(mutex);
	}

#ifdef UNIV_SYNC_PERF_STAT
	mutex_exit_count++;
#endif
}
|
||||
|
||||
/**********************************************************************
|
||||
Locks a mutex for the current thread. If the mutex is reserved, the function
|
||||
spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting for the mutex
|
||||
before suspending the thread. */
|
||||
UNIV_INLINE
|
||||
void
|
||||
mutex_enter_func(
|
||||
/*=============*/
|
||||
mutex_t* mutex, /* in: pointer to mutex */
|
||||
const char* file_name, /* in: file name where locked */
|
||||
ulint line) /* in: line where locked */
|
||||
{
|
||||
ut_ad(mutex_validate(mutex));
|
||||
ut_ad(!mutex_own(mutex));
|
||||
|
||||
/* Note that we do not peek at the value of lock_word before trying
|
||||
the atomic test_and_set; we could peek, and possibly save time. */
|
||||
|
||||
#if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
|
||||
mutex->count_using++;
|
||||
#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
|
||||
|
||||
if (!mutex_test_and_set(mutex)) {
|
||||
ut_d(mutex->thread_id = os_thread_get_curr_id());
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
mutex_set_debug_info(mutex, file_name, line);
|
||||
#endif
|
||||
return; /* Succeeded! */
|
||||
}
|
||||
|
||||
mutex_spin_wait(mutex, file_name, line);
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue