mirror of
https://github.com/MariaDB/server.git
synced 2026-05-15 19:37:16 +02:00
Applied InnoDB snapshot innodb-5.1-ss2298
Fixes the following bugs: - Bug #33349: possible race condition revolving around data dictionary and repartitioning Introduce retry/sleep logic as a workaround for a transient bug where ::open fails for partitioned tables randomly if we are using one file per table. - Bug #34053: normal users can enable innodb_monitor logging In CREATE TABLE and DROP TABLE check whether the table in question is one of the magic innodb_monitor tables and whether the user has enough rights to mess with it before doing anything else. - Bug #22868: 'Thread thrashing' with > 50 concurrent conns under an upd-intensive workload - Bug #29560: InnoDB >= 5.0.30 hangs on adaptive hash rw-lock 'waiting for an X-lock' This is a combination of changes that forward port the scalability fix applied to 5.0 through r1001. It reverts changes r149 and r122 (these were 5.1 specific changes made in lieu of scalability fix of 5.0) Then it applies r1001 to 5.0 which is the original scalability fix. Finally it applies r2082 which fixes an issue with the original fix. - Bug #30930: Add auxiliary function to retrieve THD::thread_id Add thd_get_thread_id() function. Also make check_global_access() function visible to InnoDB under INNODB_COMPATIBILITY_HOOKS #define.
This commit is contained in:
parent
a6d50c102c
commit
b8b6c7fcf7
24 changed files with 629 additions and 332 deletions
|
|
@ -112,9 +112,13 @@ os_event_set(
|
|||
os_event_t event); /* in: event to set */
|
||||
/**************************************************************
|
||||
Resets an event semaphore to the nonsignaled state. Waiting threads will
|
||||
stop to wait for the event. */
|
||||
stop to wait for the event.
|
||||
The return value should be passed to os_even_wait_low() if it is desired
|
||||
that this thread should not wait in case of an intervening call to
|
||||
os_event_set() between this os_event_reset() and the
|
||||
os_event_wait_low() call. See comments for os_event_wait_low(). */
|
||||
|
||||
void
|
||||
ib_longlong
|
||||
os_event_reset(
|
||||
/*===========*/
|
||||
os_event_t event); /* in: event to reset */
|
||||
|
|
@ -125,16 +129,38 @@ void
|
|||
os_event_free(
|
||||
/*==========*/
|
||||
os_event_t event); /* in: event to free */
|
||||
|
||||
/**************************************************************
|
||||
Waits for an event object until it is in the signaled state. If
|
||||
srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS this also exits the
|
||||
waiting thread when the event becomes signaled (or immediately if the
|
||||
event is already in the signaled state). */
|
||||
event is already in the signaled state).
|
||||
|
||||
Typically, if the event has been signalled after the os_event_reset()
|
||||
we'll return immediately because event->is_set == TRUE.
|
||||
There are, however, situations (e.g.: sync_array code) where we may
|
||||
lose this information. For example:
|
||||
|
||||
thread A calls os_event_reset()
|
||||
thread B calls os_event_set() [event->is_set == TRUE]
|
||||
thread C calls os_event_reset() [event->is_set == FALSE]
|
||||
thread A calls os_event_wait() [infinite wait!]
|
||||
thread C calls os_event_wait() [infinite wait!]
|
||||
|
||||
Where such a scenario is possible, to avoid infinite wait, the
|
||||
value returned by os_event_reset() should be passed in as
|
||||
reset_sig_count. */
|
||||
|
||||
#define os_event_wait(event) os_event_wait_low((event), 0)
|
||||
|
||||
void
|
||||
os_event_wait(
|
||||
/*==========*/
|
||||
os_event_t event); /* in: event to wait */
|
||||
os_event_wait_low(
|
||||
/*==============*/
|
||||
os_event_t event, /* in: event to wait */
|
||||
ib_longlong reset_sig_count);/* in: zero or the value
|
||||
returned by previous call of
|
||||
os_event_reset(). */
|
||||
|
||||
/**************************************************************
|
||||
Waits for an event object until it is in the signaled state or
|
||||
a timeout is exceeded. In Unix the timeout is always infinite. */
|
||||
|
|
|
|||
|
|
@ -111,10 +111,6 @@ struct read_view_struct{
|
|||
dulint undo_no; /* (0, 0) or if type is VIEW_HIGH_GRANULARITY
|
||||
transaction undo_no when this high-granularity
|
||||
consistent read view was created */
|
||||
ibool can_be_too_old; /* TRUE if the system has had to purge old
|
||||
versions which this read view should be able
|
||||
to access: the read view can bump into the
|
||||
DB_MISSING_HISTORY error */
|
||||
dulint low_limit_no; /* The view does not need to see the undo
|
||||
logs for transactions whose transaction number
|
||||
is strictly smaller (<) than this value: they
|
||||
|
|
|
|||
|
|
@ -319,7 +319,7 @@ row_mysql_unfreeze_data_dictionary(
|
|||
/*===============================*/
|
||||
trx_t* trx); /* in: transaction */
|
||||
/*************************************************************************
|
||||
Drops a table for MySQL. If the name of the table ends in
|
||||
Creates a table for MySQL. If the name of the table ends in
|
||||
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
|
||||
"innodb_table_monitor", then this will also start the printing of monitor
|
||||
output by the master thread. If the table name ends in "innodb_mem_validate",
|
||||
|
|
@ -464,6 +464,16 @@ row_check_table_for_mysql(
|
|||
row_prebuilt_t* prebuilt); /* in: prebuilt struct in MySQL
|
||||
handle */
|
||||
|
||||
/*************************************************************************
|
||||
Determines if a table is a magic monitor table. */
|
||||
|
||||
ibool
|
||||
row_is_magic_monitor_table(
|
||||
/*=======================*/
|
||||
/* out: TRUE if monitor table */
|
||||
const char* table_name); /* in: name of the table, in the
|
||||
form database/table_name */
|
||||
|
||||
/* A struct describing a place for an individual column in the MySQL
|
||||
row format which is presented to the table handler in ha_innobase.
|
||||
This template struct is used to speed up row transformations between
|
||||
|
|
|
|||
|
|
@ -66,26 +66,21 @@ sync_array_wait_event(
|
|||
sync_array_t* arr, /* in: wait array */
|
||||
ulint index); /* in: index of the reserved cell */
|
||||
/**********************************************************************
|
||||
Frees the cell safely by reserving the sync array mutex and decrementing
|
||||
n_reserved if necessary. Should only be called from mutex_spin_wait. */
|
||||
Frees the cell. NOTE! sync_array_wait_event frees the cell
|
||||
automatically! */
|
||||
|
||||
void
|
||||
sync_array_free_cell_protected(
|
||||
/*===========================*/
|
||||
sync_array_free_cell(
|
||||
/*=================*/
|
||||
sync_array_t* arr, /* in: wait array */
|
||||
ulint index); /* in: index of the cell in array */
|
||||
/**************************************************************************
|
||||
Looks for the cells in the wait array which refer
|
||||
to the wait object specified,
|
||||
and sets their corresponding events to the signaled state. In this
|
||||
way releases the threads waiting for the object to contend for the object.
|
||||
It is possible that no such cell is found, in which case does nothing. */
|
||||
Note that one of the wait objects was signalled. */
|
||||
|
||||
void
|
||||
sync_array_signal_object(
|
||||
/*=====================*/
|
||||
sync_array_t* arr, /* in: wait array */
|
||||
void* object);/* in: wait object */
|
||||
sync_array_object_signalled(
|
||||
/*========================*/
|
||||
sync_array_t* arr); /* in: wait array */
|
||||
/**************************************************************************
|
||||
If the wakeup algorithm does not work perfectly at semaphore relases,
|
||||
this function will do the waking (see the comment in mutex_exit). This
|
||||
|
|
|
|||
|
|
@ -421,6 +421,18 @@ blocked by readers, a writer may queue for the lock by setting the writer
|
|||
field. Then no new readers are allowed in. */
|
||||
|
||||
struct rw_lock_struct {
|
||||
os_event_t event; /* Used by sync0arr.c for thread queueing */
|
||||
|
||||
#ifdef __WIN__
|
||||
os_event_t wait_ex_event; /* This windows specific event is
|
||||
used by the thread which has set the
|
||||
lock state to RW_LOCK_WAIT_EX. The
|
||||
rw_lock design guarantees that this
|
||||
thread will be the next one to proceed
|
||||
once the current the event gets
|
||||
signalled. See LEMMA 2 in sync0sync.c */
|
||||
#endif
|
||||
|
||||
ulint reader_count; /* Number of readers who have locked this
|
||||
lock in the shared mode */
|
||||
ulint writer; /* This field is set to RW_LOCK_EX if there
|
||||
|
|
|
|||
|
|
@ -381,7 +381,11 @@ rw_lock_s_unlock_func(
|
|||
mutex_exit(mutex);
|
||||
|
||||
if (UNIV_UNLIKELY(sg)) {
|
||||
sync_array_signal_object(sync_primary_wait_array, lock);
|
||||
#ifdef __WIN__
|
||||
os_event_set(lock->wait_ex_event);
|
||||
#endif
|
||||
os_event_set(lock->event);
|
||||
sync_array_object_signalled(sync_primary_wait_array);
|
||||
}
|
||||
|
||||
ut_ad(rw_lock_validate(lock));
|
||||
|
|
@ -461,7 +465,11 @@ rw_lock_x_unlock_func(
|
|||
mutex_exit(&(lock->mutex));
|
||||
|
||||
if (UNIV_UNLIKELY(sg)) {
|
||||
sync_array_signal_object(sync_primary_wait_array, lock);
|
||||
#ifdef __WIN__
|
||||
os_event_set(lock->wait_ex_event);
|
||||
#endif
|
||||
os_event_set(lock->event);
|
||||
sync_array_object_signalled(sync_primary_wait_array);
|
||||
}
|
||||
|
||||
ut_ad(rw_lock_validate(lock));
|
||||
|
|
|
|||
|
|
@ -470,6 +470,7 @@ Do not use its fields directly! The structure used in the spin lock
|
|||
implementation of a mutual exclusion semaphore. */
|
||||
|
||||
struct mutex_struct {
|
||||
os_event_t event; /* Used by sync0arr.c for the wait queue */
|
||||
ulint lock_word; /* This ulint is the target of the atomic
|
||||
test-and-set instruction in Win32 */
|
||||
#if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
|
||||
|
|
|
|||
|
|
@ -211,7 +211,7 @@ mutex_exit(
|
|||
perform the read first, which could leave a waiting
|
||||
thread hanging indefinitely.
|
||||
|
||||
Our current solution call every 10 seconds
|
||||
Our current solution call every second
|
||||
sync_arr_wake_threads_if_sema_free()
|
||||
to wake up possible hanging threads if
|
||||
they are missed in mutex_signal_object. */
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue