/* Copyright (C) 2012 Monty Program Ab

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

#if (defined HAVE_POOL_OF_THREADS) && !defined(EMBEDDED_LIBRARY)

#include "threadpool_generic.h"
#include "mariadb.h"
#include <violite.h>
#include <sql_priv.h>
#include <sql_class.h>
#include <my_pthread.h>
#include <scheduler.h>
#include <sql_connect.h>
#include <mysqld.h>
#include <debug_sync.h>
#include <time.h>
#include <sql_plist.h>
#include <threadpool.h>
#include <algorithm>

#ifdef HAVE_IOCP
#define OPTIONAL_IO_POLL_READ_PARAM this
#else
#define OPTIONAL_IO_POLL_READ_PARAM 0
#endif

static void io_poll_close(TP_file_handle fd)
{
#ifdef _WIN32
  CloseHandle(fd);
#else
  close(fd);
#endif
}


/** Maximum number of native events a listener can read in one go */
#define MAX_EVENTS 1024

/** Indicates that threadpool was initialized */
static bool threadpool_started= false;

/*
  Define PSI Keys for performance schema.
  We have a mutex per group, worker threads, a condition per worker thread,
  and a timer thread with its own mutex and condition.
*/

#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key key_group_mutex;
static PSI_mutex_key key_timer_mutex;
static PSI_mutex_info mutex_list[]=
{
  { &key_group_mutex, "group_mutex", 0},
  { &key_timer_mutex, "timer_mutex", PSI_FLAG_GLOBAL}
};

static PSI_cond_key key_worker_cond;
static PSI_cond_key key_timer_cond;
static PSI_cond_info cond_list[]=
{
  { &key_worker_cond, "worker_cond", 0},
  { &key_timer_cond, "timer_cond", PSI_FLAG_GLOBAL}
};

static PSI_thread_key key_worker_thread;
static PSI_thread_key key_timer_thread;
static PSI_thread_info thread_list[] =
{
  {&key_worker_thread, "worker_thread", 0},
  {&key_timer_thread, "timer_thread", PSI_FLAG_GLOBAL}
};

/* Macro to simplify performance schema registration */
#define PSI_register(X) \
  if(PSI_server) PSI_server->register_ ## X("threadpool", X ## _list, array_elements(X ## _list))
#else
#define PSI_register(X) /* no-op */
#endif
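
/*
  Usage sketch (an assumption about call sites outside this excerpt): the
  keys above are expected to be registered once at pool startup, e.g.

    PSI_register(mutex);
    PSI_register(cond);
    PSI_register(thread);
*/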

thread_group_t *all_groups;
static uint group_count;
static Atomic_counter<uint32_t> shutdown_group_count;

/**
  Used for printing "pool blocked" message, see
  print_pool_blocked_message();
*/
static ulonglong pool_block_start;

/* Global timer for all groups  */
struct pool_timer_t
{
  mysql_mutex_t mutex;
  mysql_cond_t cond;
  volatile uint64 current_microtime;
  std::atomic<uint64_t> next_timeout_check;
  int  tick_interval;
  bool shutdown;
  pthread_t timer_thread_id;
};

static pool_timer_t pool_timer;

static void queue_put(thread_group_t *thread_group, TP_connection_generic *connection);
static void queue_put(thread_group_t *thread_group, native_event *ev, int cnt);
static int  wake_thread(thread_group_t *thread_group, bool due_to_stall);
static int  wake_or_create_thread(thread_group_t *thread_group, bool due_to_stall= false);
static int  create_worker(thread_group_t *thread_group, bool due_to_stall);
static void *worker_main(void *param);
static void check_stall(thread_group_t *thread_group);
static void set_next_timeout_check(ulonglong abstime);
static void print_pool_blocked_message(bool);

/**
  Asynchronous network IO.

  We use the native edge-triggered network IO multiplexing facility.
  This maps to different APIs on different Unixes.

  Supported are currently Linux with epoll, Solaris with event ports,
  OSX and BSD with kevent, and Windows with IOCP. All those APIs are used
  with one-shot flags (the event is signalled once the client has written
  something into the socket, then the socket is removed from the "poll-set"
  until the command is finished, and we need to re-arm/re-register the
  socket).

  No implementation for poll/select is currently provided.

  The API closely resembles all of the above mentioned platform APIs
  and consists of the following functions.

  - io_poll_create()
  Creates an io_poll descriptor.
  On Linux: epoll_create()

  - io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void *opt)
  Associate a file descriptor with an io poll descriptor.
  On Linux: epoll_ctl(..EPOLL_CTL_ADD)

  - io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
  Disassociate a file descriptor from an io poll descriptor.
  On Linux: epoll_ctl(..EPOLL_CTL_DEL)

  - io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data, void *opt)
  The same as io_poll_associate_fd(), but cannot be used before
  io_poll_associate_fd() was called.
  On Linux: epoll_ctl(..EPOLL_CTL_MOD)

  - io_poll_wait(TP_file_handle pollfd, native_event *native_events, int maxevents,
    int timeout_ms)
  Wait until one or more descriptors added with io_poll_associate_fd()
  or io_poll_start_read() becomes readable. Data associated with the
  descriptors can be retrieved from the native_events array, using the
  native_event_get_userdata() function.

  On Linux: epoll_wait()
*/
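/*
  An illustrative (non-compiled) sketch of how these primitives compose,
  with a hypothetical handler and socket; error handling omitted:

    TP_file_handle pollfd= io_poll_create();
    io_poll_associate_fd(pollfd, sock_fd, connection_data, 0);
    for (;;)
    {
      native_event ev[16];
      int cnt= io_poll_wait(pollfd, ev, 16, -1);
      for (int i= 0; i < cnt; i++)
      {
        void *data= native_event_get_userdata(&ev[i]);
        handle_readable(data);                        // hypothetical
        io_poll_start_read(pollfd, sock_fd, data, 0); // re-arm one-shot event
      }
    }
*/
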
#if defined (__linux__)
#ifndef EPOLLRDHUP
/* Early 2.6 kernels did not have EPOLLRDHUP */
#define EPOLLRDHUP 0
#endif

static TP_file_handle io_poll_create()
{
  return epoll_create(1);
}


int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void*)
{
  struct epoll_event ev;
  ev.data.u64= 0; /* Keep valgrind happy */
  ev.data.ptr= data;
  ev.events= EPOLLIN|EPOLLET|EPOLLERR|EPOLLRDHUP|EPOLLONESHOT;
  return epoll_ctl(pollfd, EPOLL_CTL_ADD, fd, &ev);
}


int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
  struct epoll_event ev;
  ev.data.u64= 0; /* Keep valgrind happy */
  ev.data.ptr= data;
  ev.events= EPOLLIN|EPOLLET|EPOLLERR|EPOLLRDHUP|EPOLLONESHOT;
  return epoll_ctl(pollfd, EPOLL_CTL_MOD, fd, &ev);
}


int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
  struct epoll_event ev;
  return epoll_ctl(pollfd, EPOLL_CTL_DEL, fd, &ev);
}


/*
  Wrapper around epoll_wait.
  NOTE - in case of EINTR, it restarts with the original timeout. Since we
  use either infinite or 0 timeouts, this is not critical.
*/
int io_poll_wait(TP_file_handle pollfd, native_event *native_events, int maxevents,
                 int timeout_ms)
{
  int ret;
  do
  {
    ret = epoll_wait(pollfd, native_events, maxevents, timeout_ms);
  }
  while(ret == -1 && errno == EINTR);
  return ret;
}


static void *native_event_get_userdata(native_event *event)
{
  return event->data.ptr;
}

#elif defined(HAVE_KQUEUE)

/*
  NetBSD is incompatible with other BSDs: the last parameter in the EV_SET
  macro (udata, user data) needs to be intptr_t, whereas it needs to be
  void* everywhere else.
*/

#ifdef __NetBSD__
#define MY_EV_SET(a, b, c, d, e, f, g) EV_SET(a, b, c, d, e, f, (intptr_t)g)
#else
#define MY_EV_SET(a, b, c, d, e, f, g) EV_SET(a, b, c, d, e, f, g)
#endif


TP_file_handle io_poll_create()
{
  return kqueue();
}

int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data,void *)
{
  struct kevent ke;
  MY_EV_SET(&ke, fd, EVFILT_READ, EV_ADD|EV_ONESHOT,
            0, 0, data);
  return kevent(pollfd, &ke, 1, 0, 0, 0);
}


int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data,void *)
{
  return io_poll_start_read(pollfd, fd, data, 0);
}


int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
  struct kevent ke;
  MY_EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  return kevent(pollfd, &ke, 1, 0, 0, 0);
}


int io_poll_wait(TP_file_handle pollfd, struct kevent *events, int maxevents, int timeout_ms)
{
  struct timespec ts;
  int ret;
  if (timeout_ms >= 0)
  {
    ts.tv_sec= timeout_ms/1000;
    ts.tv_nsec= (timeout_ms%1000)*1000000;
  }
  do
  {
    ret= kevent(pollfd, 0, 0, events, maxevents,
                (timeout_ms >= 0)?&ts:NULL);
  }
  while (ret == -1 && errno == EINTR);
  return ret;
}

static void* native_event_get_userdata(native_event *event)
{
  return (void *)event->udata;
}

#elif defined (__sun)

static TP_file_handle io_poll_create()
{
  return port_create();
}

int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
  return port_associate(pollfd, PORT_SOURCE_FD, fd, POLLIN, data);
}

static int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void *)
{
  return io_poll_start_read(pollfd, fd, data, 0);
}

int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
  return port_dissociate(pollfd, PORT_SOURCE_FD, fd);
}

int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, int timeout_ms)
{
  struct timespec ts;
  int ret;
  uint_t nget= 1;
  if (timeout_ms >= 0)
  {
    ts.tv_sec= timeout_ms/1000;
    ts.tv_nsec= (timeout_ms%1000)*1000000;
  }
  do
  {
    ret= port_getn(pollfd, events, maxevents, &nget,
                   (timeout_ms >= 0)?&ts:NULL);
  }
  while (ret == -1 && errno == EINTR);
  DBUG_ASSERT(nget < INT_MAX);
  return (int)nget;
}

static void* native_event_get_userdata(native_event *event)
{
  return event->portev_user;
}

#elif defined(HAVE_IOCP)


static TP_file_handle io_poll_create()
{
  return CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);
}


int io_poll_start_read(TP_file_handle pollfd, TP_file_handle fd, void *, void *opt)
{
  static char c;
  TP_connection_generic *con= (TP_connection_generic *)opt;
  OVERLAPPED *overlapped= &con->overlapped;
  if (con->vio_type == VIO_TYPE_NAMEDPIPE)
  {
    if (ReadFile(fd, &c, 0, NULL, overlapped))
      return 0;
  }
  else
  {
    WSABUF buf;
    buf.buf= &c;
    buf.len= 0;
    DWORD flags=0;

    if (WSARecv((SOCKET)fd, &buf, 1, NULL, &flags, overlapped, NULL) == 0)
      return 0;
  }

  if (GetLastError() == ERROR_IO_PENDING)
    return 0;

  return 1;
}


static int io_poll_associate_fd(TP_file_handle pollfd, TP_file_handle fd, void *data, void *opt)
{
  HANDLE h= CreateIoCompletionPort(fd, pollfd, (ULONG_PTR)data, 0);
  if (!h)
    return -1;
  return io_poll_start_read(pollfd, fd, 0, opt);
}


typedef LONG NTSTATUS;

typedef struct _IO_STATUS_BLOCK {
  union {
    NTSTATUS Status;
    PVOID Pointer;
  };
  ULONG_PTR Information;
} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;

struct FILE_COMPLETION_INFORMATION {
  HANDLE Port;
  PVOID Key;
};

enum FILE_INFORMATION_CLASS {
  FileReplaceCompletionInformation = 0x3D
};


typedef NTSTATUS (WINAPI *pNtSetInformationFile)(HANDLE, PIO_STATUS_BLOCK, PVOID, ULONG, FILE_INFORMATION_CLASS);

int io_poll_disassociate_fd(TP_file_handle pollfd, TP_file_handle fd)
{
  static pNtSetInformationFile my_NtSetInformationFile = (pNtSetInformationFile)
    GetProcAddress(GetModuleHandle("ntdll"), "NtSetInformationFile");
  if (!my_NtSetInformationFile)
    return -1; /* unexpected, we only support Windows 8.1+ */
  IO_STATUS_BLOCK iosb{};
  FILE_COMPLETION_INFORMATION fci{};
  if (my_NtSetInformationFile(fd, &iosb, &fci, sizeof(fci), FileReplaceCompletionInformation))
    return -1;
  return 0;
}


int io_poll_wait(TP_file_handle pollfd, native_event *events, int maxevents, int timeout_ms)
{
  ULONG n;
  BOOL ok = GetQueuedCompletionStatusEx(pollfd, events,
     maxevents, &n, timeout_ms, FALSE);

  return ok ? (int)n : -1;
}


static void* native_event_get_userdata(native_event *event)
{
  return (void *)event->lpCompletionKey;
}
#endif


/* Dequeue element from a workqueue */
static TP_connection_generic *queue_get(thread_group_t *thread_group)
{
  DBUG_ENTER("queue_get");
  thread_group->queue_event_count++;
  TP_connection_generic *c;
  for (int i=0; i < NQUEUES; i++)
  {
    c= thread_group->queues[i].pop_front();
    if (c)
      DBUG_RETURN(c);
  }
  DBUG_RETURN(0);
}
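
/*
  Note on ordering (inferred from check_stall() below, which bumps
  connections from the low- to the high-priority queue): queues[] is
  indexed by priority value and the loop above scans from index 0 upward,
  so higher-priority queues are always drained first.
*/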

static TP_connection_generic* queue_get(thread_group_t* group, operation_origin origin)
{
  auto ret = queue_get(group);
  if (ret)
  {
    TP_INCREMENT_GROUP_COUNTER(group, dequeues[(int)origin]);
  }
  return ret;
}

static bool is_queue_empty(thread_group_t *thread_group)
{
  for (int i=0; i < NQUEUES; i++)
  {
    if (!thread_group->queues[i].is_empty())
      return false;
  }
  return true;
}


static void queue_init(thread_group_t *thread_group)
{
  for (int i=0; i < NQUEUES; i++)
  {
    thread_group->queues[i].empty();
  }
}

static void queue_put(thread_group_t *thread_group, native_event *ev, int cnt)
{
  ulonglong now= threadpool_exact_stats?microsecond_interval_timer():pool_timer.current_microtime;
  for(int i=0; i < cnt; i++)
  {
    TP_connection_generic *c = (TP_connection_generic *)native_event_get_userdata(&ev[i]);
    c->enqueue_time= now;
    thread_group->queues[c->priority].push_back(c);
  }
}

/*
  Handle wait timeout:
  Find connections that have been idle for too long and kill them.
  Also, recalculate the time when the next timeout check should run.
*/
static my_bool timeout_check(THD *thd, pool_timer_t *timer)
{
  DBUG_ENTER("timeout_check");
  if (thd->net.reading_or_writing == 1)
  {
    /*
      Check if connection does not have scheduler data. This happens for
      example if the THD belongs to a different scheduler, that is
      listening to extra_port.
    */
    if (auto connection= (TP_connection_generic *) thd->event_scheduler.data)
    {
      if (connection->abs_wait_timeout < timer->current_microtime)
        tp_timeout_handler(connection);
      else
        set_next_timeout_check(connection->abs_wait_timeout);
    }
  }
  DBUG_RETURN(0);
}
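
/*
  For orientation, a hedged sketch of the other side of this check (the
  setter lives outside this excerpt): abs_wait_timeout is an absolute
  microsecond deadline, set along the lines of

    connection->abs_wait_timeout= pool_timer.current_microtime
                                  + 1000000ULL * wait_timeout_seconds;
    set_next_timeout_check(connection->abs_wait_timeout);

  so timeout_check() above is a plain "deadline passed" comparison.
*/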

/*
  Timer thread.

  Periodically, check if one of the thread groups is stalled. Stalls happen
  if events are not being dequeued from the queue, or from the network. The
  primary reason for a stall can be a long-running non-blocking request. It
  could also happen that a thread is waiting, but wait_begin/wait_end was
  forgotten by the storage engine. The timer thread will create a new thread
  in the group in case of a stall.

  Besides checking for stalls, the timer thread is also responsible for
  terminating clients that have been idle for longer than wait_timeout
  seconds.

  TODO: Let the timer sleep for a long time if there is no work to be done.
  Currently it wakes up rather often on an idle server.
*/
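/*
  A worked example (assuming tick_interval is initialized from
  threadpool_stall_limit, in milliseconds, at pool startup): with the
  default stall limit of 500, the loop below wakes roughly every 500 ms,
  refreshes current_microtime, checks every group that has connections for
  stalls, and runs the wait_timeout scan only when the earliest recorded
  deadline has passed.
*/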

static void* timer_thread(void *param)
{
  uint i;
  pool_timer_t* timer=(pool_timer_t *)param;

  my_thread_init();
  DBUG_ENTER("timer_thread");
  timer->next_timeout_check.store(std::numeric_limits<uint64_t>::max(),
                                  std::memory_order_relaxed);
  timer->current_microtime= microsecond_interval_timer();

  for(;;)
  {
    struct timespec ts;
    int err;

    set_timespec_nsec(ts,timer->tick_interval*1000000);
    mysql_mutex_lock(&timer->mutex);
    err= mysql_cond_timedwait(&timer->cond, &timer->mutex, &ts);
    if (timer->shutdown)
    {
      mysql_mutex_unlock(&timer->mutex);
      break;
    }
    if (err == ETIMEDOUT)
    {
      timer->current_microtime= microsecond_interval_timer();

      /* Check stalls in thread groups */
      for (i= 0; i < threadpool_max_size; i++)
      {
        if(all_groups[i].connection_count)
          check_stall(&all_groups[i]);
      }

      /* Check if any client exceeded wait_timeout */
      if (timer->next_timeout_check.load(std::memory_order_relaxed) <=
          timer->current_microtime)
      {
        /* Reset next timeout check, it will be recalculated below */
        timer->next_timeout_check.store(std::numeric_limits<uint64_t>::max(),
                                        std::memory_order_relaxed);
        server_threads.iterate(timeout_check, timer);
      }
    }
    mysql_mutex_unlock(&timer->mutex);
  }

  mysql_mutex_destroy(&timer->mutex);
  my_thread_end();
  return NULL;
}


static void check_stall(thread_group_t *thread_group)
{
  mysql_mutex_lock(&thread_group->mutex);

  /*
    Bump priority for the low priority connections that spent too much
    time in the low priority queue.
  */
  TP_connection_generic *c;
  for (;;)
  {
    c= thread_group->queues[TP_PRIORITY_LOW].front();
    if (c && pool_timer.current_microtime - c->enqueue_time > 1000ULL * threadpool_prio_kickup_timer)
    {
      thread_group->queues[TP_PRIORITY_LOW].remove(c);
      thread_group->queues[TP_PRIORITY_HIGH].push_back(c);
    }
    else
      break;
  }

  /*
    Check if listener is present. If not, check whether any IO
    events were dequeued since last time. If not, this means the
    listener is either in a tight loop or thd_wait_begin()
    was forgotten. Create a new worker (it will make itself listener).
  */
  if (!thread_group->listener && !thread_group->io_event_count)
  {
    wake_or_create_thread(thread_group, true);
    mysql_mutex_unlock(&thread_group->mutex);
    return;
  }

  /* Reset io event count */
  thread_group->io_event_count= 0;

  /*
    Check whether requests from the workqueue are being dequeued.

    The stall detection and resolution works as follows:

    1. There is a counter thread_group->queue_event_count for the number of
       events removed from the queue. Timer resets the counter to 0 on each
       run.
    2. Timer determines a stall if this counter remains 0 since the last
       check and the queue is not empty.
    3. Once timer determined a stall it sets the thread_group->stalled flag
       and wakes an idle worker (or creates a new one, subject to
       throttling).
    4. The stalled flag is reset when an event is dequeued.

    Q: Will this handling lead to an unbound growth of threads, if the
    queue stalls permanently?
    A: No. If the queue stalls permanently, it is an indication of many
    very long simultaneous queries. The maximum number of simultaneous
    queries is max_connections, further we have the threadpool_max_threads
    limit, upon which no worker threads are created. So in case there is a
    flood of very long queries, the threadpool would slowly approach
    thread-per-connection behavior.
    NOTE:
    If long queries never wait, creation of the new threads is done by the
    timer, so it is slower than in real thread-per-connection. However, if
    long queries do wait and indicate that via thd_wait_begin/end
    callbacks, thread creation will be faster.
  */
  if (!is_queue_empty(thread_group) && !thread_group->queue_event_count)
  {
    thread_group->stalled= true;
    TP_INCREMENT_GROUP_COUNTER(thread_group,stalls);
    wake_or_create_thread(thread_group,true);
  }

  /* Reset queue event count */
  thread_group->queue_event_count= 0;

  mysql_mutex_unlock(&thread_group->mutex);
}
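
/*
  A concrete walk-through (numbers illustrative, assuming the default
  threadpool_stall_limit of 500 ms): suppose a worker picks up a query that
  runs for 2 seconds without ever calling thd_wait_begin(). New events keep
  arriving and are queued, but queue_event_count stays 0. On the next timer
  tick (~500 ms later) check_stall() sees a non-empty queue with no
  dequeues, marks the group stalled and wakes or creates one extra worker,
  which drains the queue while the long query keeps running.
*/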


static void start_timer(pool_timer_t* timer)
{
  DBUG_ENTER("start_timer");
  mysql_mutex_init(key_timer_mutex,&timer->mutex, NULL);
  mysql_cond_init(key_timer_cond, &timer->cond, NULL);
  timer->shutdown = false;
  mysql_thread_create(key_timer_thread, &timer->timer_thread_id, NULL,
                      timer_thread, timer);
  DBUG_VOID_RETURN;
}


static void stop_timer(pool_timer_t *timer)
{
  DBUG_ENTER("stop_timer");
  mysql_mutex_lock(&timer->mutex);
  timer->shutdown = true;
  mysql_cond_signal(&timer->cond);
  mysql_mutex_unlock(&timer->mutex);
  pthread_join(timer->timer_thread_id, NULL);
  DBUG_VOID_RETURN;
}


/**
  Poll for socket events and distribute them to worker threads.
  In many cases the current thread will handle a single event itself.

  @return a ready connection, or NULL on shutdown
*/
static TP_connection_generic * listener(worker_thread_t *current_thread,
                                        thread_group_t *thread_group)
{
  DBUG_ENTER("listener");
  TP_connection_generic *retval= NULL;

  for(;;)
  {
    native_event ev[MAX_EVENTS];
    int cnt;

    if (thread_group->shutdown)
      break;

    cnt = io_poll_wait(thread_group->pollfd, ev, MAX_EVENTS, -1);
    TP_INCREMENT_GROUP_COUNTER(thread_group, polls[(int)operation_origin::LISTENER]);
    if (cnt <= 0)
    {
      DBUG_ASSERT(thread_group->shutdown);
      break;
    }

    mysql_mutex_lock(&thread_group->mutex);

    if (thread_group->shutdown)
    {
      mysql_mutex_unlock(&thread_group->mutex);
      break;
    }

    thread_group->io_event_count += cnt;

    /*
      We got some network events and need to make decisions: whether the
      listener should handle events itself, and whether to wake any worker
      threads so they can handle events.

      Q1: Should the listener handle an event itself, or put all events
      into the queue and let workers handle the events?

      Solution:
      Generally, a listener that handles events itself is preferable. We do
      not want the listener thread to change its state from waiting to
      running too often. Since the listener has just woken from poll, it
      better uses its time slice and does some work. Besides, not handling
      events means they go to the queue, and often another worker must wake
      up to handle the event. This is not good, as we want to avoid
      wakeups.

      The downside of a listener that also handles queries is that we can
      potentially leave the thread group for a long time without picking up
      new network events. It is not a major problem, because this stall
      will be detected sooner or later by the timer thread. Still, relying
      on the timer is not always good, because it may "tick" too slowly
      (large timer_interval).

      We use the following strategy to solve this problem - if the queue
      was not empty we suspect a flood of network events and the listener
      stays. Otherwise, it handles a query.

      Q2: If the queue is not empty, how many workers to wake?

      Solution:
      We generally try to keep one thread per group active (threads
      handling queries are considered active, unless they are stuck inside
      some "wait"). Thus, we will wake only one worker, and only if there
      are no active threads currently, and the listener is not going to
      handle a query. When we don't wake, we hope that currently active
      threads will finish fast and handle the queue. If this does not
      happen, the timer thread will detect a stall and wake a worker.

      NOTE: Currently nothing is done to detect or prevent long queuing
      times. A solution for the future would be to give up the "one active
      thread per group" principle, if events stay in the queue for too
      long, and just wake more workers.
    */

    bool listener_picks_event= is_queue_empty(thread_group) && !threadpool_dedicated_listener;
    queue_put(thread_group, ev, cnt);
    if (listener_picks_event)
    {
      /* Handle the first event. */
      retval= queue_get(thread_group, operation_origin::LISTENER);
      mysql_mutex_unlock(&thread_group->mutex);
      break;
    }

    if(thread_group->active_thread_count==0)
    {
      /* We added some work items to queue, now wake a worker. */
      if(wake_thread(thread_group, false))
      {
        /*
          Wake failed, hence the group has no idle threads. Now check if
          there are any threads in the group except the listener.
        */
        if(thread_group->thread_count == 1)
        {
          /*
            Currently there is no worker thread in the group, as indicated
            by thread_count == 1 (this means the listener is the only
            thread in the group).
            The queue is not empty, and the listener is not going to handle
            events. In order to drain the queue, we create a worker here.
            Alternatively, we could just rely on the timer to detect a
            stall and create a thread, but waiting for the timer would be
            an inefficient and pointless delay.
          */
          create_worker(thread_group, false);
        }
      }
    }
    mysql_mutex_unlock(&thread_group->mutex);
  }

  DBUG_RETURN(retval);
}


/**
  Adjust thread counters in group or global
  whenever a thread is created or is about to exit.

  @param thread_group
  @param count -  1, when a new thread is created
                 -1, when a thread is about to exit
*/

static void add_thread_count(thread_group_t *thread_group, int32 count)
{
  thread_group->thread_count += count;
  /* worker starts out and ends in "active" state */
  thread_group->active_thread_count += count;
  tp_stats.num_worker_threads+= count;
}


/**
  Creates a new worker thread.
  thread_group->mutex must be held when calling this function.

  NOTE: in rare cases, the number of threads can exceed
  threadpool_max_threads, because we need at least 2 threads
  per group to prevent deadlocks (one listener + one worker)
*/

static int create_worker(thread_group_t *thread_group, bool due_to_stall)
{
  pthread_t thread_id;
  bool max_threads_reached= false;
  int err;

  DBUG_ENTER("create_worker");
  if (tp_stats.num_worker_threads >= threadpool_max_threads
      && thread_group->thread_count >= 2)
  {
    err= 1;
    max_threads_reached= true;
    goto end;
  }

  err= mysql_thread_create(key_worker_thread, &thread_id,
         thread_group->pthread_attr, worker_main, thread_group);
  if (!err)
  {
    thread_group->last_thread_creation_time=microsecond_interval_timer();
    statistic_increment(thread_created,&LOCK_status);
    add_thread_count(thread_group, 1);
    TP_INCREMENT_GROUP_COUNTER(thread_group,thread_creations);
    if(due_to_stall)
    {
      TP_INCREMENT_GROUP_COUNTER(thread_group, thread_creations_due_to_stall);
    }
  }
  else
  {
    my_errno= errno;
  }

end:
  if (err)
    print_pool_blocked_message(max_threads_reached);
  else
    pool_block_start= 0; /* Reset pool blocked timer, if it was set */

  DBUG_RETURN(err);
}


/**
  Calculate the microsecond throttling delay for thread creation.

  The value depends on how many threads are already in the group:
  a small number of threads means no delay, the more threads the larger
  the delay.

  The actual values were not calculated using any scientific methods.
  They just look right, and behave well in practice.
*/

#define THROTTLING_FACTOR (threadpool_stall_limit/std::max(DEFAULT_THREADPOOL_STALL_LIMIT,threadpool_stall_limit))

static ulonglong microsecond_throttling_interval(thread_group_t *thread_group)
{
  int count= thread_group->thread_count;

  if (count < 1+ (int)threadpool_oversubscribe)
    return 0;

  if (count < 8)
    return 50*1000*THROTTLING_FACTOR;

  if(count < 16)
    return 100*1000*THROTTLING_FACTOR;

  return 200*1000*THROTTLING_FACTOR;
}
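
/*
  Worked example (with the default threadpool_stall_limit, so
  THROTTLING_FACTOR == 1): a group with few threads gets a new thread
  immediately; once past 1 + threadpool_oversubscribe threads, creation is
  delayed by 50 ms while the group has under 8 threads, 100 ms under 16,
  and 200 ms beyond that. Note that with a stall limit below the default,
  the integer THROTTLING_FACTOR evaluates to 0, which disables the delay.
*/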


/**
  Wakes a worker thread, or creates a new one.

  Worker creation is throttled, so we avoid too many threads
  being created during a short time.
*/
static int wake_or_create_thread(thread_group_t *thread_group, bool due_to_stall)
{
  DBUG_ENTER("wake_or_create_thread");

  if (thread_group->shutdown)
    DBUG_RETURN(0);

  if (wake_thread(thread_group, due_to_stall) == 0)
  {
    DBUG_RETURN(0);
  }

  if (thread_group->thread_count > thread_group->connection_count)
    DBUG_RETURN(-1);


  if (thread_group->active_thread_count == 0)
  {
    /*
      We're better off creating a new thread here with no delay, either
      there are no workers at all, or they are all blocked and there was no
      idle thread to wake up. Smells like a potential deadlock or very
      slowly executing requests, e.g. sleeps or user locks.
    */
    DBUG_RETURN(create_worker(thread_group, due_to_stall));
  }

  ulonglong now = microsecond_interval_timer();
  ulonglong time_since_last_thread_created =
    (now - thread_group->last_thread_creation_time);

  /* Throttle thread creation. */
  if (time_since_last_thread_created >
      microsecond_throttling_interval(thread_group))
  {
    DBUG_RETURN(create_worker(thread_group, due_to_stall));
  }

  TP_INCREMENT_GROUP_COUNTER(thread_group,throttles);
  DBUG_RETURN(-1);
}



int thread_group_init(thread_group_t *thread_group, pthread_attr_t* thread_attr)
{
  DBUG_ENTER("thread_group_init");
  thread_group->pthread_attr = thread_attr;
  mysql_mutex_init(key_group_mutex, &thread_group->mutex, NULL);
  thread_group->pollfd= INVALID_HANDLE_VALUE;
  thread_group->shutdown_pipe[0]= -1;
  thread_group->shutdown_pipe[1]= -1;
  queue_init(thread_group);
  DBUG_RETURN(0);
}


void thread_group_destroy(thread_group_t *thread_group)
{
  mysql_mutex_destroy(&thread_group->mutex);
  if (thread_group->pollfd != INVALID_HANDLE_VALUE)
  {
    io_poll_close(thread_group->pollfd);
    thread_group->pollfd= INVALID_HANDLE_VALUE;
  }
#ifndef HAVE_IOCP
  for(int i=0; i < 2; i++)
  {
    if(thread_group->shutdown_pipe[i] != -1)
    {
      close(thread_group->shutdown_pipe[i]);
      thread_group->shutdown_pipe[i]= -1;
    }
  }
#endif

  if (!--shutdown_group_count)
    my_free(all_groups);
}

/**
  Wake a sleeping thread from the waiting list
*/

static int wake_thread(thread_group_t *thread_group, bool due_to_stall)
{
  DBUG_ENTER("wake_thread");
  worker_thread_t *thread = thread_group->waiting_threads.front();
  if(thread)
  {
    thread->woken= true;
    thread_group->waiting_threads.remove(thread);
    mysql_cond_signal(&thread->cond);
    TP_INCREMENT_GROUP_COUNTER(thread_group, wakes);
    if (due_to_stall)
    {
      TP_INCREMENT_GROUP_COUNTER(thread_group, wakes_due_to_stall);
    }
    DBUG_RETURN(0);
  }
  DBUG_RETURN(1); /* no thread in waiter list => missed wakeup */
}

/*
  Wake the listener thread (during shutdown).
  The self-pipe trick is used in most cases, except IOCP.
*/
static int wake_listener(thread_group_t *thread_group)
{
#ifndef HAVE_IOCP
  if (pipe(thread_group->shutdown_pipe))
  {
    return -1;
  }

  /* Wake listener */
  if (io_poll_associate_fd(thread_group->pollfd,
      thread_group->shutdown_pipe[0], NULL, NULL))
  {
    return -1;
  }
  char c= 0;
  if (write(thread_group->shutdown_pipe[1], &c, 1) < 0)
    return -1;
#else
  PostQueuedCompletionStatus(thread_group->pollfd, 0, 0, 0);
#endif
  return 0;
}

/**
  Initiate shutdown for thread group.

  The shutdown is asynchronous, we only care to wake all threads in here,
  so they can finish. We do not wait here until threads terminate. Final
  cleanup of the group (thread_group_destroy) will be done by the last
  exiting thread.
*/

static void thread_group_close(thread_group_t *thread_group)
{
  DBUG_ENTER("thread_group_close");

  mysql_mutex_lock(&thread_group->mutex);
  if (thread_group->thread_count == 0)
  {
    mysql_mutex_unlock(&thread_group->mutex);
    thread_group_destroy(thread_group);
    DBUG_VOID_RETURN;
  }

  thread_group->shutdown= true;
  thread_group->listener= NULL;

  wake_listener(thread_group);

  /* Wake all workers. */
  while(wake_thread(thread_group, false) == 0)
  {
  }

  mysql_mutex_unlock(&thread_group->mutex);

  DBUG_VOID_RETURN;
}


/*
  Add work to the queue. Maybe wake a worker if they all sleep.

  Currently, this function is only used when new connections need to
  perform login (this is done in worker threads).
*/
static void queue_put(thread_group_t *thread_group, TP_connection_generic *connection)
{
  DBUG_ENTER("queue_put");

  connection->enqueue_time= threadpool_exact_stats?microsecond_interval_timer():pool_timer.current_microtime;
  thread_group->queues[connection->priority].push_back(connection);

  if (thread_group->active_thread_count == 0)
    wake_or_create_thread(thread_group);

  DBUG_VOID_RETURN;
}


/*
  Prevent too many threads executing at the same time, if the workload is
  not CPU bound.
*/
static bool too_many_threads(thread_group_t *thread_group)
{
  return (thread_group->active_thread_count >= 1+(int)threadpool_oversubscribe
          && !thread_group->stalled);
}
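
/*
  For example (illustrative, assuming threadpool_oversubscribe= 3): a group
  counts as oversubscribed once 4 or more threads are actively executing,
  and a worker that finds the group in this state goes to sleep instead of
  picking up more work - unless the group is marked stalled, in which case
  extra concurrency is allowed on purpose.
*/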
|
|
|
|
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
|
|
|
Retrieve a connection with pending event.
|
2020-05-29 12:21:27 +02:00
|
|
|
|
|
|
|
Pending event in our case means that there is either a pending login request
|
2012-01-15 11:17:45 +01:00
|
|
|
(if connection is not yet logged in), or there are unread bytes on the socket.
|
2011-12-08 19:17:49 +01:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
If there are no pending events currently, thread will wait.
|
2012-01-27 19:52:53 +01:00
|
|
|
If timeout specified in abstime parameter passes, the function returns NULL.
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
@param current_thread - current worker thread
|
|
|
|
@param thread_group - current thread group
|
|
|
|
@param abstime - absolute wait timeout
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
@return
|
2020-05-29 12:21:27 +02:00
|
|
|
connection with pending event.
|
2012-01-27 19:52:53 +01:00
|
|
|
NULL is returned if timeout has expired,or on shutdown.
|
2011-12-08 19:17:49 +01:00
|
|
|
*/
|
2012-01-26 04:35:54 +01:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
TP_connection_generic *get_event(worker_thread_t *current_thread,
|
2012-01-15 11:17:45 +01:00
|
|
|
thread_group_t *thread_group, struct timespec *abstime)
|
2020-05-29 12:21:27 +02:00
|
|
|
{
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_ENTER("get_event");
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_connection_generic *connection = NULL;
|
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
|
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
|
|
|
DBUG_ASSERT(thread_group->active_thread_count >= 0);
|
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
for(;;)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
int err=0;
|
2020-05-29 12:21:27 +02:00
|
|
|
bool oversubscribed = too_many_threads(thread_group);
|
2011-12-08 19:17:49 +01:00
|
|
|
if (thread_group->shutdown)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Check if queue is not empty */
|
2012-01-27 19:52:53 +01:00
|
|
|
if (!oversubscribed)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2020-03-28 01:46:53 +01:00
|
|
|
connection = queue_get(thread_group, operation_origin::WORKER);
|
2012-01-15 11:17:45 +01:00
|
|
|
if(connection)
|
2019-05-26 13:25:12 +02:00
|
|
|
{
|
2011-12-08 19:17:49 +01:00
|
|
|
break;
|
2019-05-26 13:25:12 +02:00
|
|
|
}
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If there is currently no listener in the group, become one. */
|
|
|
|
if(!thread_group->listener)
|
|
|
|
{
|
|
|
|
thread_group->listener= current_thread;
|
2012-01-18 21:12:04 +01:00
|
|
|
thread_group->active_thread_count--;
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
connection = listener(current_thread, thread_group);
|
2020-03-28 01:46:53 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
2012-01-18 21:12:04 +01:00
|
|
|
thread_group->active_thread_count++;
|
2011-12-08 19:17:49 +01:00
|
|
|
/* There is no listener anymore, it just returned. */
|
|
|
|
thread_group->listener= NULL;
|
|
|
|
break;
|
|
|
|
}
|
2016-09-21 16:28:42 +02:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
Last thing we try before going to sleep is to
|
2016-09-21 16:28:42 +02:00
|
|
|
non-blocking event poll, i.e with timeout = 0.
|
|
|
|
If this returns events, pick one
|
2011-12-08 19:17:49 +01:00
|
|
|
*/
|
2012-01-27 19:52:53 +01:00
|
|
|
if (!oversubscribed)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
native_event ev[MAX_EVENTS];
|
|
|
|
int cnt = io_poll_wait(thread_group->pollfd, ev, MAX_EVENTS, 0);
|
2020-03-28 01:46:53 +01:00
|
|
|
TP_INCREMENT_GROUP_COUNTER(thread_group, polls[(int)operation_origin::WORKER]);
|
2016-09-21 16:28:42 +02:00
|
|
|
if (cnt > 0)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
queue_put(thread_group, ev, cnt);
|
2020-03-28 01:46:53 +01:00
|
|
|
connection= queue_get(thread_group,operation_origin::WORKER);
|
2011-12-08 19:17:49 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
/* And now, finally sleep */
|
2011-12-08 19:17:49 +01:00
|
|
|
current_thread->woken = false; /* wake() sets this to true */
|
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
/*
|
2011-12-08 19:17:49 +01:00
|
|
|
Add current thread to the head of the waiting list and wait.
|
|
|
|
It is important to add the thread to the head rather than the tail,
|
|
|
|
as it ensures LIFO wakeup order (hot caches, and threads deeper in the list can still hit the inactivity timeout and exit)
|
|
|
|
*/
|
2012-01-15 11:17:45 +01:00
|
|
|
thread_group->waiting_threads.push_front(current_thread);
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-15 15:41:25 +01:00
|
|
|
thread_group->active_thread_count--;
|
2012-01-28 01:09:28 +01:00
|
|
|
if (abstime)
|
2012-01-27 19:52:53 +01:00
|
|
|
{
|
2020-05-29 12:21:27 +02:00
|
|
|
err = mysql_cond_timedwait(¤t_thread->cond, &thread_group->mutex,
|
2012-01-27 19:52:53 +01:00
|
|
|
abstime);
|
|
|
|
}
|
2011-12-08 19:17:49 +01:00
|
|
|
else
|
2012-01-27 19:52:53 +01:00
|
|
|
{
|
2011-12-08 19:17:49 +01:00
|
|
|
err = mysql_cond_wait(¤t_thread->cond, &thread_group->mutex);
|
2012-01-27 19:52:53 +01:00
|
|
|
}
|
2012-01-15 15:41:25 +01:00
|
|
|
thread_group->active_thread_count++;
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
if (!current_thread->woken)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
Thread was not signalled by wake(); it might be a spurious wakeup or
|
|
|
|
a timeout. Anyhow, we need to remove ourselves from the list now.
|
|
|
|
If the thread was explicitly woken, then the waker has already removed us from the list.
|
|
|
|
*/
|
2012-01-15 11:17:45 +01:00
|
|
|
thread_group->waiting_threads.remove(current_thread);
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
2012-01-28 01:09:28 +01:00
|
|
|
if (err)
|
2011-12-08 19:17:49 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
thread_group->stalled= false;
|
2019-05-26 13:25:12 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
DBUG_RETURN(connection);
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
2020-05-29 12:21:27 +02:00
|
|
|
Tells the pool that a worker starts waiting on I/O, a lock, a condition,
|
2011-12-08 19:17:49 +01:00
|
|
|
sleep() or similar.
|
|
|
|
*/
|
2012-01-26 04:35:54 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
void wait_begin(thread_group_t *thread_group)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("wait_begin");
|
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
2012-01-15 15:41:25 +01:00
|
|
|
thread_group->active_thread_count--;
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_ASSERT(thread_group->active_thread_count >= 0);
|
|
|
|
DBUG_ASSERT(thread_group->connection_count > 0);
|
2016-09-21 16:28:42 +02:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
if ((thread_group->active_thread_count == 0) &&
|
2019-12-17 21:57:40 +01:00
|
|
|
(!is_queue_empty(thread_group) || !thread_group->listener))
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2020-05-29 12:21:27 +02:00
|
|
|
/*
|
|
|
|
Group might stall while this thread waits, thus wake
|
2012-01-15 11:17:45 +01:00
|
|
|
or create a worker to prevent a stall.
|
|
|
|
*/
|
2011-12-08 19:17:49 +01:00
|
|
|
wake_or_create_thread(thread_group);
|
|
|
|
}
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
|
|
|
Tells the pool that a worker has finished waiting.
|
2011-12-08 19:17:49 +01:00
|
|
|
*/
|
2012-01-15 11:17:45 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
void wait_end(thread_group_t *thread_group)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("wait_end");
|
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
2012-01-15 15:41:25 +01:00
|
|
|
thread_group->active_thread_count++;
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_connection * TP_pool_generic::new_connection(CONNECT *c)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
return new (std::nothrow) TP_connection_generic(c);
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
2020-05-29 12:21:27 +02:00
|
|
|
Add a new connection to the thread pool.
|
2011-12-08 19:17:49 +01:00
|
|
|
*/
|
2012-01-15 11:17:45 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
void TP_pool_generic::add(TP_connection *c)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("tp_add_connection");
|
2016-02-01 11:45:39 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_connection_generic *connection=(TP_connection_generic *)c;
|
|
|
|
thread_group_t *thread_group= connection->thread_group;
|
2016-02-01 11:45:39 +01:00
|
|
|
/*
|
2020-05-29 12:21:27 +02:00
|
|
|
Add the connection to the work queue. Actual logon
|
2016-02-01 11:45:39 +01:00
|
|
|
will be done by a worker thread.
|
|
|
|
*/
|
2016-09-21 16:28:42 +02:00
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
|
|
|
queue_put(thread_group, connection);
|
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
|
|
|
|
/**
|
2012-01-26 04:35:54 +01:00
|
|
|
MySQL scheduler callback: wait begin
|
2012-01-15 11:17:45 +01:00
|
|
|
*/
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
void TP_connection_generic::wait_begin(int type)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
DBUG_ENTER("wait_begin");
|
|
|
|
|
|
|
|
DBUG_ASSERT(!waiting);
|
|
|
|
waiting++;
|
|
|
|
if (waiting == 1)
|
|
|
|
::wait_begin(thread_group);
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
2012-01-26 04:35:54 +01:00
|
|
|
MySQL scheduler callback: wait end
|
2012-01-15 11:17:45 +01:00
|
|
|
*/
|
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
void TP_connection_generic::wait_end()
|
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
DBUG_ENTER("wait_end");
|
|
|
|
DBUG_ASSERT(waiting);
|
|
|
|
waiting--;
|
|
|
|
if (waiting == 0)
|
|
|
|
::wait_end(thread_group);
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2012-01-26 04:35:54 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
static void set_next_timeout_check(ulonglong abstime)
|
|
|
|
{
|
2020-03-26 23:08:53 +01:00
|
|
|
auto old= pool_timer.next_timeout_check.load(std::memory_order_relaxed);
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_ENTER("set_next_timeout_check");
|
2020-03-26 23:08:53 +01:00
|
|
|
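/*
  Lock-free minimum update: keep trying to lower next_timeout_check
  with a CAS. compare_exchange_weak() reloads 'old' on failure, so the
  loop ends once we succeed or another thread has stored an even
  earlier deadline.
*/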
while (abstime < old)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2020-03-26 23:08:53 +01:00
|
|
|
if (pool_timer.next_timeout_check.
|
|
|
|
compare_exchange_weak(old, abstime,
|
|
|
|
std::memory_order_relaxed,
|
|
|
|
std::memory_order_relaxed))
|
|
|
|
break;
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2020-05-23 13:17:50 +02:00
|
|
|
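/*
  Connections are distributed over groups by thread id modulo the
  current group count, which spreads the load without any locking.
*/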
static size_t get_group_id(my_thread_id tid)
|
|
|
|
{
|
|
|
|
return size_t(tid % group_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_connection_generic::TP_connection_generic(CONNECT *c):
|
|
|
|
TP_connection(c),
|
|
|
|
thread_group(0),
|
|
|
|
next_in_queue(0),
|
|
|
|
prev_in_queue(0),
|
|
|
|
abs_wait_timeout(ULONGLONG_MAX),
|
|
|
|
bound_to_poll_descriptor(false),
|
2020-05-23 13:17:50 +02:00
|
|
|
waiting(false),
|
|
|
|
fix_group(false)
|
2016-09-21 16:28:42 +02:00
|
|
|
#ifdef HAVE_IOCP
|
|
|
|
, overlapped()
|
|
|
|
#endif
|
2020-05-23 13:17:50 +02:00
|
|
|
#ifdef _WIN32
|
|
|
|
, vio_type(c->vio_type)
|
|
|
|
#endif
|
2016-09-21 16:28:42 +02:00
|
|
|
{
|
2019-05-16 22:38:35 +02:00
|
|
|
DBUG_ASSERT(c->vio_type != VIO_CLOSED);
|
2017-11-20 21:39:59 +01:00
|
|
|
|
|
|
|
#ifdef _WIN32
|
2019-05-16 22:38:35 +02:00
|
|
|
fd= (c->vio_type == VIO_TYPE_NAMEDPIPE) ?
|
|
|
|
c->pipe: (TP_file_handle) mysql_socket_getfd(c->sock);
|
2017-11-20 21:39:59 +01:00
|
|
|
#else
|
2019-05-16 22:38:35 +02:00
|
|
|
fd= mysql_socket_getfd(c->sock);
|
2017-11-20 21:39:59 +01:00
|
|
|
#endif
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
/* Assign connection to a group. */
|
|
|
|
thread_group_t *group=
|
2020-05-23 13:17:50 +02:00
|
|
|
&all_groups[get_group_id(c->thread_id)];
|
2016-09-21 16:28:42 +02:00
|
|
|
thread_group=group;
|
|
|
|
|
|
|
|
mysql_mutex_lock(&group->mutex);
|
|
|
|
group->connection_count++;
|
|
|
|
mysql_mutex_unlock(&group->mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
TP_connection_generic::~TP_connection_generic()
|
|
|
|
{
|
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
|
|
|
thread_group->connection_count--;
|
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
|
|
|
}
|
2012-01-15 11:17:45 +01:00
|
|
|
|
|
|
|
/**
|
2020-05-29 12:21:27 +02:00
|
|
|
Set wait timeout for connection.
|
2012-01-15 11:17:45 +01:00
|
|
|
*/
|
2012-01-28 01:09:28 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
void TP_connection_generic::set_io_timeout(int timeout_sec)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("set_wait_timeout");
|
2020-05-29 12:21:27 +02:00
|
|
|
/*
|
2011-12-08 19:17:49 +01:00
|
|
|
Calculate wait deadline for this connection.
|
2020-05-29 12:21:27 +02:00
|
|
|
Instead of using microsecond_interval_timer() which has a syscall
|
|
|
|
overhead, use pool_timer.current_microtime and take
|
|
|
|
into account that its value could be off by at most
|
2011-12-08 19:17:49 +01:00
|
|
|
one tick interval.
|
|
|
|
*/
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
abs_wait_timeout= pool_timer.current_microtime +
|
2011-12-08 19:17:49 +01:00
|
|
|
1000LL*pool_timer.tick_interval +
|
2016-09-21 16:28:42 +02:00
|
|
|
1000000LL*timeout_sec;
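/*
  All terms are in microseconds: tick_interval (the timer's stall-check
  period) is in milliseconds, hence the factor 1000, and timeout_sec is
  in seconds, hence 1000000. The extra tick interval compensates for
  current_microtime being refreshed only once per timer tick.
*/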
|
2011-12-08 19:17:49 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
set_next_timeout_check(abs_wait_timeout);
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2011-12-19 13:28:30 +01:00
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
2020-05-29 12:21:27 +02:00
|
|
|
Handle a (rare) special case, where a connection needs to
|
2012-01-15 11:17:45 +01:00
|
|
|
migrate to a different group because group_count has changed
|
2020-05-29 12:21:27 +02:00
|
|
|
after a change of the thread_pool_size setting.
|
2011-12-19 13:28:30 +01:00
|
|
|
*/
|
2012-01-28 01:09:28 +01:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
static int change_group(TP_connection_generic *c,
|
2011-12-19 13:28:30 +01:00
|
|
|
thread_group_t *old_group,
|
|
|
|
thread_group_t *new_group)
|
2020-05-29 12:21:27 +02:00
|
|
|
{
|
2011-12-19 13:28:30 +01:00
|
|
|
int ret= 0;
|
|
|
|
|
|
|
|
DBUG_ASSERT(c->thread_group == old_group);
|
|
|
|
|
|
|
|
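/*
  The two group mutexes are never held at the same time: the connection
  is fully detached from the old group before it is attached to the new
  one, so no lock ordering issues arise.
*/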
/* Remove connection from the old group. */
|
|
|
|
mysql_mutex_lock(&old_group->mutex);
|
2012-01-17 18:50:40 +01:00
|
|
|
if (c->bound_to_poll_descriptor)
|
|
|
|
{
|
2017-11-20 21:39:59 +01:00
|
|
|
io_poll_disassociate_fd(old_group->pollfd,c->fd);
|
2012-01-17 18:50:40 +01:00
|
|
|
c->bound_to_poll_descriptor= false;
|
|
|
|
}
|
2011-12-19 13:28:30 +01:00
|
|
|
c->thread_group->connection_count--;
|
2011-12-20 22:49:24 +01:00
|
|
|
mysql_mutex_unlock(&old_group->mutex);
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-19 13:28:30 +01:00
|
|
|
/* Add connection to the new group. */
|
|
|
|
mysql_mutex_lock(&new_group->mutex);
|
|
|
|
c->thread_group= new_group;
|
|
|
|
new_group->connection_count++;
|
|
|
|
/* Ensure that there is a listener in the new group. */
|
2012-01-28 01:09:28 +01:00
|
|
|
if (!new_group->thread_count)
|
2019-05-26 13:25:12 +02:00
|
|
|
ret= create_worker(new_group, false);
|
2011-12-19 13:28:30 +01:00
|
|
|
mysql_mutex_unlock(&new_group->mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-05-23 13:17:50 +02:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
int TP_connection_generic::start_io()
|
2017-11-20 21:39:59 +01:00
|
|
|
{
|
2011-12-19 13:28:30 +01:00
|
|
|
/*
|
|
|
|
Usually, a connection will stay in the same group for the entire
|
|
|
|
connection's life. However, we do allow group_count to
|
|
|
|
change at runtime, which means that in rare cases a
|
|
|
|
connection may need to migrate to another group,
|
|
|
|
which keeps the load equal between groups.
|
|
|
|
|
|
|
|
So we recalculate in which group the connection should be, based
|
|
|
|
on thread_id and current group count, and migrate if necessary.
|
2020-05-23 13:17:50 +02:00
|
|
|
*/
|
|
|
|
if (fix_group)
|
2011-12-19 13:28:30 +01:00
|
|
|
{
|
2020-05-23 13:17:50 +02:00
|
|
|
fix_group = false;
|
|
|
|
thread_group_t *new_group= &all_groups[get_group_id(thd->thread_id)];
|
|
|
|
|
|
|
|
if (new_group != thread_group)
|
|
|
|
{
|
|
|
|
if (change_group(this, thread_group, new_group))
|
|
|
|
return -1;
|
|
|
|
}
|
2011-12-19 13:28:30 +01:00
|
|
|
}
|
2016-09-21 16:28:42 +02:00
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
/*
|
|
|
|
Bind to poll descriptor if not yet done.
|
|
|
|
*/
|
2016-09-21 16:28:42 +02:00
|
|
|
if (!bound_to_poll_descriptor)
|
2011-12-19 13:28:30 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
bound_to_poll_descriptor= true;
|
|
|
|
return io_poll_associate_fd(thread_group->pollfd, fd, this, OPTIONAL_IO_POLL_READ_PARAM);
|
2011-12-19 13:28:30 +01:00
|
|
|
}
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
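/* Already bound; request notification for the next read event. */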
return io_poll_start_read(thread_group->pollfd, fd, this, OPTIONAL_IO_POLL_READ_PARAM);
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
2012-01-27 19:52:53 +01:00
|
|
|
|
|
|
|
|
2012-01-15 11:17:45 +01:00
|
|
|
/**
|
2012-01-26 04:35:54 +01:00
|
|
|
Worker thread's main
|
2012-01-15 11:17:45 +01:00
|
|
|
*/
|
2012-01-26 04:35:54 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
static void *worker_main(void *param)
|
|
|
|
{
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
worker_thread_t this_thread;
|
|
|
|
pthread_detach_this_thread();
|
|
|
|
my_thread_init();
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_ENTER("worker_main");
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
thread_group_t *thread_group = (thread_group_t *)param;
|
|
|
|
|
|
|
|
/* Init per-thread structure */
|
|
|
|
mysql_cond_init(key_worker_cond, &this_thread.cond, NULL);
|
|
|
|
this_thread.thread_group= thread_group;
|
|
|
|
this_thread.event_count=0;
|
|
|
|
|
|
|
|
/* Run event loop */
|
|
|
|
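/*
  get_event() returns NULL on idle timeout or shutdown, so idle
  workers eventually leave this loop and the pool shrinks.
*/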
for(;;)
|
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_connection_generic *connection;
|
2011-12-08 19:17:49 +01:00
|
|
|
struct timespec ts;
|
|
|
|
set_timespec(ts,threadpool_idle_timeout);
|
2012-01-15 11:17:45 +01:00
|
|
|
connection = get_event(&this_thread, thread_group, &ts);
|
|
|
|
if (!connection)
|
2011-12-08 19:17:49 +01:00
|
|
|
break;
|
|
|
|
this_thread.event_count++;
|
2016-09-21 16:28:42 +02:00
|
|
|
tp_callback(connection);
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Thread shutdown: cleanup per-worker-thread structure. */
|
|
|
|
mysql_cond_destroy(&this_thread.cond);
|
|
|
|
|
2012-01-28 01:09:28 +01:00
|
|
|
bool last_thread; /* last thread in group exits */
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_lock(&thread_group->mutex);
|
2012-01-28 01:09:28 +01:00
|
|
|
add_thread_count(thread_group, -1);
|
|
|
|
last_thread= ((thread_group->thread_count == 0) && thread_group->shutdown);
|
2011-12-08 19:17:49 +01:00
|
|
|
mysql_mutex_unlock(&thread_group->mutex);
|
|
|
|
|
2012-01-28 01:09:28 +01:00
|
|
|
/* Last thread in group exits and pool is terminating, destroy group.*/
|
|
|
|
if (last_thread)
|
2012-01-15 11:17:45 +01:00
|
|
|
thread_group_destroy(thread_group);
|
2012-01-28 01:09:28 +01:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
my_thread_end();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_pool_generic::TP_pool_generic()
|
|
|
|
{}
|
|
|
|
|
|
|
|
int TP_pool_generic::init()
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2016-09-21 16:28:42 +02:00
|
|
|
DBUG_ENTER("TP_pool_generic::TP_pool_generic");
|
2013-11-11 22:40:53 +01:00
|
|
|
threadpool_max_size= MY_MAX(threadpool_size, 128);
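/*
  Groups for the maximum possible pool size are allocated and
  initialized up front; this lets thread_pool_size change at runtime
  without reallocating the array.
*/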
|
2013-11-05 06:18:59 +01:00
|
|
|
all_groups= (thread_group_t *)
|
2020-01-29 13:50:26 +01:00
|
|
|
my_malloc(PSI_INSTRUMENT_ME,
|
|
|
|
sizeof(thread_group_t) * threadpool_max_size, MYF(MY_WME|MY_ZEROFILL));
|
2013-11-05 06:18:59 +01:00
|
|
|
if (!all_groups)
|
|
|
|
{
|
|
|
|
threadpool_max_size= 0;
|
2016-09-21 16:28:42 +02:00
|
|
|
sql_print_error("Allocation failed");
|
|
|
|
DBUG_RETURN(-1);
|
2013-11-05 06:18:59 +01:00
|
|
|
}
|
2011-12-08 19:17:49 +01:00
|
|
|
scheduler_init();
|
2016-09-21 16:28:42 +02:00
|
|
|
threadpool_started= true;
|
2013-11-05 06:18:59 +01:00
|
|
|
for (uint i= 0; i < threadpool_max_size; i++)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
2020-05-29 12:21:27 +02:00
|
|
|
thread_group_init(&all_groups[i], get_connection_attrib());
|
2011-12-08 19:17:49 +01:00
|
|
|
}
|
2016-09-21 16:28:42 +02:00
|
|
|
set_pool_size(threadpool_size);
|
2012-02-16 16:59:04 +01:00
|
|
|
if(group_count == 0)
|
|
|
|
{
|
|
|
|
/* Something went wrong */
|
|
|
|
sql_print_error("Can't set threadpool size to %d",threadpool_size);
|
2016-09-21 16:28:42 +02:00
|
|
|
DBUG_RETURN(-1);
|
2012-02-16 16:59:04 +01:00
|
|
|
}
|
2011-12-08 19:17:49 +01:00
|
|
|
PSI_register(mutex);
|
|
|
|
PSI_register(cond);
|
|
|
|
PSI_register(thread);
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-08 19:17:49 +01:00
|
|
|
pool_timer.tick_interval= threadpool_stall_limit;
|
|
|
|
start_timer(&pool_timer);
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
TP_pool_generic::~TP_pool_generic()
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
|
|
|
DBUG_ENTER("tp_end");
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-02-16 16:59:04 +01:00
|
|
|
if (!threadpool_started)
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
|
|
|
|
stop_timer(&pool_timer);
|
2013-11-06 19:53:39 +01:00
|
|
|
shutdown_group_count= threadpool_max_size;
|
2013-11-05 06:18:59 +01:00
|
|
|
for (uint i= 0; i < threadpool_max_size; i++)
|
2011-12-08 19:17:49 +01:00
|
|
|
{
|
|
|
|
thread_group_close(&all_groups[i]);
|
|
|
|
}
|
2012-02-16 16:59:04 +01:00
|
|
|
threadpool_started= false;
|
2011-12-08 19:17:49 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
2011-12-19 13:28:30 +01:00
|
|
|
|
2012-01-17 18:50:40 +01:00
|
|
|
|
2020-05-23 13:17:50 +02:00
|
|
|
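/*
  Called for each THD when the pool is resized: flag the connection so
  that the next start_io() re-evaluates its group assignment.
*/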
static my_bool thd_reset_group(THD* thd, void*)
|
|
|
|
{
|
|
|
|
auto c= (TP_connection_generic*)thd->event_scheduler.data;
|
|
|
|
if(c)
|
|
|
|
c->fix_group= true;
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
2012-01-17 18:50:40 +01:00
|
|
|
/** Ensure that poll descriptors are created when threadpool_size changes */
|
2016-09-21 16:28:42 +02:00
|
|
|
int TP_pool_generic::set_pool_size(uint size)
|
2011-12-19 13:28:30 +01:00
|
|
|
{
|
|
|
|
bool success= true;
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2011-12-19 13:28:30 +01:00
|
|
|
for(uint i=0; i< size; i++)
|
|
|
|
{
|
|
|
|
thread_group_t *group= &all_groups[i];
|
|
|
|
mysql_mutex_lock(&group->mutex);
|
2017-11-20 21:39:59 +01:00
|
|
|
if (group->pollfd == INVALID_HANDLE_VALUE)
|
2011-12-19 13:28:30 +01:00
|
|
|
{
|
|
|
|
group->pollfd= io_poll_create();
|
2017-11-20 21:39:59 +01:00
|
|
|
success= (group->pollfd != INVALID_HANDLE_VALUE);
|
2012-02-16 16:59:04 +01:00
|
|
|
if(!success)
|
|
|
|
{
|
2019-01-26 18:12:00 +01:00
|
|
|
sql_print_error("io_poll_create() failed, errno=%d", errno);
|
2012-02-16 16:59:04 +01:00
|
|
|
}
|
2020-05-29 12:21:27 +02:00
|
|
|
}
|
2018-01-30 06:30:39 +01:00
|
|
|
mysql_mutex_unlock(&group->mutex);
|
2011-12-19 13:28:30 +01:00
|
|
|
if (!success)
|
|
|
|
{
|
2012-02-16 16:59:04 +01:00
|
|
|
group_count= i;
|
2016-09-21 16:28:42 +02:00
|
|
|
return -1;
|
2011-12-19 13:28:30 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
group_count= size;
|
2020-05-23 13:17:50 +02:00
|
|
|
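/* Make every existing connection re-check its group on the next I/O. */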
server_threads.iterate(thd_reset_group);
|
2016-09-21 16:28:42 +02:00
|
|
|
return 0;
|
2011-12-19 13:28:30 +01:00
|
|
|
}
|
2011-12-29 21:11:06 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
int TP_pool_generic::set_stall_limit(uint limit)
|
2011-12-29 21:11:06 +01:00
|
|
|
{
|
|
|
|
mysql_mutex_lock(&(pool_timer.mutex));
|
|
|
|
pool_timer.tick_interval= limit;
|
|
|
|
mysql_mutex_unlock(&(pool_timer.mutex));
|
2012-01-17 18:50:40 +01:00
|
|
|
mysql_cond_signal(&(pool_timer.cond));
|
2016-09-21 16:28:42 +02:00
|
|
|
return 0;
|
2011-12-29 21:11:06 +01:00
|
|
|
}
|
2012-01-15 15:41:25 +01:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
Calculate number of idle/waiting threads in the pool.
|
2020-05-29 12:21:27 +02:00
|
|
|
|
|
|
|
Sum idle threads over all groups.
|
2012-01-28 01:09:28 +01:00
|
|
|
Don't do any locking; it is not required for stats.
|
2012-01-15 15:41:25 +01:00
|
|
|
*/
|
2012-01-28 01:09:28 +01:00
|
|
|
|
2016-09-21 16:28:42 +02:00
|
|
|
int TP_pool_generic::get_idle_thread_count()
|
2012-01-15 15:41:25 +01:00
|
|
|
{
|
|
|
|
int sum=0;
|
2017-11-20 21:39:59 +01:00
|
|
|
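/*
  Only groups that have a poll descriptor have ever been used; stop at
  the first unused one.
*/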
for (uint i= 0; i < threadpool_max_size && all_groups[i].pollfd != INVALID_HANDLE_VALUE; i++)
|
2012-01-15 15:41:25 +01:00
|
|
|
{
|
|
|
|
sum+= (all_groups[i].thread_count - all_groups[i].active_thread_count);
|
|
|
|
}
|
|
|
|
return sum;
|
2012-01-16 02:18:24 +01:00
|
|
|
}
|
2012-01-17 18:50:40 +01:00
|
|
|
|
|
|
|
|
|
|
|
/* Report threadpool problems */
|
|
|
|
|
2020-05-29 12:21:27 +02:00
|
|
|
/**
|
2012-02-16 16:59:04 +01:00
|
|
|
Delay in microseconds, after which "pool blocked" message is printed.
|
|
|
|
(30 sec == 30 million usec)
|
|
|
|
*/
|
2015-09-01 11:47:06 +02:00
|
|
|
#define BLOCK_MSG_DELAY (30*1000000)
|
2012-01-17 18:50:40 +01:00
|
|
|
|
2012-02-16 16:59:04 +01:00
|
|
|
#define MAX_THREADS_REACHED_MSG \
|
|
|
|
"Threadpool could not create additional thread to handle queries, because the \
|
|
|
|
number of allowed threads was reached. Increasing 'thread_pool_max_threads' \
|
|
|
|
parameter can help in this situation.\n \
|
|
|
|
If the 'extra_port' parameter is set, you can still connect to the database with a \
|
|
|
|
superuser account (it must be a TCP connection using extra_port as the TCP port) \
|
|
|
|
and troubleshoot the situation. \
|
|
|
|
A likely cause of pool blocks is clients that lock resources for a long time. \
|
|
|
|
'show processlist' or 'show engine innodb status' can give additional hints."
|
2012-01-17 18:50:40 +01:00
|
|
|
|
2012-02-16 16:59:04 +01:00
|
|
|
#define CREATE_THREAD_ERROR_MSG "Can't create threads in threadpool (errno=%d)."
|
2012-01-17 18:50:40 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
Write a message when a blocking situation occurs in the threadpool.
|
|
|
|
The message is written only when the pool has been blocked for BLOCK_MSG_DELAY (30) seconds.
|
|
|
|
It will be just a single message for each blocking situation (to prevent
|
|
|
|
log flood).
|
|
|
|
*/
|
2012-01-28 01:09:28 +01:00
|
|
|
|
2012-01-17 18:50:40 +01:00
|
|
|
static void print_pool_blocked_message(bool max_threads_reached)
|
|
|
|
{
|
2012-02-16 16:59:04 +01:00
|
|
|
ulonglong now;
|
2012-01-17 18:50:40 +01:00
|
|
|
static bool msg_written;
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-02-16 16:59:04 +01:00
|
|
|
now= microsecond_interval_timer();
|
2012-01-17 18:50:40 +01:00
|
|
|
if (pool_block_start == 0)
|
|
|
|
{
|
|
|
|
pool_block_start= now;
|
|
|
|
msg_written = false;
|
|
|
|
return;
|
|
|
|
}
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-28 01:09:28 +01:00
|
|
|
if (now > pool_block_start + BLOCK_MSG_DELAY && !msg_written)
|
2012-01-17 18:50:40 +01:00
|
|
|
{
|
2012-01-28 01:09:28 +01:00
|
|
|
if (max_threads_reached)
|
2012-02-16 16:59:04 +01:00
|
|
|
sql_print_error(MAX_THREADS_REACHED_MSG);
|
2012-01-17 18:50:40 +01:00
|
|
|
else
|
2012-02-16 16:59:04 +01:00
|
|
|
sql_print_error(CREATE_THREAD_ERROR_MSG, my_errno);
|
2020-05-29 12:21:27 +02:00
|
|
|
|
2012-01-26 04:35:54 +01:00
|
|
|
sql_print_information("Threadpool has been blocked for %u seconds\n",
|
2012-02-16 16:59:04 +01:00
|
|
|
(uint)((now - pool_block_start)/1000000));
|
2012-01-17 18:50:40 +01:00
|
|
|
/* avoid repeated messages for the same blocking situation */
|
|
|
|
msg_written= true;
|
|
|
|
}
|
|
|
|
}
|
2014-02-27 16:44:00 +01:00
|
|
|
|
|
|
|
#endif /* HAVE_POOL_OF_THREADS && !EMBEDDED_LIBRARY */
|