mirror of
https://github.com/MariaDB/server.git
synced 2025-01-17 20:42:30 +01:00
Merge moonlight.intranet:/home/tomash/src/mysql_ab/mysql-4.1-bug21915
into moonlight.intranet:/home/tomash/src/mysql_ab/mysql-5.0-bug21915 sql/mysql_priv.h: SCCS merged sql/mysqld.cc: SCCS merged
This commit is contained in:
commit
b372ebc5ef
2 changed files with 35 additions and 9 deletions
|
@ -131,6 +131,8 @@ MY_LOCALE *my_locale_by_name(const char *name);
|
|||
#define MAX_ACCEPT_RETRY 10 // Test accept this many times
|
||||
#define MAX_FIELDS_BEFORE_HASH 32
|
||||
#define USER_VARS_HASH_SIZE 16
|
||||
#define TABLE_OPEN_CACHE_MIN 64
|
||||
#define TABLE_OPEN_CACHE_DEFAULT 64
|
||||
|
||||
/*
|
||||
Value of 9236 discovered through binary search 2006-09-26 on Ubuntu Dapper
|
||||
|
|
|
@ -2641,19 +2641,43 @@ static int init_common_variables(const char *conf_file_name, int argc,
|
|||
|
||||
/* connections and databases needs lots of files */
|
||||
{
|
||||
uint files, wanted_files;
|
||||
uint files, wanted_files, max_open_files;
|
||||
|
||||
wanted_files= 10+(uint) max(max_connections*5,
|
||||
max_connections+table_cache_size*2);
|
||||
set_if_bigger(wanted_files, open_files_limit);
|
||||
files= my_set_max_open_files(wanted_files);
|
||||
/* MyISAM requires two file handles per table. */
|
||||
wanted_files= 10+max_connections+table_cache_size*2;
|
||||
/*
|
||||
We are trying to allocate no less than max_connections*5 file
|
||||
handles (i.e. we are trying to set the limit so that they will
|
||||
be available). In addition, we allocate no less than how much
|
||||
was already allocated. However below we report a warning and
|
||||
recompute values only if we got less file handles than were
|
||||
explicitly requested. No warning and re-computation occur if we
|
||||
can't get max_connections*5 but still got no less than was
|
||||
requested (value of wanted_files).
|
||||
*/
|
||||
max_open_files= max(max(wanted_files, max_connections*5),
|
||||
open_files_limit);
|
||||
files= my_set_max_open_files(max_open_files);
|
||||
|
||||
if (files < wanted_files)
|
||||
{
|
||||
if (!open_files_limit)
|
||||
{
|
||||
max_connections= (ulong) min((files-10),max_connections);
|
||||
table_cache_size= (ulong) max((files-10-max_connections)/2,64);
|
||||
/*
|
||||
If we have requested too many file handles, we bring
|
||||
max_connections in supported bounds.
|
||||
*/
|
||||
max_connections= (ulong) min(files-10-TABLE_OPEN_CACHE_MIN*2,
|
||||
max_connections);
|
||||
/*
|
||||
Decrease table_cache_size according to max_connections, but
|
||||
not below TABLE_OPEN_CACHE_MIN. Outer min() ensures that we
|
||||
never increase table_cache_size automatically (that could
|
||||
happen if max_connections is decreased above).
|
||||
*/
|
||||
table_cache_size= (ulong) min(max((files-10-max_connections)/2,
|
||||
TABLE_OPEN_CACHE_MIN),
|
||||
table_cache_size);
|
||||
DBUG_PRINT("warning",
|
||||
("Changed limits: max_open_files: %u max_connections: %ld table_cache: %ld",
|
||||
files, max_connections, table_cache_size));
|
||||
|
@ -5943,8 +5967,8 @@ The minimum value for this variable is 4096.",
|
|||
0, 0, 0, 0},
|
||||
{"table_cache", OPT_TABLE_CACHE,
|
||||
"The number of open tables for all threads.", (gptr*) &table_cache_size,
|
||||
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
|
||||
0, 1, 0},
|
||||
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG,
|
||||
TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0},
|
||||
{"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
|
||||
"seconds to wait for a table level lock before returning an error. Used"
|
||||
" only if the connection has active cursors.",
|
||||
|
|
Loading…
Reference in a new issue