mirror of
https://github.com/MariaDB/server.git
synced 2025-01-22 14:54:20 +01:00
f631b361b6
The table opening process now works the following way:
- Create a common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
  object and open a handler to the table object

Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRINGs
- Better error message when a table is not found
- The variable table_cache is now renamed 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table
  definitions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add one terminating \0 to the result
- Engine objects are now created with a TABLE_SHARE object instead of
  a TABLE object
- After creating a field object one must call field->init(table)
  before using it

For a busy system this change will give you:
- Less memory usage per table object
- Faster opening of tables (if the table has been in use or is in the
  table definition cache)
- The ability to cache many table definition objects
- Faster drop of table
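To make the two-level split above concrete, here is a minimal, self-contained C++ sketch: one cached definition object that any number of open-table instances point at. TableShare, Table, get_share() and table_def_cache below are toy stand-ins invented for this illustration; the real TABLE_SHARE/TABLE structures in sql/table.h and the server's get_table_share()/open_table_from_share() functions carry far more state and locking.

// Toy model of the two-level design: one cached "share" (the unpacked .frm
// definition) shared by many open TABLE instances.  Not the server's code.
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct TableShare {                    // stands in for TABLE_SHARE
  std::string db, table_name;          // LEX_STRINGs in the real structure
  std::vector<std::string> fields;     // unpacked from the .frm by the server
  int ref_count= 0;                    // TABLE objects currently using this share
};

struct Table {                         // stands in for TABLE
  std::shared_ptr<TableShare> s;       // shared, cached definition
  // per-instance state (handler, record buffers, locks, ...) lives here
};

// stands in for the table definition cache, keyed on "db\0table_name"
static std::map<std::string, std::shared_ptr<TableShare>> table_def_cache;

static std::shared_ptr<TableShare> get_share(const std::string &db,
                                             const std::string &name)
{
  std::string key= db + '\0' + name;
  auto it= table_def_cache.find(key);
  if (it == table_def_cache.end())     // miss: read and unpack the .frm once
  {
    auto share= std::make_shared<TableShare>();
    share->db= db;
    share->table_name= name;
    share->fields= {"a", "b"};         // pretend this came from the .frm file
    it= table_def_cache.emplace(key, share).first;
  }
  it->second->ref_count++;
  return it->second;
}

int main()
{
  Table t1{get_share("test", "t1")};   // first open unpacks the definition
  Table t2{get_share("test", "t1")};   // later opens reuse the cached share
  std::cout << t1.s->ref_count << '\n';  // prints 2: one definition, two tables
}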
mysql-test/mysql-test-run.sh:
  Fixed some problems with the --gdb option
  Test both with socket and tcp/ip port that all old servers are killed
mysql-test/r/flush_table.result:
  More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
  Removed old (now wrong) result
mysql-test/r/innodb.result:
  Better error messages (thanks to the TDC patch)
mysql-test/r/merge.result:
  Extra flush table test
mysql-test/r/ndb_bitfield.result:
  Better error messages (thanks to the TDC patch)
mysql-test/r/ndb_partition_error.result:
  Better error messages (thanks to the TDC patch)
mysql-test/r/query_cache.result:
  Remove tables left over from old tests
mysql-test/r/temp_table.result:
  Test truncate with temporary tables
mysql-test/r/variables.result:
  Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
  More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
  Extra flush table test
mysql-test/t/multi_update.test:
  Added 'sleep' to make the test predictable
mysql-test/t/query_cache.test:
  Remove tables left over from old tests
mysql-test/t/temp_table.test:
  Test truncate with temporary tables
mysql-test/t/variables.test:
  Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
  Remove a warning that may happen because threads die in different order
mysys/hash.c:
  Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
  More DBUG
mysys/mf_pack.c:
  Better comment
mysys/mf_tempdir.c:
  More DBUG
  Ensure that we call cleanup_dirname() on all temporary directory paths.
  If we don't do this, we will get a failure when comparing temporary table
  names, as in some cases the temporary table name is run through
  convert_dirname().
mysys/my_alloc.c:
  Indentation fix
sql/examples/ha_example.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
  Update for the table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (this is because field definitions are now cached in TABLE_SHARE).
    When a field is created, one must now call field->init(TABLE) before
    using it
  - Use s->db instead of s->table_cache_key
  - Added Field::clone() to create a field in TABLE from a field in
    TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
sql/field.h:
  Update for the table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (this is because field definitions are now cached in TABLE_SHARE).
    When a field is created, one must now call field->init(TABLE) before
    using it
  - Added Field::clone() to create a field in TABLE from a field in
    TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
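As a toy illustration of the Field::clone() / field->init(table) rule described for sql/field.cc and sql/field.h above, the sketch below keeps field definitions in a share and binds a per-table copy to each open instance. The types are invented for this illustration; the real Field hierarchy in sql/field.h is much richer.

// Fields live in the share; clone() copies the definition and init(table)
// binds the copy to one open TABLE.  Toy code, not the server's classes.
#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Table;                          // forward declaration

struct Field {                         // definition part, owned by the share
  std::string name;
  std::size_t offset;                  // position inside a record buffer
  const Table *table= nullptr;         // set by init(); unusable before that

  Field(std::string n, std::size_t off) : name(std::move(n)), offset(off) {}
  std::unique_ptr<Field> clone() const { return std::make_unique<Field>(*this); }
  void init(const Table *t) { table= t; }   // must be called before use
};

struct TableShare {                    // cached definition
  std::vector<Field> fields;
};

struct Table {                         // one open instance of the table
  const TableShare *s;
  std::vector<char> record;            // per-instance record buffer
  std::vector<std::unique_ptr<Field>> field;

  explicit Table(const TableShare *share) : s(share), record(64, 0)
  {
    for (const Field &def : share->fields)
    {
      field.push_back(def.clone());    // copy the definition ...
      field.back()->init(this);        // ... then bind it to this TABLE
    }
  }
};

int main()
{
  TableShare share{{Field("a", 0), Field("b", 8)}};
  Table t(&share);
  std::cout << t.field[1]->name << " bound: "
            << (t.field[1]->table == &t) << '\n';   // prints "b bound: 1"
}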
sql/ha_archive.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Changed name of argument to create() to not hide the internal 'table'
  variable
  table->s -> table_share
sql/ha_berkeley.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed comments
  Removed index variable and replaced it with pointers (simple optimization)
  move_field() -> move_field_offset()
  Removed some strlen() calls
sql/ha_federated.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Simplify delete_table() and create() as the given file names are now
  without extension
sql/ha_heap.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Removed an unneeded fn_format()
  Fixed for the new table->s structure
sql/ha_myisam.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Don't set 'is_view' for MERGE tables
  Use the new interface to find_temporary_table()
sql/ha_myisammrg.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed wrong calls to strxnmov()
  Give error HA_ERR_TABLE_DEF_CHANGED if the table definition has changed
  drop_table -> intern_drop_table()
  table->s -> table_share
  Move part_info to TABLE
  Fixed comments & DBUG prints
  New arguments to print_error()
sql/ha_ndbcluster.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  We can't set up or use part_info when creating the handler as there is
  not yet any table object
  New ha_intialise() to work with TDC (done by Mikael)
sql/ha_partition.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Got set_part_info() from Mikael
sql/handler.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  ha_delete_table() now also takes the database as an argument
  handler::ha_open() now takes TABLE as argument
  ha_open() now calls ha_allocate_read_write_set()
  Simplify ha_allocate_read_write_set()
  Remove ha_deallocate_read_write_set()
  Use table_share (cached by the table definition cache)
sql/handler.h:
  New table flag: HA_NO_COPY_ON_ALTER (used by MERGE tables)
  Remove ha_deallocate_read_write_set()
  get_new_handler() now takes TABLE_SHARE as argument
  ha_delete_table() now gets the database as argument
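The recurring "created with a TABLE_SHARE, bound to a TABLE later" pattern for engine handlers can be sketched as below. handler, ha_example, get_new_handler() and ha_open() are real names from sql/handler.h and sql/examples/, but the types and signatures here are simplified stand-ins written for illustration, not the server's declarations.

// The handler constructor only needs the shared definition; the concrete
// TABLE instance arrives later through ha_open().  Toy shapes only.
#include <iostream>
#include <memory>
#include <string>

struct TableShare { std::string db, table_name; };
struct Table      { const TableShare *s; };

class handler {                                   // engine interface
public:
  explicit handler(const TableShare *share) : table_share(share) {}
  virtual ~handler() = default;
  int ha_open(Table *tab) { table= tab; return open(); }   // bind instance
protected:
  virtual int open() = 0;                         // engine-specific part
  const TableShare *table_share;                  // definition-level info
  Table *table= nullptr;                          // set only at ha_open()
};

class ha_example : public handler {               // a dummy engine
public:
  using handler::handler;
protected:
  int open() override
  {
    std::cout << "opening " << table_share->db << '.'
              << table_share->table_name << '\n';
    return 0;
  }
};

// stands in for get_new_handler(TABLE_SHARE*, ...): it never sees a TABLE
static std::unique_ptr<handler> get_new_handler(const TableShare *share)
{
  return std::make_unique<ha_example>(share);
}

int main()
{
  TableShare share{"test", "t1"};
  Table t{&share};
  auto h= get_new_handler(&share);   // created from the definition only
  return h->ha_open(&t);             // bound to a concrete TABLE afterwards
}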
sql/item.cc:
  table_name and db are now LEX_STRING objects
  When creating fields, we now have to call field->init(table)
  move_field -> move_field_offset()
sql/item.h:
  tmp_table_field_from_field_type() now takes an extra parameter
  'fixed_length' to allow one to force usage of CHAR instead of BLOB
sql/item_cmpfunc.cc:
  Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
  Assert on new, not yet handled cast types
sql/item_func.cc:
  When creating fields, we now have to call field->init(table)
  dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
  Trivial code cleanups
sql/item_sum.cc:
  When creating fields, we now have to call field->init(table)
sql/item_timefunc.cc:
  Item_func_str_to_date::tmp_table_field() now replaced by a call to
  tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
  Simplify tmp_table_field()
sql/item_uniq.cc:
  When creating fields, we now have to call field->init(table)
sql/key.cc:
  Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
  More debugging
  Use create_table_def_key() to create the key for the table cache
  Allocate TABLE_SHARE properly when creating a name lock
  Fix so that locked_table_name doesn't test the same table twice
sql/mysql_priv.h:
  New functions for the table definition cache
  New interfaces to a lot of functions
  New, faster interface to find_temporary_table() and
  close_temporary_table()
sql/mysqld.cc:
  Added support for a table definition cache of size 'table_def_size'
  Fixed some calls to strnmov()
  Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
  Use new interfaces
  Fixed warnings from valgrind
sql/parse_file.cc:
  Safer calls to strxnmov()
  Fixed typo
sql/set_var.cc:
  Added variable 'table_definition_cache'
  Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
  Use new interface
sql/sp.cc:
  Proper use of TABLE_SHARE
sql/sp_head.cc:
  Remove compiler warnings
  We now have to call field->init(table)
sql/sp_head.h:
  Pointers to parsed strings are now const
sql/sql_acl.cc:
  table_name is now a LEX_STRING
sql/sql_base.cc:
  Main implementation of the table definition cache
  (The #ifdefs are there for the future, when the table definition cache
  will replace the open table cache)
  Table definitions are now cached independent of open tables, which will
  speed things up when a table is in use from several places at once
  Views are not yet cached; for the moment we only cache whether a table
  is a view or not
  Faster implementation of find_temporary_table()
  Replace 'wait_for_refresh()' with the more general function
  'wait_for_condition()'
  Drop table is slightly faster as we can use the table definition cache
  to know the type of the table
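The 'wait_for_condition()' generalization mentioned for sql/sql_base.cc boils down to "sleep on a condition variable until an arbitrary predicate holds, releasing the protecting mutex while asleep". A hedged, self-contained sketch of that pattern follows, using std::mutex/std::condition_variable in place of the server's pthread primitives and THD bookkeeping; the real function also updates per-thread state and signalling.

// Generic "wait until pred() is true under LOCK_open" helper, modelled on
// the idea behind wait_for_condition(); not the server's implementation.
#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

static std::mutex LOCK_open;                  // name borrowed from the server
static std::condition_variable COND_refresh;
static bool share_flushed= false;             // example condition to wait for

// Waits until pred() is true; the mutex is released while sleeping and
// re-acquired before pred() is evaluated and before returning.
static void wait_for_condition(std::unique_lock<std::mutex> &lock,
                               const std::function<bool()> &pred)
{
  COND_refresh.wait(lock, pred);
}

int main()
{
  std::thread flusher([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
      std::lock_guard<std::mutex> g(LOCK_open);
      share_flushed= true;                    // e.g. an old TABLE_SHARE released
    }
    COND_refresh.notify_all();
  });

  std::unique_lock<std::mutex> lock(LOCK_open);
  wait_for_condition(lock, [] { return share_flushed; });
  std::cout << "table definition refreshed\n";
  lock.unlock();
  flusher.join();
}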
sql/sql_cache.cc:
  table_cache_key and table_name are now LEX_STRINGs
  DBUG print fixes
sql/sql_class.cc:
  table_cache_key is now a LEX_STRING
  Safer strxnmov()
sql/sql_class.h:
  Added number of open table shares (table definitions)
sql/sql_db.cc:
  Safer strxnmov()
sql/sql_delete.cc:
  Use the new interface to find_temporary_table()
sql/sql_derived.cc:
  table_name is now a LEX_STRING
sql/sql_handler.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRINGs
sql/sql_insert.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRINGs
sql/sql_lex.cc:
  Make the parsed string a const (to quickly find out if anything is
  trying to change the query string)
sql/sql_lex.h:
  Make the parsed string a const (to quickly find out if anything is
  trying to change the query string)
sql/sql_load.cc:
  Safer strxnmov()
sql/sql_parse.cc:
  Better error if a wrong DB name is given
sql/sql_partition.cc:
  part_info moved to TABLE from TABLE_SHARE
  Indentation changes
sql/sql_select.cc:
  Indentation fixes
  Call field->init(TABLE) for newly created fields
  Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
  Call field->init(TABLE) for newly created fields
sql/sql_show.cc:
  table_name is now a LEX_STRING
  part_info moved to TABLE
sql/sql_table.cc:
  Use the table definition cache to speed up deletion of tables
  Fixed calls to functions with new interfaces
  Don't use 'share_not_to_be_used'
  Instead of doing openfrm() when doing repair, we now have to call
  get_table_share() followed by open_table_from_share()
  Replace some fn_format() calls with the faster unpack_filename()
  Safer strxnmov()
  part_info is now in TABLE
  Added Mikael's patch for partition and ALTER TABLE
  Instead of using 'TABLE_SHARE->is_view', use
  'table_flags() & HA_NO_COPY_ON_ALTER'
sql/sql_test.cc:
  table_name and table_cache_key are now LEX_STRINGs
sql/sql_trigger.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRINGs
  Safer strxnmov()
  Removed compiler warnings
sql/sql_update.cc:
  Call field->init(TABLE) after a field is created
sql/sql_view.cc:
  Safer strxnmov()
  Create a common TABLE_SHARE object for views to allow us to cache
  whether a table is a view
sql/structs.h:
  Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
  Creation and destruction of TABLE_SHARE objects that are common to
  many TABLE objects
  The table opening process now works the following way:
  - Create a common TABLE_SHARE object
  - Read the .frm file and unpack it into the TABLE_SHARE object
  - Create a TABLE object based on the information in the TABLE_SHARE
    object and open a handler to the table object
  open_table_def() is written in such a way that it should be trivial to
  add parsing of .frm files in new formats
sql/table.h:
  TABLE objects for the same database table now share a common
  TABLE_SHARE object
  In TABLE_SHARE the most common strings are now LEX_STRINGs
sql/unireg.cc:
  Changed arguments to rea_create_table() to have the same order as
  other functions
  Call field->init(table) for newly created fields
sql/unireg.h:
  Added OPEN_VIEW
strings/strxnmov.c:
  Change strxnmov() to always add a terminating \0
  This makes usage of strxnmov() safer, as most of the MySQL code assumes
  that strxnmov() will create a null-terminated string
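The strxnmov() change is easiest to see in a standalone re-implementation (demo_strxnmov() below is written for this illustration and is not the mysys source): the function copies at most len characters and then always writes a terminating '\0', so callers pass sizeof(buff) - 1 and still get a usable C string even on truncation.

// Illustration of the "always null-terminate" guarantee described above.
#include <cstdarg>
#include <cstddef>
#include <cstdio>

// Concatenates src strings into dst, copying at most len characters, then
// always writes a trailing '\0'; the argument list ends with a null pointer.
static char *demo_strxnmov(char *dst, std::size_t len, const char *src, ...)
{
  char *end= dst + len;                    // last usable position before '\0'
  va_list ap;
  va_start(ap, src);
  while (src)
  {
    while (*src && dst < end)
      *dst++= *src++;
    src= va_arg(ap, const char *);
  }
  va_end(ap);
  *dst= 0;                                 // the post-patch guarantee
  return dst;
}

int main()
{
  char path[16];
  // room for 15 characters plus the guaranteed terminator
  demo_strxnmov(path, sizeof(path) - 1, "/var/lib/mysql/", "db", "/", "t1",
                (const char *) 0);
  std::printf("%s\n", path);               // truncated but still terminated
  return 0;
}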
534 lines
15 KiB
C++
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */


/* Write some debug info */


#include "mysql_priv.h"
#include "sql_select.h"
#include <hash.h>
#include <thr_alarm.h>
#if defined(HAVE_MALLINFO) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#elif defined(HAVE_MALLINFO) && defined(HAVE_SYS_MALLOC_H)
#include <sys/malloc.h>
#endif

static const char *lock_descriptions[] =
{
  "No lock",
  "Low priority read lock",
  "Shared Read lock",
  "High priority read lock",
  "Read lock without concurrent inserts",
  "Write lock that allows other writers",
  "Write lock, but allow reading",
  "Concurrent insert lock",
  "Lock Used by delayed insert",
  "Low priority write lock",
  "High priority write lock",
  "Highest priority write lock"
};


#ifndef DBUG_OFF

void
print_where(COND *cond,const char *info)
{
  if (cond)
  {
    char buff[256];
    String str(buff,(uint32) sizeof(buff), system_charset_info);
    str.length(0);
    cond->print(&str);
    str.append('\0');
    DBUG_LOCK_FILE;
    (void) fprintf(DBUG_FILE,"\nWHERE:(%s) ",info);
    (void) fputs(str.ptr(),DBUG_FILE);
    (void) fputc('\n',DBUG_FILE);
    DBUG_UNLOCK_FILE;
  }
}
/* This is for debugging purposes */


void print_cached_tables(void)
{
  uint idx,count,unused;
  TABLE *start_link,*lnk;

  VOID(pthread_mutex_lock(&LOCK_open));
  puts("DB Table Version Thread L.thread Open Lock");

  for (idx=unused=0 ; idx < open_cache.records ; idx++)
  {
    TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
    printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n",
           entry->s->db.str, entry->s->table_name.str, entry->s->version,
           entry->in_use ? entry->in_use->thread_id : 0L,
           entry->in_use ? entry->in_use->dbug_thread_id : 0L,
           entry->db_stat ? 1 : 0,
           entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] :
                           "Not in use");
    if (!entry->in_use)
      unused++;
  }
  count=0;
  if ((start_link=lnk=unused_tables))
  {
    do
    {
      if (lnk != lnk->next->prev || lnk != lnk->prev->next)
      {
        printf("unused_links isn't linked properly\n");
        return;
      }
    } while (count++ < open_cache.records && (lnk=lnk->next) != start_link);
    if (lnk != start_link)
    {
      printf("Unused_links aren't connected\n");
    }
  }
  if (count != unused)
    printf("Unused_links (%d) doesn't match open_cache: %d\n", count,unused);
  printf("\nCurrent refresh version: %ld\n",refresh_version);
  if (hash_check(&open_cache))
    printf("Error: File hash table is corrupted\n");
  fflush(stdout);
  VOID(pthread_mutex_unlock(&LOCK_open));
  return;
}


void TEST_filesort(SORT_FIELD *sortorder,uint s_length)
{
  char buff[256],buff2[256];
  String str(buff,sizeof(buff),system_charset_info);
  String out(buff2,sizeof(buff2),system_charset_info);
  const char *sep;
  DBUG_ENTER("TEST_filesort");

  out.length(0);
  for (sep=""; s_length-- ; sortorder++, sep=" ")
  {
    out.append(sep);
    if (sortorder->reverse)
      out.append('-');
    if (sortorder->field)
    {
      if (sortorder->field->table_name)
      {
        out.append(*sortorder->field->table_name);
        out.append('.');
      }
      out.append(sortorder->field->field_name ? sortorder->field->field_name:
                 "tmp_table_column");
    }
    else
    {
      str.length(0);
      sortorder->item->print(&str);
      out.append(str);
    }
  }
  out.append('\0');                             // Purify doesn't like c_ptr()
  DBUG_LOCK_FILE;
  VOID(fputs("\nInfo about FILESORT\n",DBUG_FILE));
  fprintf(DBUG_FILE,"Sortorder: %s\n",out.ptr());
  DBUG_UNLOCK_FILE;
  DBUG_VOID_RETURN;
}


void
TEST_join(JOIN *join)
{
  uint i,ref;
  DBUG_ENTER("TEST_join");

  DBUG_LOCK_FILE;
  VOID(fputs("\nInfo about JOIN\n",DBUG_FILE));
  for (i=0 ; i < join->tables ; i++)
  {
    JOIN_TAB *tab=join->join_tab+i;
    TABLE *form=tab->table;
    char key_map_buff[128];
    fprintf(DBUG_FILE,"%-16.16s type: %-7s q_keys: %s refs: %d key: %d len: %d\n",
            form->alias,
            join_type_str[tab->type],
            tab->keys.print(key_map_buff),
            tab->ref.key_parts,
            tab->ref.key,
            tab->ref.key_length);
    if (tab->select)
    {
      char buf[MAX_KEY/8+1];
      if (tab->use_quick == 2)
        fprintf(DBUG_FILE,
                " quick select checked for each record (keys: %s)\n",
                tab->select->quick_keys.print(buf));
      else if (tab->select->quick)
      {
        fprintf(DBUG_FILE, " quick select used:\n");
        tab->select->quick->dbug_dump(18, FALSE);
      }
      else
        VOID(fputs(" select used\n",DBUG_FILE));
    }
    if (tab->ref.key_parts)
    {
      VOID(fputs(" refs: ",DBUG_FILE));
      for (ref=0 ; ref < tab->ref.key_parts ; ref++)
      {
        Item *item=tab->ref.items[ref];
        fprintf(DBUG_FILE,"%s ", item->full_name());
      }
      VOID(fputc('\n',DBUG_FILE));
    }
  }
  DBUG_UNLOCK_FILE;
  DBUG_VOID_RETURN;
}


/*
  Print the current state during query optimization.

  SYNOPSIS
    print_plan()
    join          pointer to the structure providing all context info for
                  the query
    read_time     the cost of the best partial plan
    record_count  estimate for the number of records returned by the best
                  partial plan
    idx           length of the partial QEP in 'join->positions';
                  also an index in the array 'join->best_ref';
    info          comment string to appear above the printout

  DESCRIPTION
    This function prints to the log file DBUG_FILE the members of 'join' that
    are used during query optimization (join->positions, join->best_positions,
    and join->best_ref) and a few other related variables (read_time,
    record_count).
    Useful to trace query optimizer functions.

  RETURN
    None
*/

void
print_plan(JOIN* join, double read_time, double record_count,
           uint idx, const char *info)
{
  uint i;
  POSITION pos;
  JOIN_TAB *join_table;
  JOIN_TAB **plan_nodes;
  TABLE* table;

  if (info == 0)
    info= "";

  DBUG_LOCK_FILE;
  if (join->best_read == DBL_MAX)
  {
    fprintf(DBUG_FILE,"%s; idx:%u, best: DBL_MAX, current:%g\n",
            info, idx, read_time);
  }
  else
  {
    fprintf(DBUG_FILE,"%s; idx: %u, best: %g, current: %g\n",
            info, idx, join->best_read, read_time);
  }

  /* Print the tables in JOIN->positions */
  fputs(" POSITIONS: ", DBUG_FILE);
  for (i= 0; i < idx ; i++)
  {
    pos = join->positions[i];
    table= pos.table->table;
    if (table)
      fputs(table->s->table_name.str, DBUG_FILE);
    fputc(' ', DBUG_FILE);
  }
  fputc('\n', DBUG_FILE);

  /*
    Print the tables in JOIN->best_positions only if at least one complete plan
    has been found. An indicator for this is the value of 'join->best_read'.
  */
  fputs("BEST_POSITIONS: ", DBUG_FILE);
  if (join->best_read < DBL_MAX)
  {
    for (i= 0; i < idx ; i++)
    {
      pos= join->best_positions[i];
      table= pos.table->table;
      if (table)
        fputs(table->s->table_name.str, DBUG_FILE);
      fputc(' ', DBUG_FILE);
    }
  }
  fputc('\n', DBUG_FILE);

  /* Print the tables in JOIN->best_ref */
  fputs(" BEST_REF: ", DBUG_FILE);
  for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++)
  {
    join_table= (*plan_nodes);
    fputs(join_table->table->s->table_name.str, DBUG_FILE);
    fprintf(DBUG_FILE, "(%lu,%lu,%lu)",
            (ulong) join_table->found_records,
            (ulong) join_table->records,
            (ulong) join_table->read_time);
    fputc(' ', DBUG_FILE);
  }
  fputc('\n', DBUG_FILE);

  DBUG_UNLOCK_FILE;
}

#endif

typedef struct st_debug_lock
{
  ulong thread_id;
  char table_name[FN_REFLEN];
  bool waiting;
  const char *lock_text;
  enum thr_lock_type type;
} TABLE_LOCK_INFO;

static int dl_compare(TABLE_LOCK_INFO *a,TABLE_LOCK_INFO *b)
{
  if (a->thread_id > b->thread_id)
    return 1;
  if (a->thread_id < b->thread_id)
    return -1;
  if (a->waiting == b->waiting)
    return 0;
  else if (a->waiting)
    return -1;
  return 1;
}


static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data,
                                  bool wait, const char *text)
{
  if (data)
  {
    TABLE *table=(TABLE *)data->debug_print_param;
    if (table && table->s->tmp_table == NO_TMP_TABLE)
    {
      TABLE_LOCK_INFO table_lock_info;
      table_lock_info.thread_id= table->in_use->thread_id;
      memcpy(table_lock_info.table_name, table->s->table_cache_key.str,
             table->s->table_cache_key.length);
      table_lock_info.table_name[strlen(table_lock_info.table_name)]='.';
      table_lock_info.waiting=wait;
      table_lock_info.lock_text=text;
      // lock_type is also obtainable from THR_LOCK_DATA
      table_lock_info.type=table->reginfo.lock_type;
      VOID(push_dynamic(ar,(gptr) &table_lock_info));
    }
  }
}


/*
  Regarding MERGE tables:

  For now, the best option is to use the common TABLE *pointer for all
  cases; The drawback is that for MERGE tables we will see many locks
  for the merge tables even if some of them are for individual tables.

  The way to solve this is to add to 'THR_LOCK' structure a pointer to
  the filename and use this when printing the data.
  (We can for now ignore this and just print the same name for all merge
  table parts; Please add the above as a comment to the display_lock
  function so that we can easily add this if we ever need this.
*/

static void display_table_locks(void)
{
  LIST *list;
  DYNAMIC_ARRAY saved_table_locks;

  VOID(my_init_dynamic_array(&saved_table_locks, sizeof(TABLE_LOCK_INFO),
                             open_cache.records + 20, 50));
  VOID(pthread_mutex_lock(&THR_LOCK_lock));
  for (list= thr_lock_thread_list; list; list= list_rest(list))
  {
    THR_LOCK *lock=(THR_LOCK*) list->data;

    VOID(pthread_mutex_lock(&lock->mutex));
    push_locks_into_array(&saved_table_locks, lock->write.data, FALSE,
                          "Locked - write");
    push_locks_into_array(&saved_table_locks, lock->write_wait.data, TRUE,
                          "Waiting - write");
    push_locks_into_array(&saved_table_locks, lock->read.data, FALSE,
                          "Locked - read");
    push_locks_into_array(&saved_table_locks, lock->read_wait.data, TRUE,
                          "Waiting - read");
    VOID(pthread_mutex_unlock(&lock->mutex));
  }
  VOID(pthread_mutex_unlock(&THR_LOCK_lock));
  if (!saved_table_locks.elements) goto end;

  qsort((gptr) dynamic_element(&saved_table_locks, 0, TABLE_LOCK_INFO *),
        saved_table_locks.elements, sizeof(TABLE_LOCK_INFO),
        (qsort_cmp) dl_compare);
  freeze_size(&saved_table_locks);

  puts("\nThread database.table_name Locked/Waiting Lock_type\n");

  unsigned int i;
  for (i=0 ; i < saved_table_locks.elements ; i++)
  {
    TABLE_LOCK_INFO *dl_ptr=dynamic_element(&saved_table_locks,i,TABLE_LOCK_INFO*);
    printf("%-8ld%-28.28s%-22s%s\n",
           dl_ptr->thread_id, dl_ptr->table_name, dl_ptr->lock_text,
           lock_descriptions[(int)dl_ptr->type]);
  }
  puts("\n\n");
end:
  delete_dynamic(&saved_table_locks);
}


static int print_key_cache_status(const char *name, KEY_CACHE *key_cache)
{
  char llbuff1[22];
  char llbuff2[22];
  char llbuff3[22];
  char llbuff4[22];

  if (!key_cache->key_cache_inited)
  {
    printf("%s: Not in use\n", name);
  }
  else
  {
    printf("%s\n\
Buffer_size: %10lu\n\
Block_size: %10lu\n\
Division_limit: %10lu\n\
Age_limit: %10lu\n\
blocks used: %10lu\n\
not flushed: %10lu\n\
w_requests: %10s\n\
writes: %10s\n\
r_requests: %10s\n\
reads: %10s\n\n",
           name,
           (ulong) key_cache->param_buff_size, key_cache->param_block_size,
           key_cache->param_division_limit, key_cache->param_age_threshold,
           key_cache->blocks_used, key_cache->global_blocks_changed,
           llstr(key_cache->global_cache_w_requests,llbuff1),
           llstr(key_cache->global_cache_write,llbuff2),
           llstr(key_cache->global_cache_r_requests,llbuff3),
           llstr(key_cache->global_cache_read,llbuff4));
  }
  return 0;
}


void mysql_print_status()
{
  char current_dir[FN_REFLEN];
  STATUS_VAR tmp;

  calc_sum_of_all_status(&tmp);
  printf("\nStatus information:\n\n");
  my_getwd(current_dir, sizeof(current_dir), MYF(0));
  printf("Current dir: %s\n", current_dir);
  printf("Running threads: %d Stack size: %ld\n", thread_count,
         (long) thread_stack);
  thr_print_locks();                            // Write some debug info
#ifndef DBUG_OFF
  print_cached_tables();
#endif
  /* Print key cache status */
  puts("\nKey caches:");
  process_key_caches(print_key_cache_status);
  pthread_mutex_lock(&LOCK_status);
  printf("\nhandler status:\n\
read_key: %10lu\n\
read_next: %10lu\n\
read_rnd %10lu\n\
read_first: %10lu\n\
write: %10lu\n\
delete %10lu\n\
update: %10lu\n",
         tmp.ha_read_key_count,
         tmp.ha_read_next_count,
         tmp.ha_read_rnd_count,
         tmp.ha_read_first_count,
         tmp.ha_write_count,
         tmp.ha_delete_count,
         tmp.ha_update_count);
  pthread_mutex_unlock(&LOCK_status);
  printf("\nTable status:\n\
Opened tables: %10lu\n\
Open tables: %10lu\n\
Open files: %10lu\n\
Open streams: %10lu\n",
         tmp.opened_tables,
         (ulong) cached_open_tables(),
         (ulong) my_file_opened,
         (ulong) my_stream_opened);

  ALARM_INFO alarm_info;
#ifndef DONT_USE_THR_ALARM
  thr_alarm_info(&alarm_info);
  printf("\nAlarm status:\n\
Active alarms: %u\n\
Max used alarms: %u\n\
Next alarm time: %lu\n",
         alarm_info.active_alarms,
         alarm_info.max_used_alarms,
         alarm_info.next_alarm_time);
#endif
  display_table_locks();
  fflush(stdout);
  my_checkmalloc();
  TERMINATE(stdout);                            // Write malloc information

#ifdef HAVE_MALLINFO
  struct mallinfo info= mallinfo();
  printf("\nMemory status:\n\
Non-mmapped space allocated from system: %d\n\
Number of free chunks: %d\n\
Number of fastbin blocks: %d\n\
Number of mmapped regions: %d\n\
Space in mmapped regions: %d\n\
Maximum total allocated space: %d\n\
Space available in freed fastbin blocks: %d\n\
Total allocated space: %d\n\
Total free space: %d\n\
Top-most, releasable space: %d\n\
Estimated memory (with thread stack): %ld\n",
         (int) info.arena,
         (int) info.ordblks,
         (int) info.smblks,
         (int) info.hblks,
         (int) info.hblkhd,
         (int) info.usmblks,
         (int) info.fsmblks,
         (int) info.uordblks,
         (int) info.fordblks,
         (int) info.keepcost,
         (long) (thread_count * thread_stack + info.hblkhd + info.arena));
#endif
  puts("");
}